ViT

Classes

class models.coda_prompt_utils.vit.Attention(dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0)

Bases: Module

Multi-head self-attention with optional prompting. When forward receives a prompt (treated as a pair of key and value prefix tensors), the prefixes are concatenated to the attention keys and values, so the prompt influences every token without changing the output length. The save_*/get_* helpers cache the most recent attention map and its gradients, typically registered through hooks, for later inspection.

forward(x, prompt=None)
get_attention_map()
get_attn_gradients()
save_attention_map(attention_map)
save_attn_gradients(attn_gradients)
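
A minimal usage sketch. The tuple prompt format (a pair of key and value prefix tensors, each shaped (batch, prompt_length, dim)) and the ViT-B/16 sizes are assumptions drawn from common CODA-Prompt implementations, not guarantees of this page:

    import torch
    from models.coda_prompt_utils.vit import Attention

    attn = Attention(dim=768, num_heads=12, qkv_bias=True)
    x = torch.randn(4, 197, 768)  # (batch, tokens, dim)

    # Plain self-attention, no prompting.
    out = attn(x)

    # Prefix-style prompting (assumed convention): key/value prefixes are
    # concatenated to the attention keys and values inside forward, so the
    # output keeps the input token count.
    pk = torch.randn(4, 8, 768)
    pv = torch.randn(4, 8, 768)
    out_prompted = attn(x, prompt=(pk, pv))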
class models.coda_prompt_utils.vit.Block(dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=<class 'torch.nn.modules.activation.GELU'>, norm_layer=<class 'torch.nn.modules.normalization.LayerNorm'>)

Bases: Module

Pre-norm transformer encoder block (Attention followed by an MLP, with residual connections and optional DropPath). The prompt argument is passed through unchanged to the underlying Attention layer.

forward(x, prompt=None)
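
A short sketch of a single block, with and without a prompt; the (key, value) prefix format is the same assumption as in the Attention example:

    import torch
    from models.coda_prompt_utils.vit import Block

    blk = Block(dim=768, num_heads=12, mlp_ratio=4.0, qkv_bias=True)
    x = torch.randn(4, 197, 768)

    y = blk(x)  # behaves like a standard ViT encoder layer

    # The prompt is passed straight through to the block's Attention layer.
    prompt = (torch.randn(4, 8, 768), torch.randn(4, 8, 768))
    y_prompted = blk(x, prompt=prompt)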
class models.coda_prompt_utils.vit.VisionTransformer(qk_scale=None, args=None, **kwargs)

Bases: VisionTransformer

Prompt-aware Vision Transformer used as the CODA-Prompt backbone, extending the base VisionTransformer. When a prompt module is supplied, forward queries it once per block with the query features q, the block index, the train flag and the current task_id, feeds the returned layer-specific prompts to that block's attention, and accumulates any auxiliary prompt loss alongside the features.

forward(x, prompt=None, q=None, train=False, task_id=None)
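
The sketch below illustrates how the extra forward arguments fit together, using a hypothetical PromptBank stand-in for the real CODA-Prompt module. The per-layer prompt interface, taking (q, layer, x, train, task_id) and returning (prompts, loss, x), is an assumption modeled on common CODA-Prompt code, not guaranteed by this page:

    import torch
    from torch import nn
    from models.coda_prompt_utils.vit import Block

    class PromptBank(nn.Module):
        """Hypothetical stand-in for the CODA-Prompt module: returns a
        (key, value) prompt pair per layer plus an auxiliary loss."""

        def __init__(self, n_layers=12, length=8, dim=768):
            super().__init__()
            self.p = nn.Parameter(torch.randn(n_layers, 2, length, dim) * 0.02)

        def forward(self, q, layer, x, train=False, task_id=None):
            # Real implementations weight prompt components using the query q
            # and restrict them by task_id; this sketch just broadcasts fixed
            # parameters.
            B = x.shape[0]
            pk = self.p[layer, 0].expand(B, -1, -1)
            pv = self.p[layer, 1].expand(B, -1, -1)
            return (pk, pv), torch.zeros((), device=x.device), x

    blocks = nn.ModuleList(Block(dim=768, num_heads=12) for _ in range(12))
    prompt = PromptBank()

    x = torch.randn(4, 197, 768)  # patch-embedded tokens
    q = torch.randn(4, 768)       # query features, e.g. CLS of a frozen backbone
    prompt_loss = torch.zeros(())
    for i, blk in enumerate(blocks):
        p_list, loss, x = prompt(q, i, x, train=True, task_id=0)
        prompt_loss = prompt_loss + loss
        x = blk(x, prompt=p_list)

In the actual class this loop lives inside forward, so callers only pass the prompt module, the query and the task id, for example model(x, prompt=prompt_module, q=q, train=True, task_id=t), and add the returned prompt loss, if any, to the training objective.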