VIT

Classes

class models.ranpac_utils.vit.Adapter(config=None, d_model=None, bottleneck=None, dropout=0.0, init_option='bert', adapter_scalar='1.0', adapter_layernorm_option='in')

Bases: Module

forward(x, add_residual=True, residual=None)
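
A minimal usage sketch for the adapter on its own. Assumptions: passing d_model and bottleneck directly is enough when no config object is supplied, the input is a (batch, tokens, d_model) token tensor, and the concrete sizes and init_option/adapter_scalar values below are illustrative rather than documented defaults.

    import torch

    from models.ranpac_utils.vit import Adapter

    # Bottleneck adapter over 768-dim tokens; 64 is an illustrative width,
    # and init_option/adapter_scalar values are assumptions.
    adapter = Adapter(d_model=768, bottleneck=64, dropout=0.1,
                      init_option='lora', adapter_scalar='0.1')

    x = torch.randn(2, 197, 768)            # (batch, tokens, d_model)
    y = adapter(x)                          # residual added internally
    y_raw = adapter(x, add_residual=False)  # adapter branch output only
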
class models.ranpac_utils.vit.Attention(dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0)

Bases: Module

forward(x)
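
A short sketch of the attention module in isolation; dim must be divisible by num_heads, and the (batch, tokens, dim) input shape is an assumption for illustration.

    import torch

    from models.ranpac_utils.vit import Attention

    # Multi-head self-attention over a token sequence.
    attn = Attention(dim=768, num_heads=12, qkv_bias=True)

    x = torch.randn(2, 197, 768)   # (batch, tokens, dim)
    y = attn(x)                    # output keeps the input shape
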
class models.ranpac_utils.vit.Block(dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=<class 'torch.nn.modules.activation.GELU'>, norm_layer=<class 'torch.nn.modules.normalization.LayerNorm'>, config=None, layer_id=None)

Bases: Module

forward(x)
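
A sketch of a single transformer block. The block reads adapter settings from its config at construction time, so a config object is passed here; the field names and values follow an AdaptFormer-style tuning config and should be treated as assumptions rather than a documented contract.

    import types

    import torch

    from models.ranpac_utils.vit import Block

    # Hypothetical AdaptFormer-style tuning config; every field name here
    # is an assumption, not a documented interface.
    cfg = types.SimpleNamespace(
        ffn_adapt=True,                        # enable the bottleneck adapter
        ffn_option='parallel',                 # adapter runs parallel to the MLP
        ffn_adapter_layernorm_option='none',
        ffn_adapter_init_option='lora',
        ffn_adapter_scalar='0.1',
        ffn_num=64,                            # adapter bottleneck width
        d_model=768,
        vpt_on=False,                          # no visual prompt tokens
    )

    block = Block(dim=768, num_heads=12, qkv_bias=True, config=cfg, layer_id=0)

    x = torch.randn(2, 197, 768)   # (batch, tokens, dim)
    y = block(x)                   # same shape; adapter fused into the MLP branch
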
class models.ranpac_utils.vit.VisionTransformer(global_pool=False, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True, representation_size=None, distilled=False, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, embed_layer=<class 'timm.layers.patch_embed.PatchEmbed'>, norm_layer=None, act_layer=None, weight_init='', tuning_config=None)

Bases: Module

Vision Transformer with support for global average pooling

forward(x)
forward_features(x)
get_classifier()
init_weights(mode='')
no_weight_decay()
reset_classifier(num_classes, global_pool='')
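
An end-to-end sketch. Despite the tuning_config=None default, the blocks consult the config when they are built, so supplying one is the safe path; the config reuses the hypothetical AdaptFormer-style fields from the Block example above, and the image size, class count, and shapes are illustrative assumptions.

    import types

    import torch

    from models.ranpac_utils.vit import VisionTransformer

    # Hypothetical tuning config (same assumed fields as in the Block example).
    cfg = types.SimpleNamespace(
        ffn_adapt=True,
        ffn_option='parallel',
        ffn_adapter_layernorm_option='none',
        ffn_adapter_init_option='lora',
        ffn_adapter_scalar='0.1',
        ffn_num=64,
        d_model=768,
        vpt_on=False,
        vpt_num=1,
    )

    model = VisionTransformer(
        img_size=224, patch_size=16, embed_dim=768, depth=12,
        num_heads=12, num_classes=100,        # 100 classes is illustrative
        global_pool=True, tuning_config=cfg,
    )

    images = torch.randn(2, 3, 224, 224)     # (batch, channels, H, W)
    logits = model(images)                   # (batch, num_classes)
    feats = model.forward_features(images)   # pooled features before the head

Setting global_pool=True exercises the behavior named in the class docstring: patch tokens are averaged into a single feature vector instead of reading off the class token.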