From d3415e31346f84d66a57f9b35f8d4fa0f7c34e5f Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Mon, 7 Nov 2022 16:02:25 -0800 Subject: [PATCH] Add latest CLIP ViT fine-tune pretrained configs / model entrypt updates --- timm/models/vision_transformer.py | 561 +++++++++++++++++------------- 1 file changed, 319 insertions(+), 242 deletions(-) diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py index f38cbcc5..6b79cd28 100644 --- a/timm/models/vision_transformer.py +++ b/timm/models/vision_transformer.py @@ -40,237 +40,6 @@ from .registry import register_model _logger = logging.getLogger(__name__) -def _cfg(url='', **kwargs): - return { - 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, - 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, - 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, - 'first_conv': 'patch_embed.proj', 'classifier': 'head', - **kwargs - } - - -default_cfgs = generate_defaults({ - # patch models (weights from official Google JAX impl) - 'vit_tiny_patch16_224.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', - custom_load=True), - 'vit_tiny_patch16_384.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', - custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), - 'vit_small_patch32_224.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', - custom_load=True), - 'vit_small_patch32_384.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', - custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), - 'vit_small_patch16_224.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', - custom_load=True), - 'vit_small_patch16_384.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', - custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), - 'vit_base_patch32_224.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', - custom_load=True), - 'vit_base_patch32_384.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', - custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), - 'vit_base_patch16_224.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', - custom_load=True), - 'vit_base_patch16_384.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', - custom_load=True, 
input_size=(3, 384, 384), crop_pct=1.0), - 'vit_base_patch8_224.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', - custom_load=True), - 'vit_large_patch32_384.v1_in21k_ft_1k': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', - input_size=(3, 384, 384), crop_pct=1.0), - 'vit_large_patch16_224.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', - custom_load=True), - 'vit_large_patch16_384.augreg_in21k_ft_1k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', - custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), - - 'vit_large_patch14_224.untrained': _cfg(url=''), - 'vit_huge_patch14_224.untrained': _cfg(url=''), - 'vit_giant_patch14_224.untrained': _cfg(url=''), - 'vit_gigantic_patch14_224.untrained': _cfg(url=''), - - - # patch models, imagenet21k (weights from official Google JAX impl) - 'vit_tiny_patch16_224.augreg_in21k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', - custom_load=True, num_classes=21843), - 'vit_small_patch32_224.augreg_in21k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', - custom_load=True, num_classes=21843), - 'vit_small_patch16_224.augreg_in21k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', - custom_load=True, num_classes=21843), - 'vit_base_patch32_224.augreg_in21k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', - custom_load=True, num_classes=21843), - 'vit_base_patch16_224.augreg_in21k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', - custom_load=True, num_classes=21843), - 'vit_base_patch8_224.augreg_in21k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', - custom_load=True, num_classes=21843), - 'vit_large_patch32_224.v1_in21k': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', - num_classes=21843), - 'vit_large_patch16_224.augreg_in21k': _cfg( - url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', - custom_load=True, num_classes=21843), - 'vit_huge_patch14_224.v1_in21k': _cfg( - url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', - hf_hub_id='timm/vit_huge_patch14_224_in21k', - custom_load=True, num_classes=21843), - - # SAM trained models (https://arxiv.org/abs/2106.01548) - 'vit_base_patch32_224.sam': _cfg( - url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz', custom_load=True), - 'vit_base_patch16_224.sam': _cfg( - url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz', custom_load=True), - - # DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only) - 'vit_small_patch16_224.dino': _cfg( - 
url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', - mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), - 'vit_small_patch8_224.dino': _cfg( - url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', - mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), - 'vit_base_patch16_224.dino': _cfg( - url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', - mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), - 'vit_base_patch8_224.dino': _cfg( - url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', - mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), - - - # ViT ImageNet-21K-P pretraining by MILL - 'vit_base_patch16_224_miil.in21k': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth', - mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221), - 'vit_base_patch16_224_miil.in21k_ft_1k': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth', - mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear'), - - # custom timm variants - 'vit_base_patch16_rpn_224.in1k': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth'), - 'vit_medium_patch16_gap_240.in12k': _cfg( - url='', - input_size=(3, 240, 240), crop_pct=0.95, num_classes=11821), - 'vit_medium_patch16_gap_256.in12k_ft_1k': _cfg( - url='', - input_size=(3, 256, 256), crop_pct=0.95), - 'vit_medium_patch16_gap_384.in12k_ft_1k': _cfg( - url='', - input_size=(3, 384, 384), crop_pct=0.95), - - # CLIP pretrained image tower and related fine-tuned weights - 'vit_base_patch32_clip_224.laion2b': _cfg( - hf_hub_id='laion/CLIP-ViT-B-32-laion2B-s34B-b79K', - hf_hub_filename='open_clip_pytorch_model.bin', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), - 'vit_large_patch14_clip_224.laion2b': _cfg( - hf_hub_id='laion/CLIP-ViT-L-14-laion2B-s32B-b82K', - hf_hub_filename='open_clip_pytorch_model.bin', - mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=768), - 'vit_huge_patch14_clip_224.laion2b': _cfg( - hf_hub_id='laion/CLIP-ViT-H-14-laion2B-s32B-b79K', - hf_hub_filename='open_clip_pytorch_model.bin', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=1024), - 'vit_giant_patch14_clip_224.laion2b': _cfg( - hf_hub_id='laion/CLIP-ViT-g-14-laion2B-s12B-b42K', - hf_hub_filename='open_clip_pytorch_model.bin', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=1024), - - 'vit_base_patch32_clip_224.laion2b_ft_in1k': _cfg( - hf_hub_id='', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - 'vit_large_patch14_clip_224.laion2b_ft_in1k': _cfg( - hf_hub_id='', - mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), - 'vit_huge_patch14_clip_224.laion2b_ft_in1k': _cfg( - hf_hub_id='', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - - 'vit_base_patch32_clip_224.laion2b_ft_in12k_in1k': _cfg( - hf_hub_id='', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - 'vit_large_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( - hf_hub_id='', - mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), - 'vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( - hf_hub_id='', - 
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - - 'vit_base_patch32_clip_224.laion2b_ft_in12k': _cfg( - hf_hub_id='', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), - 'vit_large_patch14_clip_224.laion2b_ft_in12k': _cfg( - hf_hub_id='', - mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=11821), - 'vit_huge_patch14_clip_224.laion2b_ft_in12k': _cfg( - hf_hub_id='', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), - - 'vit_base_patch32_clip_224.openai': _cfg( - hf_hub_id='timm/clip_vit_base_patch32_224.openai', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), - 'vit_base_patch16_clip_224.openai': _cfg( - hf_hub_id='timm/clip_vit_base_patch16_224.openai', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), - 'vit_large_patch14_clip_224.openai': _cfg( - hf_hub_id='timm/clip_vit_large_patch14_224.openai', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=768), - - 'vit_base_patch32_clip_224.openai_ft_in1k': _cfg( - hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in1k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - 'vit_base_patch16_clip_224.openai_ft_in1k': _cfg( - hf_hub_id='timm/vit_base_patch16_clip_224.openai_ft_in1k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - 'vit_large_patch14_clip_224.openai_ft_in1k': _cfg( - hf_hub_id='timm/vit_large_patch14_clip_224.openai_ft_in1k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - - 'vit_base_patch32_clip_224.openai_ft_in12k_in1k': _cfg( - hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k_in1k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - 'vit_base_patch16_clip_224.openai_ft_in12k_in1k': _cfg( - hf_hub_id='timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - 'vit_large_patch14_clip_224.openai_ft_in12k_in1k': _cfg( - hf_hub_id='timm/vit_large_patch14_clip_224.openai_ft_in12k_in1k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), - - 'vit_base_patch32_clip_224.openai_ft_in12k': _cfg( - hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), - 'vit_base_patch16_clip_224.openai_ft_in12k': _cfg( - hf_hub_id='timm/vit_base_patch16_clip_224.openai_ft_in12k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), - 'vit_large_patch14_clip_224.openai_ft_in12k': _cfg( - hf_hub_id='timm/vit_large_patch14_clip_224.openai_ft_in12k', - mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), - - # experimental (may be removed) - 'vit_base_patch32_plus_256': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), - 'vit_base_patch16_plus_240': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95), - 'vit_small_patch16_36x1_224': _cfg(url=''), - 'vit_small_patch16_18x2_224': _cfg(url=''), - 'vit_base_patch16_18x2_224': _cfg(url=''), -}) - - class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() @@ -850,6 +619,280 @@ def checkpoint_filter_fn(state_dict, model, adapt_layer_scale=False): return out_dict +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_defaults({ + # patch models (weights from official Google JAX impl) + 
'vit_tiny_patch16_224.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + custom_load=True), + 'vit_tiny_patch16_384.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch32_224.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + custom_load=True), + 'vit_small_patch32_384.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch16_224.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + custom_load=True), + 'vit_small_patch16_384.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch32_224.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + custom_load=True), + 'vit_base_patch32_384.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_224.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', + custom_load=True), + 'vit_base_patch16_384.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch8_224.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', + custom_load=True), + 'vit_large_patch32_384.v1_in21k_ft_1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch16_224.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', + custom_load=True), + 'vit_large_patch16_384.augreg_in21k_ft_1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + + 'vit_large_patch14_224.untrained': _cfg(url=''), + 
'vit_huge_patch14_224.untrained': _cfg(url=''), + 'vit_giant_patch14_224.untrained': _cfg(url=''), + 'vit_gigantic_patch14_224.untrained': _cfg(url=''), + + + # patch models, imagenet21k (weights from official Google JAX impl) + 'vit_tiny_patch16_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + custom_load=True, num_classes=21843), + 'vit_small_patch32_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + custom_load=True, num_classes=21843), + 'vit_small_patch16_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + custom_load=True, num_classes=21843), + 'vit_base_patch32_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', + custom_load=True, num_classes=21843), + 'vit_base_patch16_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + custom_load=True, num_classes=21843), + 'vit_base_patch8_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + custom_load=True, num_classes=21843), + 'vit_large_patch32_224.v1_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', + num_classes=21843), + 'vit_large_patch16_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', + custom_load=True, num_classes=21843), + 'vit_huge_patch14_224.v1_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', + hf_hub_id='timm/vit_huge_patch14_224_in21k', + custom_load=True, num_classes=21843), + + # SAM trained models (https://arxiv.org/abs/2106.01548) + 'vit_base_patch32_224.sam': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz', custom_load=True), + 'vit_base_patch16_224.sam': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz', custom_load=True), + + # DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only) + 'vit_small_patch16_224.dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_small_patch8_224.dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_base_patch16_224.dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_base_patch8_224.dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + + + # ViT ImageNet-21K-P pretraining by MILL + 'vit_base_patch16_224_miil.in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth', + mean=(0., 0., 0.), std=(1., 1., 
1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221), + 'vit_base_patch16_224_miil.in21k_ft_1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth', + mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear'), + + # custom timm variants + 'vit_base_patch16_rpn_224.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth'), + 'vit_medium_patch16_gap_240.in12k': _cfg( + url='', + input_size=(3, 240, 240), crop_pct=0.95, num_classes=11821), + 'vit_medium_patch16_gap_256.in12k_ft_1k': _cfg( + url='', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_medium_patch16_gap_384.in12k_ft_1k': _cfg( + url='', + input_size=(3, 384, 384), crop_pct=0.95), + + # CLIP pretrained image tower and related fine-tuned weights + 'vit_base_patch32_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-B-32-laion2B-s34B-b79K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_base_patch16_clip_224.laion2b': _cfg( + #hf_hub_id='laion/CLIP-ViT-B-16-laion2B-s34B-b88K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_large_patch14_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-L-14-laion2B-s32B-b82K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=768), + 'vit_huge_patch14_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-H-14-laion2B-s32B-b79K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), + 'vit_giant_patch14_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-g-14-laion2B-s12B-b42K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), + + 'vit_base_patch32_clip_224.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/vit_base_patch32_clip_224.laion2b_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch32_clip_384.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/vit_base_patch32_clip_384.laion2b_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), + 'vit_base_patch16_clip_224.laion2b_ft_in1k': _cfg( + #hf_hub_id='timm/vit_base_patch16_clip_224.laion2b_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch16_clip_384.laion2b_ft_in1k': _cfg( + #hf_hub_id='timm/vit_base_patch16_clip_384.laion2b_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), + 'vit_base_patch32_clip_448.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/vit_base_patch32_clip_448.laion2b_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 448, 448)), + 'vit_large_patch14_clip_224.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/vit_large_patch14_clip_224.laion2b_ft_in1k', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), + 'vit_large_patch14_clip_336.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/vit_large_patch14_clip_336.laion2b_ft_in1k', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336)), + 'vit_huge_patch14_clip_224.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/vit_huge_patch14_clip_224.laion2b_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, 
crop_pct=1.0), + 'vit_huge_patch14_clip_336.laion2b_ft_in1k': _cfg( + hf_hub_id='', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336)), + + 'vit_base_patch32_clip_224.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/vit_base_patch32_clip_224.laion2b_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch32_clip_384.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/vit_base_patch32_clip_384.laion2b_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), + 'vit_base_patch32_clip_448.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 448, 448)), + 'vit_base_patch16_clip_224.laion2b_ft_in12k_in1k': _cfg( + #hf_hub_id='timm/vit_base_patch16_clip_224.laion2b_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch16_clip_384.laion2b_ft_in12k_in1k': _cfg( + #hf_hub_id='timm/vit_base_patch16_clip_384.laion2b_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), + 'vit_large_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/vit_large_patch14_clip_224.laion2b_ft_in12k_in1k', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), + 'vit_large_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336)), + 'vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), + 'vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336)), + + 'vit_base_patch32_clip_224.laion2b_ft_in12k': _cfg( + hf_hub_id='timm/vit_base_patch32_clip_224.laion2b_ft_in12k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), + 'vit_base_patch16_clip_224.laion2b_ft_in12k': _cfg( + #hf_hub_id='timm/vit_base_patch16_clip_224.laion2b_ft_in12k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), + 'vit_large_patch14_clip_224.laion2b_ft_in12k': _cfg( + hf_hub_id='timm/vit_large_patch14_clip_224.laion2b_ft_in12k', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=11821), + 'vit_huge_patch14_clip_224.laion2b_ft_in12k': _cfg( + hf_hub_id='timm/vit_huge_patch14_clip_224.laion2b_ft_in12k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), + + 'vit_base_patch32_clip_224.openai': _cfg( + hf_hub_id='timm/clip_vit_base_patch32_224.openai', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_base_patch16_clip_224.openai': _cfg( + hf_hub_id='timm/clip_vit_base_patch16_224.openai', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_large_patch14_clip_224.openai': _cfg( + hf_hub_id='timm/clip_vit_large_patch14_224.openai', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), + + 'vit_base_patch32_clip_224.openai_ft_in1k': _cfg( + #hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch16_clip_224.openai_ft_in1k': _cfg( + 
#hf_hub_id='timm/vit_base_patch16_clip_224.openai_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_large_patch14_clip_224.openai_ft_in1k': _cfg( + hf_hub_id='timm/vit_large_patch14_clip_224.openai_ft_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), + + 'vit_base_patch32_clip_224.openai_ft_in12k_in1k': _cfg( + #hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch16_clip_224.openai_ft_in12k_in1k': _cfg( + #hf_hub_id='timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_large_patch14_clip_224.openai_ft_in12k_in1k': _cfg( + hf_hub_id='timm/vit_large_patch14_clip_224.openai_ft_in12k_in1k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), + + 'vit_base_patch32_clip_224.openai_ft_in12k': _cfg( + #hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), + 'vit_base_patch16_clip_224.openai_ft_in12k': _cfg( + #hf_hub_id='timm/vit_base_patch16_clip_224.openai_ft_in12k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), + 'vit_large_patch14_clip_224.openai_ft_in12k': _cfg( + hf_hub_id='timm/vit_large_patch14_clip_224.openai_ft_in12k', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), + + # experimental (may be removed) + 'vit_base_patch32_plus_256': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), + 'vit_base_patch16_plus_240': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95), + 'vit_small_patch16_36x1_224': _cfg(url=''), + 'vit_small_patch16_18x2_224': _cfg(url=''), + 'vit_base_patch16_18x2_224': _cfg(url=''), +}) + + def _create_vision_transformer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') @@ -1094,8 +1137,7 @@ def vit_medium_patch16_gap_384(pretrained=False, **kwargs): @register_model def vit_base_patch32_clip_224(pretrained=False, **kwargs): - """ ViT-B/32 - Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. + """ ViT-B/32 CLIP image tower @ 224x224 """ model_kwargs = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) @@ -1103,10 +1145,29 @@ def vit_base_patch32_clip_224(pretrained=False, **kwargs): return model +@register_model +def vit_base_patch32_clip_384(pretrained=False, **kwargs): + """ ViT-B/32 CLIP image tower @ 384x384 + """ + model_kwargs = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) + model = _create_vision_transformer('vit_base_patch32_clip_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_clip_448(pretrained=False, **kwargs): + """ ViT-B/32 CLIP image tower @ 448x448 + """ + model_kwargs = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) + model = _create_vision_transformer('vit_base_patch32_clip_448', pretrained=pretrained, **model_kwargs) + return model + + @register_model def vit_base_patch16_clip_224(pretrained=False, **kwargs): - """ ViT-B/16 - Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. 
+ """ ViT-B/16 CLIP image tower """ model_kwargs = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) @@ -1116,8 +1177,7 @@ def vit_base_patch16_clip_224(pretrained=False, **kwargs): @register_model def vit_large_patch14_clip_224(pretrained=False, **kwargs): - """ ViT-Large model (ViT-L/14) - Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. + """ ViT-Large model (ViT-L/14) CLIP image tower """ model_kwargs = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) @@ -1125,10 +1185,19 @@ def vit_large_patch14_clip_224(pretrained=False, **kwargs): return model +@register_model +def vit_large_patch14_clip_336(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/14) CLIP image tower @ 336x336 + """ + model_kwargs = dict( + patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) + model = _create_vision_transformer('vit_large_patch14_clip_336', pretrained=pretrained, **model_kwargs) + return model + + @register_model def vit_huge_patch14_clip_224(pretrained=False, **kwargs): - """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). - Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. + """ ViT-Huge model (ViT-H/14) CLIP image tower. """ model_kwargs = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) @@ -1136,10 +1205,20 @@ def vit_huge_patch14_clip_224(pretrained=False, **kwargs): return model +@register_model +def vit_huge_patch14_clip_336(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) CLIP image tower @ 336x336 + """ + model_kwargs = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_clip_336', pretrained=pretrained, **model_kwargs) + return model + + @register_model def vit_giant_patch14_clip_224(pretrained=False, **kwargs): """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 - Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. + Pretrained weights from CLIP image tower. """ model_kwargs = dict( patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, @@ -1211,5 +1290,3 @@ def vit_base_patch16_18x2_224(pretrained=False, **kwargs): patch_size=16, embed_dim=768, depth=18, num_heads=12, init_values=1e-5, block_fn=ParallelBlock, **kwargs) model = _create_vision_transformer('vit_base_patch16_18x2_224', pretrained=pretrained, **model_kwargs) return model - -