diff --git a/timm/models/mobilenetv3.py b/timm/models/mobilenetv3.py
index 42395388..543b33ea 100644
--- a/timm/models/mobilenetv3.py
+++ b/timm/models/mobilenetv3.py
@@ -39,10 +39,10 @@ default_cfgs = {
     'mobilenetv3_large_100': _cfg(
         interpolation='bicubic',
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'),
-    'mobilenetv3_large_100_1k_miil_78_0': _cfg(
+    'mobilenetv3_large_100_miil': _cfg(
         interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1),
         url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_1k_miil_78_0.pth'),
-    'mobilenetv3_large_100_in21k_miil': _cfg(
+    'mobilenetv3_large_100_miil_in21k': _cfg(
         interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1),
         url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_in21k_miil.pth', num_classes=11221),
     'mobilenetv3_small_075': _cfg(url=''),
@@ -374,20 +374,20 @@ def mobilenetv3_large_100(pretrained=False, **kwargs):
 
 
 @register_model
-def mobilenetv3_large_100_1k_miil(pretrained=False, **kwargs):
+def mobilenetv3_large_100_miil(pretrained=False, **kwargs):
     """ MobileNet V3
     Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
     """
-    model = _gen_mobilenet_v3('mobilenetv3_large_100_1k_miil_78_0', 1.0, pretrained=pretrained, **kwargs)
+    model = _gen_mobilenet_v3('mobilenetv3_large_100_miil', 1.0, pretrained=pretrained, **kwargs)
     return model
 
 
 @register_model
-def mobilenetv3_large_100_in21k_miil(pretrained=False, **kwargs):
+def mobilenetv3_large_100_miil_in21k(pretrained=False, **kwargs):
     """ MobileNet V3, 21k pretraining
     Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
     """
-    model = _gen_mobilenet_v3('mobilenetv3_large_100_in21k_miil', 1.0, pretrained=pretrained, **kwargs)
+    model = _gen_mobilenet_v3('mobilenetv3_large_100_miil_in21k', 1.0, pretrained=pretrained, **kwargs)
     return model
diff --git a/timm/models/tresnet.py b/timm/models/tresnet.py
index 20a9cc96..ee1f3fc1 100644
--- a/timm/models/tresnet.py
+++ b/timm/models/tresnet.py
@@ -33,7 +33,7 @@ def _cfg(url='', **kwargs):
 default_cfgs = {
     'tresnet_m': _cfg(
         url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_1k_miil_83_1.pth'),
-    'tresnet_m_in21k_miil': _cfg(
+    'tresnet_m_miil_in21k': _cfg(
         url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_miil_in21k.pth', num_classes=11221),
     'tresnet_l': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth'),
@@ -266,10 +266,12 @@ def tresnet_m(pretrained=False, **kwargs):
     model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs)
     return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs)
 
+
 @register_model
-def tresnet_m_in21k_miil(pretrained=False, **kwargs):
+def tresnet_m_miil_in21k(pretrained=False, **kwargs):
     model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs)
-    return _create_tresnet('tresnet_m_in21k_miil', pretrained=pretrained, **model_kwargs)
+    return _create_tresnet('tresnet_m_miil_in21k', pretrained=pretrained, **model_kwargs)
+
 
 @register_model
 def tresnet_l(pretrained=False, **kwargs):
diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py
index e55a9ca3..4bf1dec5 100644
--- a/timm/models/vision_transformer.py
+++ b/timm/models/vision_transformer.py
@@ -120,11 +120,11 @@ default_cfgs = {
         input_size=(3, 384, 384), crop_pct=1.0, classifier=('head', 'head_dist')),
 
     # ViT ImageNet-21K-P pretraining
-    'vit_base_patch16_224_in21k_miil': _cfg(
+    'vit_base_patch16_224_miil_in21k': _cfg(
         url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth',
         mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221,
     ),
-    'vit_base_patch16_224_1k_miil': _cfg(
+    'vit_base_patch16_224_miil': _cfg(
         url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm'
             '/vit_base_patch16_224_1k_miil_84_4.pth',
         mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear',
@@ -699,20 +699,22 @@ def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs):
         'vit_deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_kwargs)
     return model
 
+
 @register_model
-def vit_base_patch16_224_in21k_miil(pretrained=False, **kwargs):
+def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs):
     """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
     Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
     """
     model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs)
-    model = _create_vision_transformer('vit_base_patch16_224_in21k_miil', pretrained=pretrained, **model_kwargs)
+    model = _create_vision_transformer('vit_base_patch16_224_miil_in21k', pretrained=pretrained, **model_kwargs)
     return model
 
+
 @register_model
-def vit_base_patch16_224_1k_miil(pretrained=False, **kwargs):
+def vit_base_patch16_224_miil(pretrained=False, **kwargs):
     """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
     Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
     """
     model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs)
-    model = _create_vision_transformer('vit_base_patch16_224_1k_miil', pretrained=pretrained, **model_kwargs)
+    model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **model_kwargs)
     return model
\ No newline at end of file
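
A quick way to sanity-check the renames is to list and instantiate the models under their new registry names. This is a minimal sketch, assuming this branch of `timm` is installed; it only relies on the standard `timm.list_models()` / `timm.create_model()` entry points and on the `default_cfg` values shown in the diff above.

```python
import timm

# The renamed variants should now be discoverable under the new '*_miil*' names.
print(timm.list_models('*miil*'))
# expected to include: 'mobilenetv3_large_100_miil', 'mobilenetv3_large_100_miil_in21k',
#                      'tresnet_m_miil_in21k', 'vit_base_patch16_224_miil',
#                      'vit_base_patch16_224_miil_in21k'

# The *_in21k variants keep num_classes=11221 from their default_cfg;
# pretrained=True would download the MIIL weights from the URLs in the cfgs above.
model = timm.create_model('vit_base_patch16_224_miil_in21k', pretrained=False)
print(model.default_cfg['num_classes'])  # expected: 11221
```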