Change 21k model naming from _21k to _in21k for consistency with existing 21k models.

pull/647/head
Ross Wightman 4 years ago
parent 2a72d38ba2
commit 7077f16c6a
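
For context, a minimal usage sketch of the renamed entrypoints through timm's model factory (model names as introduced by this commit; `pretrained=True` assumes the ported weights at the release URLs shown below are downloadable):

```python
import timm

# ImageNet-21k pretrained variant (classifier head sized for 21843 classes per its config)
model_in21k = timm.create_model('tf_efficientnetv2_s_in21k', pretrained=True)

# ImageNet-21k pretrained, ImageNet-1k fine-tuned variant
model_in21ft1k = timm.create_model('tf_efficientnetv2_s_in21ft1k', pretrained=True)
```

Code that still references the old `_21k` / `_21ft1k` names would need to switch to the `_in21k` / `_in21ft1k` forms after this change.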

@@ -26,8 +26,8 @@ I'm fortunate to be able to dedicate significant time and money of my own suppor
 ### May 14, 2021
 * Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl.
   * 1k trained variants: `tf_efficientnetv2_s/m/l`
-  * 21k trained variants: `tf_efficientnetv2_s/m/l_21k`
-  * 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_21ft1k`
+  * 21k trained variants: `tf_efficientnetv2_s/m/l_in21k`
+  * 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k`
   * v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3`
 * Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s`
 * Some blank `efficientnetv2_*` models in-place for future native PyTorch training

@@ -332,28 +332,28 @@ default_cfgs = {
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
         input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
-    'tf_efficientnetv2_s_21ft1k': _cfg(
+    'tf_efficientnetv2_s_in21ft1k': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
         input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
-    'tf_efficientnetv2_m_21ft1k': _cfg(
+    'tf_efficientnetv2_m_in21ft1k': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
         input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
-    'tf_efficientnetv2_l_21ft1k': _cfg(
+    'tf_efficientnetv2_l_in21ft1k': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
         input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
-    'tf_efficientnetv2_s_21k': _cfg(
+    'tf_efficientnetv2_s_in21k': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
         input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
-    'tf_efficientnetv2_m_21k': _cfg(
+    'tf_efficientnetv2_m_in21k': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
         input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
-    'tf_efficientnetv2_l_21k': _cfg(
+    'tf_efficientnetv2_l_in21k': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
         input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
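
A quick sanity-check sketch (not part of the commit): the `in21k` configs above keep `num_classes=21843` and still point at the original `*_21k-*.pth` weight files, so only the config keys change, not the checkpoint URLs.

```python
import timm

# assumes the in21k weights download successfully
m = timm.create_model('tf_efficientnetv2_m_in21k', pretrained=True)
print(m.num_classes)          # expected: 21843, per the config above
print(m.default_cfg['url'])   # weight filename still uses the old *_21k naming
```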
@@ -1929,62 +1929,62 @@ def tf_efficientnetv2_l(pretrained=False, **kwargs):

 @register_model
-def tf_efficientnetv2_s_21ft1k(pretrained=False, **kwargs):
+def tf_efficientnetv2_s_in21ft1k(pretrained=False, **kwargs):
     """ EfficientNet-V2 Small. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant
     """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_s('tf_efficientnetv2_s_21ft1k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21ft1k', pretrained=pretrained, **kwargs)
     return model


 @register_model
-def tf_efficientnetv2_m_21ft1k(pretrained=False, **kwargs):
+def tf_efficientnetv2_m_in21ft1k(pretrained=False, **kwargs):
     """ EfficientNet-V2 Medium. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant
     """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_m('tf_efficientnetv2_m_21ft1k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21ft1k', pretrained=pretrained, **kwargs)
     return model


 @register_model
-def tf_efficientnetv2_l_21ft1k(pretrained=False, **kwargs):
+def tf_efficientnetv2_l_in21ft1k(pretrained=False, **kwargs):
     """ EfficientNet-V2 Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant
     """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_l('tf_efficientnetv2_l_21ft1k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21ft1k', pretrained=pretrained, **kwargs)
     return model


 @register_model
-def tf_efficientnetv2_s_21k(pretrained=False, **kwargs):
+def tf_efficientnetv2_s_in21k(pretrained=False, **kwargs):
     """ EfficientNet-V2 Small w/ ImageNet-21k pretrained weights. Tensorflow compatible variant
     """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_s('tf_efficientnetv2_s_21k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21k', pretrained=pretrained, **kwargs)
     return model


 @register_model
-def tf_efficientnetv2_m_21k(pretrained=False, **kwargs):
+def tf_efficientnetv2_m_in21k(pretrained=False, **kwargs):
     """ EfficientNet-V2 Medium w/ ImageNet-21k pretrained weights. Tensorflow compatible variant
     """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_m('tf_efficientnetv2_m_21k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21k', pretrained=pretrained, **kwargs)
     return model


 @register_model
-def tf_efficientnetv2_l_21k(pretrained=False, **kwargs):
+def tf_efficientnetv2_l_in21k(pretrained=False, **kwargs):
     """ EfficientNet-V2 Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant
     """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_l('tf_efficientnetv2_l_21k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21k', pretrained=pretrained, **kwargs)
     return model
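
A short discovery check (again a sketch, assuming the registrations above): a wildcard filter in `timm.list_models` should now match only the renamed `_in21k` / `_in21ft1k` entrypoints, with the old `_21k` names gone from the registry.

```python
import timm

# should list the s/m/l ImageNet-21k variants registered above
print(timm.list_models('tf_efficientnetv2_*_in21k'))

# should be empty after the rename, since no entrypoint ends in '_21k' any more
print(timm.list_models('tf_efficientnetv2_*_21k'))
```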
