BotNet models were still off; remove weights for the bad configs. Add good SE-HaloNet33-TS weights.

Branch: pull/821/head
Author: Ross Wightman
Parent: 24720abe3b
Commit: cf5ac2800c
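For the new SE-HaloNet33-TS weights, loading should be a one-liner once a timm build containing this commit is installed. A minimal sketch (the 1000-class output shape assumes the default ImageNet head):

import timm
import torch

# 'sehalonet33ts' is registered in byoanet.py; after this commit its default_cfg
# points at the released sehalonet33ts_256 checkpoint, so pretrained=True can fetch it.
model = timm.create_model('sehalonet33ts', pretrained=True)
model.eval()

with torch.no_grad():
    out = model(torch.randn(1, 3, 256, 256))  # matches input_size/min_input_size in the cfg
print(out.shape)  # torch.Size([1, 1000])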

@@ -36,22 +36,22 @@ default_cfgs = {
     'botnet26t_256': _cfg(
         url='',
         fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
-    'botnet50t_256': _cfg(
-        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet50t_256-a0e6c3b1.pth',
+    'botnet50ts_256': _cfg(
+        url='',
         fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
     'eca_botnext26ts_256': _cfg(
         url='',
         fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
-    'eca_botnext50ts_256': _cfg(
-        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_botnext26ts_256-fb3bf984.pth',
-        fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),

     'halonet_h1': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
     'halonet26t': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_256-9b4bf0b3.pth',
         input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
-    'sehalonet33ts': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
-    'halonet50ts': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
+    'sehalonet33ts': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth',
+        input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94),
+    'halonet50ts': _cfg(
+        url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
     'eca_halonext26ts': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_256-1e55880b.pth',
         input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
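The crop_pct=0.94 added to the sehalonet33ts entry is picked up by timm's data config resolution at eval time. A small self-contained sketch of that flow:

import timm
from timm.data import resolve_data_config, create_transform

# crop_pct is read from the model's default_cfg; create_transform then resizes to
# about 256 / 0.94 before center-cropping back to 256 for validation.
model = timm.create_model('sehalonet33ts', pretrained=False)
config = resolve_data_config({}, model=model)
eval_transform = create_transform(**config)
print(config['input_size'], config['crop_pct'])  # (3, 256, 256) 0.94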
@@ -78,16 +78,17 @@ model_cfgs = dict(
         self_attn_layer='bottleneck',
         self_attn_kwargs=dict()
     ),
-    botnet50t=ByoModelCfg(
+    botnet50ts=ByoModelCfg(
         blocks=(
             ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25),
-            ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=0, br=0.25),
-            interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25),
-            ByoBlockCfg(type='self_attn', d=3, c=2048, s=2, gs=0, br=0.25),
+            interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25),
+            interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25),
+            interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25),
         ),
         stem_chs=64,
         stem_type='tiered',
         stem_pool='maxpool',
+        act_layer='silu',
         fixed_input_size=True,
         self_attn_layer='bottleneck',
         self_attn_kwargs=dict()
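The every=4 argument controls where the self-attention blocks land in the interleaved stages. A rough standalone sketch of the pattern (illustrative only, not timm's actual interleave_blocks from byobnet.py, which builds ByoBlockCfg tuples with more options), assuming the second type is placed at every every-th position and falls back to the final block when the stage is shorter than that:

# Illustrative approximation of the interleaving pattern used in the cfg above.
def sketch_interleave(types, d, every):
    attn_idx = set(range(every, d, every)) or {d - 1}  # fall back to the last block
    return tuple(types[1] if i in attn_idx else types[0] for i in range(d))

print(sketch_interleave(('bottle', 'self_attn'), d=4, every=4))
# -> ('bottle', 'bottle', 'bottle', 'self_attn'): a single attention block closes the stage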
@@ -108,22 +109,6 @@ model_cfgs = dict(
         self_attn_layer='bottleneck',
         self_attn_kwargs=dict()
     ),
-    eca_botnext50ts=ByoModelCfg(
-        blocks=(
-            ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=16, br=0.25),
-            ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=16, br=0.25),
-            interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25),
-            ByoBlockCfg(type='self_attn', d=3, c=2048, s=2, gs=16, br=0.25),
-        ),
-        stem_chs=64,
-        stem_type='tiered',
-        stem_pool='maxpool',
-        fixed_input_size=True,
-        act_layer='silu',
-        attn_layer='eca',
-        self_attn_layer='bottleneck',
-        self_attn_kwargs=dict()
-    ),

     halonet_h1=ByoModelCfg(
         blocks=(
@@ -227,38 +212,31 @@ def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs):
 @register_model
 def botnet26t_256(pretrained=False, **kwargs):
-    """ Bottleneck Transformer w/ ResNet26-T backbone. Bottleneck attn in final two stages.
-    FIXME 26t variant was mixed up with 50t arch cfg, retraining and determining why so low
+    """ Bottleneck Transformer w/ ResNet26-T backbone.
+    NOTE: this isn't performing well, may remove
     """
     kwargs.setdefault('img_size', 256)
     return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs)


 @register_model
-def botnet50t_256(pretrained=False, **kwargs):
-    """ Bottleneck Transformer w/ ResNet50-T backbone. Bottleneck attn in final two stages.
+def botnet50ts_256(pretrained=False, **kwargs):
+    """ Bottleneck Transformer w/ ResNet50-T backbone, silu act.
+    NOTE: this isn't performing well, may remove
     """
     kwargs.setdefault('img_size', 256)
-    return _create_byoanet('botnet50t_256', 'botnet50t', pretrained=pretrained, **kwargs)
+    return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs)


 @register_model
 def eca_botnext26ts_256(pretrained=False, **kwargs):
-    """ Bottleneck Transformer w/ ResNet26-T backbone, silu act, Bottleneck attn in final two stages.
-    FIXME 26ts variant was mixed up with 50ts arch cfg, retraining and determining why so low
+    """ Bottleneck Transformer w/ ResNet26-T backbone, silu act.
+    NOTE: this isn't performing well, may remove
     """
     kwargs.setdefault('img_size', 256)
     return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs)


-@register_model
-def eca_botnext50ts_256(pretrained=False, **kwargs):
-    """ Bottleneck Transformer w/ ResNet26-T backbone, silu act, Bottleneck attn in final two stages.
-    """
-    kwargs.setdefault('img_size', 256)
-    return _create_byoanet('eca_botnext50ts_256', 'eca_botnext50ts', pretrained=pretrained, **kwargs)
-
-
 @register_model
 def halonet_h1(pretrained=False, **kwargs):
     """ HaloNet-H1. Halo attention in all stages as per the paper.
