From 9a3ae97311d7971c532d19e6262c600fcdd7808d Mon Sep 17 00:00:00 2001
From: Ross Wightman
Date: Sat, 15 May 2021 22:56:51 -0700
Subject: [PATCH] Another set of byoanet models w/ ECA channel + SA + groups

---
 timm/models/byoanet.py | 101 +++++++++++++++++++++++++++++++++++++++++
 timm/models/byobnet.py |   2 +-
 2 files changed, 102 insertions(+), 1 deletion(-)

diff --git a/timm/models/byoanet.py b/timm/models/byoanet.py
index a58eea63..c179a01c 100644
--- a/timm/models/byoanet.py
+++ b/timm/models/byoanet.py
@@ -47,17 +47,21 @@ default_cfgs = {
     # GPU-Efficient (ResNet) weights
     'botnet26t_256': _cfg(url='', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
     'botnet50ts_256': _cfg(url='', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
+    'eca_botnext26ts_256': _cfg(url='', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
 
     'halonet_h1': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
     'halonet_h1_c4c5': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
     'halonet26t': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
     'halonet50ts': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
+    'eca_halonext26ts': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
 
     'lambda_resnet26t': _cfg(url='', min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)),
     'lambda_resnet50t': _cfg(url='', min_input_size=(3, 128, 128)),
+    'eca_lambda_resnext26ts': _cfg(url='', min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)),
 
     'swinnet26t_256': _cfg(url='', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
     'swinnet50ts_256': _cfg(url='', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
+    'eca_swinnext26ts_256': _cfg(url='', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
 
     'rednet26t': _cfg(url='', fixed_input_size=False, input_size=(3, 256, 256), pool_size=(8, 8)),
     'rednet50ts': _cfg(url='', fixed_input_size=False, input_size=(3, 256, 256), pool_size=(8, 8)),
@@ -129,6 +133,23 @@ model_cfgs = dict(
         self_attn_fixed_size=True,
         self_attn_kwargs=dict()
     ),
+    eca_botnext26ts=ByoaCfg(
+        blocks=(
+            ByoaBlocksCfg(type='bottle', d=3, c=256, s=1, gs=16, br=0.25),
+            ByoaBlocksCfg(type='bottle', d=4, c=512, s=2, gs=16, br=0.25),
+            interleave_attn(types=('bottle', 'self_attn'), every=1, d=2, c=1024, s=2, gs=16, br=0.25),
+            ByoaBlocksCfg(type='self_attn', d=3, c=2048, s=2, gs=16, br=0.25),
+        ),
+        stem_chs=64,
+        stem_type='tiered',
+        stem_pool='maxpool',
+        num_features=0,
+        act_layer='silu',
+        attn_layer='eca',
+        self_attn_layer='bottleneck',
+        self_attn_fixed_size=True,
+        self_attn_kwargs=dict()
+    ),
 
     halonet_h1=ByoaCfg(
         blocks=(
@@ -187,6 +208,22 @@ model_cfgs = dict(
         self_attn_layer='halo',
         self_attn_kwargs=dict(block_size=8, halo_size=2)
     ),
+    eca_halonext26ts=ByoaCfg(
+        blocks=(
+            ByoaBlocksCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25),
+            ByoaBlocksCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25),
+            interleave_attn(types=('bottle', 'self_attn'), every=1, d=2, c=1024, s=2, gs=16, br=0.25),
+            ByoaBlocksCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25),
+        ),
+        stem_chs=64,
+        stem_type='tiered',
+        stem_pool='maxpool',
+        num_features=0,
+        act_layer='silu',
+        attn_layer='eca',
+        self_attn_layer='halo',
+        self_attn_kwargs=dict(block_size=8, halo_size=2)  # intended for 256x256 res
+    ),
 
     lambda_resnet26t=ByoaCfg(
         blocks=(
@@ -216,6 +253,22 @@ model_cfgs = dict(
         self_attn_layer='lambda',
         self_attn_kwargs=dict()
     ),
+    eca_lambda_resnext26ts=ByoaCfg(
+        blocks=(
+            ByoaBlocksCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25),
+            ByoaBlocksCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25),
+            interleave_attn(types=('bottle', 'self_attn'), every=1, d=2, c=1024, s=2, gs=16, br=0.25),
+            ByoaBlocksCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25),
+        ),
+        stem_chs=64,
+        stem_type='tiered',
+        stem_pool='maxpool',
+        num_features=0,
+        act_layer='silu',
+        attn_layer='eca',
+        self_attn_layer='lambda',
+        self_attn_kwargs=dict()
+    ),
 
     swinnet26t=ByoaCfg(
         blocks=(
@@ -248,6 +301,24 @@ model_cfgs = dict(
         self_attn_fixed_size=True,
         self_attn_kwargs=dict(win_size=8)
     ),
+    eca_swinnext26ts=ByoaCfg(
+        blocks=(
+            ByoaBlocksCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25),
+            interleave_attn(types=('bottle', 'self_attn'), every=1, d=2, c=512, s=2, gs=16, br=0.25),
+            interleave_attn(types=('bottle', 'self_attn'), every=1, d=2, c=1024, s=2, gs=16, br=0.25),
+            ByoaBlocksCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25),
+        ),
+        stem_chs=64,
+        stem_type='tiered',
+        stem_pool='maxpool',
+        num_features=0,
+        act_layer='silu',
+        attn_layer='eca',
+        self_attn_layer='swin',
+        self_attn_fixed_size=True,
+        self_attn_kwargs=dict(win_size=8)
+    ),
+
 
     rednet26t=ByoaCfg(
         blocks=(
@@ -454,6 +525,14 @@ def botnet50ts_256(pretrained=False, **kwargs):
     return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs)
 
 
+@register_model
+def eca_botnext26ts_256(pretrained=False, **kwargs):
+    """ Bottleneck Transformer w/ ResNet26-T backbone. Bottleneck attn in final stage.
+    """
+    kwargs.setdefault('img_size', 256)
+    return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs)
+
+
 @register_model
 def halonet_h1(pretrained=False, **kwargs):
     """ HaloNet-H1. Halo attention in all stages as per the paper.
@@ -484,6 +563,13 @@ def halonet50ts(pretrained=False, **kwargs):
     return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs)
 
 
+@register_model
+def eca_halonext26ts(pretrained=False, **kwargs):
+    """ HaloNet w/ a ResNet26-T backbone, Halo attention in final stage.
+    """
+    return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs)
+
+
 @register_model
 def lambda_resnet26t(pretrained=False, **kwargs):
     """ Lambda-ResNet-26T. Lambda layers in one C4 stage and all C5.
@@ -498,6 +584,13 @@ def lambda_resnet50t(pretrained=False, **kwargs):
     return _create_byoanet('lambda_resnet50t', pretrained=pretrained, **kwargs)
 
 
+@register_model
+def eca_lambda_resnext26ts(pretrained=False, **kwargs):
+    """ Lambda-ResNeXt-26-TS w/ ECA attn. Lambda layers in one C4 stage and all C5.
+ """ + return _create_byoanet('eca_lambda_resnext26ts', pretrained=pretrained, **kwargs) + + @register_model def swinnet26t_256(pretrained=False, **kwargs): """ @@ -514,6 +607,14 @@ def swinnet50ts_256(pretrained=False, **kwargs): return _create_byoanet('swinnet50ts_256', 'swinnet50ts', pretrained=pretrained, **kwargs) +@register_model +def eca_swinnext26ts_256(pretrained=False, **kwargs): + """ + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('eca_swinnext26ts_256', 'eca_swinnext26ts', pretrained=pretrained, **kwargs) + + @register_model def rednet26t(pretrained=False, **kwargs): """ diff --git a/timm/models/byobnet.py b/timm/models/byobnet.py index 75610f67..8f4a2020 100644 --- a/timm/models/byobnet.py +++ b/timm/models/byobnet.py @@ -98,7 +98,7 @@ class BlocksCfg: s: int = 2 # stride of stage (first block) gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 br: float = 1. # bottleneck-ratio of blocks in stage - no_attn: bool = True # disable channel attn (ie SE) when layer is set for model + no_attn: bool = False # disable channel attn (ie SE) when layer is set for model @dataclass