Merge branch 'master' into norm_norm_norm

pull/1014/head
Ross Wightman 3 years ago
commit de5fa791c6

@@ -48,4 +48,5 @@ jobs:
env: env:
LD_PRELOAD: /usr/lib/x86_64-linux-gnu/libtcmalloc.so.4 LD_PRELOAD: /usr/lib/x86_64-linux-gnu/libtcmalloc.so.4
run: | run: |
export PYTHONDONTWRITEBYTECODE=1
pytest -vv --forked --durations=0 ./tests pytest -vv --forked --durations=0 ./tests

@@ -103,15 +103,16 @@ class RepeatAugSampler(Sampler):
g = torch.Generator() g = torch.Generator()
g.manual_seed(self.epoch) g.manual_seed(self.epoch)
if self.shuffle: if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist() indices = torch.randperm(len(self.dataset), generator=g)
else: else:
indices = list(range(len(self.dataset))) indices = torch.arange(start=0, end=len(self.dataset))
# produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....] # produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....]
indices = [x for x in indices for _ in range(self.num_repeats)] indices = torch.repeat_interleave(indices, repeats=self.num_repeats, dim=0)
# add extra samples to make it evenly divisible # add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices) padding_size = self.total_size - len(indices)
indices += indices[:padding_size] if padding_size > 0:
indices = torch.cat([indices, indices[:padding_size]], dim=0)
assert len(indices) == self.total_size assert len(indices) == self.total_size
# subsample per rank # subsample per rank

@@ -23,6 +23,10 @@ An implementation of EfficientNet that covers a variety of related models with efficient
* Single-Path NAS Pixel1 * Single-Path NAS Pixel1
- Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877
* TinyNet
- Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819
- Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch
* And likely more... * And likely more...
The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available
@@ -427,11 +431,27 @@ default_cfgs = {
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'), url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'),
'tf_mixnet_l': _cfg( 'tf_mixnet_l': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'), url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'),
"tinynet_a": _cfg(
input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth'),
"tinynet_b": _cfg(
input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth'),
"tinynet_c": _cfg(
input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth'),
"tinynet_d": _cfg(
input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth'),
"tinynet_e": _cfg(
input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth'),
} }
class EfficientNet(nn.Module): class EfficientNet(nn.Module):
""" (Generic) EfficientNet """ EfficientNet
A flexible and performant PyTorch implementation of efficient network architectures, including: A flexible and performant PyTorch implementation of efficient network architectures, including:
* EfficientNet-V2 Small, Medium, Large, XL & B0-B3 * EfficientNet-V2 Small, Medium, Large, XL & B0-B3
@@ -443,7 +463,7 @@ class EfficientNet(nn.Module):
* MobileNet-V2 * MobileNet-V2
* FBNet C * FBNet C
* Single-Path NAS Pixel1 * Single-Path NAS Pixel1
* TinyNet
""" """
def __init__(self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False, def __init__(self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False,
@@ -1160,6 +1180,31 @@ def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrai
return model return model
def _gen_tinynet(
    variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs
):
    """Creates a TinyNet model.

    TinyNet scales the EfficientNet-B0 base architecture down along width and
    depth ("Model Rubik's Cube", https://arxiv.org/abs/2010.14819).

    Args:
        variant: model variant name used for config/weight lookup (e.g. 'tinynet_a').
        model_width: channel (width) multiplier applied through round_channels.
        depth_multiplier: per-stage block-repeat multiplier (rounded, see depth_trunc).
        pretrained: if True, load pretrained weights for `variant`.
        **kwargs: extra EfficientNet constructor args; note that act/bn-related
            entries are consumed (popped) by the resolve_* helpers below before
            the remainder is forwarded.
    """
    # EfficientNet-B0 style stage definitions: depthwise-separable first stage,
    # then inverted-residual stages with squeeze-excite ratio 0.25.
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'],
        ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'],
        ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'],
        ['ir_r1_k3_s1_e6_c320_se0.25'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
        # head feature count is scaled with width but never below the reference 1280
        num_features=max(1280, round_channels(1280, model_width, 8, None)),
        stem_size=32,
        fix_stem=True,  # presumably keeps the stem at 32ch regardless of model_width — confirm against EfficientNet ctor
        round_chs_fn=partial(round_channels, multiplier=model_width),
        # NOTE: order matters here — resolve_act_layer/resolve_bn_args and the
        # norm_layer pop all consume entries from `kwargs` before **kwargs spreads.
        act_layer=resolve_act_layer(kwargs, 'swish'),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model
@register_model @register_model
def mnasnet_050(pretrained=False, **kwargs): def mnasnet_050(pretrained=False, **kwargs):
""" MNASNet B1, depth multiplier of 0.5. """ """ MNASNet B1, depth multiplier of 0.5. """
@@ -2298,3 +2343,33 @@ def tf_mixnet_l(pretrained=False, **kwargs):
model = _gen_mixnet_m( model = _gen_mixnet_m(
'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
return model return model
@register_model
def tinynet_a(pretrained=False, **kwargs):
    """TinyNet-A: width 1.0, depth 1.2 (https://arxiv.org/abs/2010.14819)."""
    return _gen_tinynet(
        'tinynet_a', model_width=1.0, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
@register_model
def tinynet_b(pretrained=False, **kwargs):
    """TinyNet-B: width 0.75, depth 1.1 (https://arxiv.org/abs/2010.14819)."""
    return _gen_tinynet(
        'tinynet_b', model_width=0.75, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
@register_model
def tinynet_c(pretrained=False, **kwargs):
    """TinyNet-C: width 0.54, depth 0.85 (https://arxiv.org/abs/2010.14819)."""
    return _gen_tinynet(
        'tinynet_c', model_width=0.54, depth_multiplier=0.85, pretrained=pretrained, **kwargs)
@register_model
def tinynet_d(pretrained=False, **kwargs):
    """TinyNet-D: width 0.54, depth 0.695 (https://arxiv.org/abs/2010.14819)."""
    return _gen_tinynet(
        'tinynet_d', model_width=0.54, depth_multiplier=0.695, pretrained=pretrained, **kwargs)
@register_model
def tinynet_e(pretrained=False, **kwargs):
    """TinyNet-E: width 0.51, depth 0.6 (https://arxiv.org/abs/2010.14819)."""
    return _gen_tinynet(
        'tinynet_e', model_width=0.51, depth_multiplier=0.6, pretrained=pretrained, **kwargs)

@@ -167,14 +167,14 @@ class Visformer(nn.Module):
self.patch_embed1 = PatchEmbed( self.patch_embed1 = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, img_size=img_size, patch_size=patch_size, in_chans=in_chans,
embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) embed_dim=embed_dim, norm_layer=embed_norm, flatten=False)
img_size = [x // 16 for x in img_size] img_size = [x // patch_size for x in img_size]
else: else:
if self.init_channels is None: if self.init_channels is None:
self.stem = None self.stem = None
self.patch_embed1 = PatchEmbed( self.patch_embed1 = PatchEmbed(
img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans,
embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False)
img_size = [x // 8 for x in img_size] img_size = [x // (patch_size // 2) for x in img_size]
else: else:
self.stem = nn.Sequential( self.stem = nn.Sequential(
nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False),
@@ -185,7 +185,7 @@ class Visformer(nn.Module):
self.patch_embed1 = PatchEmbed( self.patch_embed1 = PatchEmbed(
img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels, img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels,
embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False)
img_size = [x // 4 for x in img_size] img_size = [x // (patch_size // 4) for x in img_size]
if self.pos_embed: if self.pos_embed:
if self.vit_stem: if self.vit_stem:
@@ -207,7 +207,7 @@ class Visformer(nn.Module):
self.patch_embed2 = PatchEmbed( self.patch_embed2 = PatchEmbed(
img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2,
embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) embed_dim=embed_dim, norm_layer=embed_norm, flatten=False)
img_size = [x // 2 for x in img_size] img_size = [x // (patch_size // 8) for x in img_size]
if self.pos_embed: if self.pos_embed:
self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
self.stage2 = nn.ModuleList([ self.stage2 = nn.ModuleList([
@@ -224,7 +224,7 @@ class Visformer(nn.Module):
self.patch_embed3 = PatchEmbed( self.patch_embed3 = PatchEmbed(
img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim, img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim,
embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False) embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False)
img_size = [x // 2 for x in img_size] img_size = [x // (patch_size // 8) for x in img_size]
if self.pos_embed: if self.pos_embed:
self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size))
self.stage3 = nn.ModuleList([ self.stage3 = nn.ModuleList([

Loading…
Cancel
Save