commit b57a03bd0d
@@ -0,0 +1,353 @@
""" ConViT Model

@article{d2021convit,
  title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases},
  author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent},
  journal={arXiv preprint arXiv:2103.10697},
  year={2021}
}

Paper link: https://arxiv.org/abs/2103.10697
Original code: https://github.com/facebookresearch/convit, original copyright below
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
'''These modules are adapted from those of timm, see
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
'''

import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import DropPath, to_2tuple, trunc_normal_, PatchEmbed, Mlp
from .registry import register_model
from .vision_transformer_hybrid import HybridEmbed

def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
        'first_conv': 'patch_embed.proj', 'classifier': 'head',
        **kwargs
    }


default_cfgs = {
    # ConViT
    'convit_tiny': _cfg(
        url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth"),
    'convit_small': _cfg(
        url="https://dl.fbaipublicfiles.com/convit/convit_small.pth"),
    'convit_base': _cfg(
        url="https://dl.fbaipublicfiles.com/convit/convit_base.pth")
}

class GPSA(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 locality_strength=1.):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.locality_strength = locality_strength

        self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.gating_param = nn.Parameter(torch.ones(self.num_heads))
        self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3)  # silly torchscript hack, won't work with None

    def forward(self, x):
        B, N, C = x.shape
        if self.rel_indices is None or self.rel_indices.shape[1] != N:
            self.rel_indices = self.get_rel_indices(N)
        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def get_attention(self, x):
        B, N, C = x.shape
        qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k = qk[0], qk[1]
        pos_score = self.rel_indices.expand(B, -1, -1, -1)
        pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)

        gating = self.gating_param.view(1, -1, 1, 1)
        attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        attn /= attn.sum(dim=-1).unsqueeze(-1)
        attn = self.attn_drop(attn)
        return attn

    def get_attention_map(self, x, return_map=False):
        attn_map = self.get_attention(x).mean(0)  # average over batch
        distances = self.rel_indices.squeeze()[:, :, -1] ** .5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist

    def local_init(self):
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1  # max(1,1/locality_strength**.5)

        kernel_size = int(self.num_heads ** .5)
        center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1 + kernel_size * h2
                self.pos_proj.weight.data[position, 2] = -1
                self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance
                self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance
        self.pos_proj.weight.data *= self.locality_strength

    def get_rel_indices(self, num_patches: int) -> torch.Tensor:
        img_size = int(num_patches ** .5)
        rel_indices = torch.zeros(1, num_patches, num_patches, 3)
        ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
        indx = ind.repeat(img_size, img_size)
        indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
        indd = indx ** 2 + indy ** 2
        rel_indices[:, :, :, 2] = indd.unsqueeze(0)
        rel_indices[:, :, :, 1] = indy.unsqueeze(0)
        rel_indices[:, :, :, 0] = indx.unsqueeze(0)
        device = self.qk.weight.device
        return rel_indices.to(device)

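# Illustrative usage sketch (not part of the original commit): GPSA blends a content-based
# attention map with a learned positional one via a per-head sigmoid gate, so each head can
# interpolate between convolution-like local attention and standard self-attention.
if __name__ == '__main__':
    _gpsa = GPSA(dim=192, num_heads=4, locality_strength=1.)
    _gpsa.local_init()             # bias heads toward local, conv-like attention
    _x = torch.randn(2, 196, 192)  # 14x14 patch grid, no class token yet
    assert _gpsa(_x).shape == (2, 196, 192)
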
class MHSA(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def get_attention_map(self, x, return_map=False):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0)

        img_size = int(N ** .5)
        ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
        indx = ind.repeat(img_size, img_size)
        indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
        indd = indx ** 2 + indy ** 2
        distances = indd ** .5
        distances = distances.to(x.device)  # follow the input device instead of assuming CUDA

        dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N
        if return_map:
            return dist, attn_map
        else:
            return dist

    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

class Block(nn.Module):

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        if self.use_gpsa:
            self.attn = GPSA(
                dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
                proj_drop=drop, **kwargs)
        else:
            self.attn = MHSA(
                dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
                proj_drop=drop, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

class ConViT(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, global_pool=None,
                 local_up_to_layer=3, locality_strength=1., use_pos_embed=True):
        super().__init__()
        embed_dim *= num_heads
        self.num_classes = num_classes
        self.local_up_to_layer = local_up_to_layer
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed

        if hybrid_backbone is not None:
            self.patch_embed = HybridEmbed(
                hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
        else:
            self.patch_embed = PatchEmbed(
                img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.num_patches = num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        if self.use_pos_embed:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.pos_embed, std=.02)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=True,
                locality_strength=locality_strength)
            if i < local_up_to_layer else
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=False)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        # Classifier head
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
        for n, m in self.named_modules():
            if hasattr(m, 'local_init'):
                m.local_init()

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)

        if self.use_pos_embed:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        for u, blk in enumerate(self.blocks):
            if u == self.local_up_to_layer:
                x = torch.cat((cls_tokens, x), dim=1)
            x = blk(x)

        x = self.norm(x)
        return x[:, 0]

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x

def _create_convit(variant, pretrained=False, **kwargs):
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    return build_model_with_cfg(
        ConViT, variant, pretrained,
        default_cfg=default_cfgs[variant],
        **kwargs)


@register_model
def convit_tiny(pretrained=False, **kwargs):
    model_args = dict(
        local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
        num_heads=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model = _create_convit(variant='convit_tiny', pretrained=pretrained, **model_args)
    return model


@register_model
def convit_small(pretrained=False, **kwargs):
    model_args = dict(
        local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
        num_heads=9, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model = _create_convit(variant='convit_small', pretrained=pretrained, **model_args)
    return model


@register_model
def convit_base(pretrained=False, **kwargs):
    model_args = dict(
        local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
        num_heads=16, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model = _create_convit(variant='convit_base', pretrained=pretrained, **model_args)
    return model

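# Minimal smoke test (illustrative sketch, not part of the original commit). Note that
# embed_dim here is the per-head width; the ConViT constructor multiplies it by num_heads,
# so convit_tiny ends up with a 4 * 48 = 192-dim token embedding.
if __name__ == '__main__':
    _model = convit_tiny(pretrained=False)
    _logits = _model(torch.randn(1, 3, 224, 224))
    assert _logits.shape == (1, 1000)
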
@@ -1,22 +0,0 @@
from .bottleneck_attn import BottleneckAttn
from .halo_attn import HaloAttn
from .lambda_layer import LambdaLayer
from .swin_attn import WindowAttention


def get_self_attn(attn_type):
    if attn_type == 'bottleneck':
        return BottleneckAttn
    elif attn_type == 'halo':
        return HaloAttn
    elif attn_type == 'lambda':
        return LambdaLayer
    elif attn_type == 'swin':
        return WindowAttention
    else:
        assert False, f"Unknown attn type ({attn_type})"


def create_self_attn(attn_type, dim, stride=1, **kwargs):
    attn_fn = get_self_attn(attn_type)
    return attn_fn(dim, stride=stride, **kwargs)
@@ -0,0 +1,90 @@
""" Gather-Excite Attention Block

Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348

Official code here, but it's only a partial impl in Caffe: https://github.com/hujie-frank/GENet

I've tried to support all of the extent settings, both w/ and w/o extra params. I don't
believe I've seen another impl that covers all of the cases.

NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation

Hacked together by / Copyright 2021 Ross Wightman
"""
import math

from torch import nn as nn
import torch.nn.functional as F

from .create_act import create_act_layer, get_act_layer
from .create_conv2d import create_conv2d
from .helpers import make_divisible
from .mlp import ConvMlp


class GatherExcite(nn.Module):
    """ Gather-Excite Attention Module
    """
    def __init__(
            self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True,
            rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False,
            act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'):
        super(GatherExcite, self).__init__()
        self.add_maxpool = add_maxpool
        act_layer = get_act_layer(act_layer)
        self.extent = extent
        if extra_params:
            self.gather = nn.Sequential()
            if extent == 0:
                assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params'
                self.gather.add_module(
                    'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True))
                if norm_layer:
                    self.gather.add_module('norm1', nn.BatchNorm2d(channels))
            else:
                assert extent % 2 == 0
                num_conv = int(math.log2(extent))
                for i in range(num_conv):
                    self.gather.add_module(
                        f'conv{i + 1}',
                        create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True))
                    if norm_layer:
                        self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels))
                    if i != num_conv - 1:
                        self.gather.add_module(f'act{i + 1}', act_layer(inplace=True))
        else:
            self.gather = None
            if self.extent == 0:
                self.gk = 0
                self.gs = 0
            else:
                assert extent % 2 == 0
                self.gk = self.extent * 2 - 1
                self.gs = self.extent

        if not rd_channels:
            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
        self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity()
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        size = x.shape[-2:]
        if self.gather is not None:
            x_ge = self.gather(x)
        else:
            if self.extent == 0:
                # global extent
                x_ge = x.mean(dim=(2, 3), keepdim=True)
                if self.add_maxpool:
                    # experimental codepath, may remove or change
                    x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True)
            else:
                x_ge = F.avg_pool2d(
                    x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False)
                if self.add_maxpool:
                    # experimental codepath, may remove or change
                    x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2)
        x_ge = self.mlp(x_ge)
        if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1:
            x_ge = F.interpolate(x_ge, size=size)
        return x * self.gate(x_ge)

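# Illustrative sketch (not part of the original commit): with extent=0 and extra_params=False
# the gather step is plain global average pooling, so the module reduces to Squeeze-and-Excitation.
if __name__ == '__main__':
    import torch
    _ge = GatherExcite(64, extent=0, extra_params=False)       # SE-equivalent configuration
    _ge_local = GatherExcite(64, extent=4, extra_params=True)  # parameterized gather, 2 strided convs
    _x = torch.randn(2, 64, 32, 32)
    assert _ge(_x).shape == _ge_local(_x).shape == (2, 64, 32, 32)
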
@@ -0,0 +1,67 @@
""" Global Context Attention Block

Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond`
    - https://arxiv.org/abs/1904.11492

Official code consulted as reference: https://github.com/xvjiarui/GCNet

Hacked together by / Copyright 2021 Ross Wightman
"""
from torch import nn as nn
import torch.nn.functional as F

from .create_act import create_act_layer, get_act_layer
from .helpers import make_divisible
from .mlp import ConvMlp
from .norm import LayerNorm2d


class GlobalContext(nn.Module):

    def __init__(self, channels, use_attn=True, fuse_add=True, fuse_scale=False, init_last_zero=False,
                 rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'):
        super(GlobalContext, self).__init__()
        act_layer = get_act_layer(act_layer)

        self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None

        if rd_channels is None:
            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
        if fuse_add:
            self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
        else:
            self.mlp_add = None
        if fuse_scale:
            self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
        else:
            self.mlp_scale = None

        self.gate = create_act_layer(gate_layer)
        self.init_last_zero = init_last_zero
        self.reset_parameters()

    def reset_parameters(self):
        if self.conv_attn is not None:
            nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu')
        if self.mlp_add is not None:
            nn.init.zeros_(self.mlp_add.fc2.weight)

    def forward(self, x):
        B, C, H, W = x.shape

        if self.conv_attn is not None:
            attn = self.conv_attn(x).reshape(B, 1, H * W)  # (B, 1, H * W)
            attn = F.softmax(attn, dim=-1).unsqueeze(3)  # (B, 1, H * W, 1)
            context = x.reshape(B, C, H * W).unsqueeze(1) @ attn
            context = context.view(B, C, 1, 1)
        else:
            context = x.mean(dim=(2, 3), keepdim=True)

        if self.mlp_scale is not None:
            mlp_x = self.mlp_scale(context)
            x = x * self.gate(mlp_x)
        if self.mlp_add is not None:
            mlp_x = self.mlp_add(context)
            x = x + mlp_x

        return x

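# Illustrative sketch (not part of the original commit): GCNet pools a single global context
# vector via a learned softmax over spatial positions, then re-injects it through the
# fuse_add and/or fuse_scale MLP branches.
if __name__ == '__main__':
    import torch
    _gc = GlobalContext(64, fuse_add=True, fuse_scale=False)
    _x = torch.randn(2, 64, 16, 16)
    assert _gc(_x).shape == (2, 64, 16, 16)
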
@@ -0,0 +1,50 @@
""" PyTorch Involution Layer

Official impl: https://github.com/d-li14/involution/blob/main/cls/mmcls/models/utils/involution_naive.py
Paper: `Involution: Inverting the Inherence of Convolution for Visual Recognition` - https://arxiv.org/abs/2103.06255
"""
import torch.nn as nn
from .conv_bn_act import ConvBnAct
from .create_conv2d import create_conv2d


class Involution(nn.Module):

    def __init__(
            self,
            channels,
            kernel_size=3,
            stride=1,
            group_size=16,
            rd_ratio=4,
            norm_layer=nn.BatchNorm2d,
            act_layer=nn.ReLU,
    ):
        super(Involution, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.channels = channels
        self.group_size = group_size
        self.groups = self.channels // self.group_size
        self.conv1 = ConvBnAct(
            in_channels=channels,
            out_channels=channels // rd_ratio,
            kernel_size=1,
            norm_layer=norm_layer,
            act_layer=act_layer)
        self.conv2 = create_conv2d(
            in_channels=channels // rd_ratio,
            out_channels=kernel_size**2 * self.groups,
            kernel_size=1,
            stride=1)
        self.avgpool = nn.AvgPool2d(stride, stride) if stride == 2 else nn.Identity()
        self.unfold = nn.Unfold(kernel_size, 1, (kernel_size-1)//2, stride)

    def forward(self, x):
        # generate a (kernel_size^2) kernel per output position and group, then apply it
        # to the unfolded input neighborhoods
        weight = self.conv2(self.conv1(self.avgpool(x)))
        B, C, H, W = weight.shape
        KK = int(self.kernel_size ** 2)
        weight = weight.view(B, self.groups, KK, H, W).unsqueeze(2)
        out = self.unfold(x).view(B, self.groups, self.group_size, KK, H, W)
        out = (weight * out).sum(dim=3).view(B, self.channels, H, W)
        return out

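# Illustrative sketch (not part of the original commit): involution is the dual of convolution,
# with kernels generated per spatial position but shared across the channels of a group.
if __name__ == '__main__':
    import torch
    _inv = Involution(64, kernel_size=3, stride=1, group_size=16)  # 64/16 = 4 kernel groups
    _x = torch.randn(2, 64, 32, 32)
    assert _inv(_x).shape == (2, 64, 32, 32)
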
@@ -0,0 +1,145 @@
""" Bilinear-Attention-Transform and Non-Local Attention

Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms`
    - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html
Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification
"""
import torch
from torch import nn
from torch.nn import functional as F

from .conv_bn_act import ConvBnAct
from .helpers import make_divisible


class NonLocalAttn(nn.Module):
    """Spatial NL block for image classification.

    This was adapted from https://github.com/BA-Transform/BAT-Image-Classification
    Their NonLocal impl was inspired by https://github.com/facebookresearch/video-nonlocal-net.
    """

    def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs):
        super(NonLocalAttn, self).__init__()
        if rd_channels is None:
            rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor)
        self.scale = in_channels ** -0.5 if use_scale else 1.0
        self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
        self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
        self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
        self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True)
        self.norm = nn.BatchNorm2d(in_channels)
        self.reset_parameters()

    def forward(self, x):
        shortcut = x

        t = self.t(x)
        p = self.p(x)
        g = self.g(x)

        B, C, H, W = t.size()
        t = t.view(B, C, -1).permute(0, 2, 1)
        p = p.view(B, C, -1)
        g = g.view(B, C, -1).permute(0, 2, 1)

        att = torch.bmm(t, p) * self.scale
        att = F.softmax(att, dim=2)
        x = torch.bmm(att, g)

        x = x.permute(0, 2, 1).reshape(B, C, H, W)
        x = self.z(x)
        x = self.norm(x) + shortcut

        return x

    def reset_parameters(self):
        for name, m in self.named_modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
                if len(list(m.parameters())) > 1:
                    nn.init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 0)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 0)
                nn.init.constant_(m.bias, 0)


class BilinearAttnTransform(nn.Module):

    def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
        super(BilinearAttnTransform, self).__init__()

        self.conv1 = ConvBnAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer)
        self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1))
        self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size))
        self.conv2 = ConvBnAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
        self.block_size = block_size
        self.groups = groups
        self.in_channels = in_channels

    def resize_mat(self, x, t):
        B, C, block_size, block_size1 = x.shape
        assert block_size == block_size1
        if t <= 1:
            return x
        x = x.view(B * C, -1, 1, 1)
        x = x * torch.eye(t, t, dtype=x.dtype, device=x.device)
        x = x.view(B * C, block_size, block_size, t, t)
        x = torch.cat(torch.split(x, 1, dim=1), dim=3)
        x = torch.cat(torch.split(x, 1, dim=2), dim=4)
        x = x.view(B, C, block_size * t, block_size * t)
        return x

    def forward(self, x):
        assert x.shape[-1] % self.block_size == 0 and x.shape[-2] % self.block_size == 0
        B, C, H, W = x.shape
        out = self.conv1(x)
        rp = F.adaptive_max_pool2d(out, (self.block_size, 1))
        cp = F.adaptive_max_pool2d(out, (1, self.block_size))
        p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size)
        q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size)
        p = torch.sigmoid(p)
        q = torch.sigmoid(q)
        p = p / p.sum(dim=3, keepdim=True)
        q = q / q.sum(dim=2, keepdim=True)
        p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(
            B, self.groups, C // self.groups, self.block_size, self.block_size).contiguous()
        p = p.view(B, C, self.block_size, self.block_size)
        q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(
            B, self.groups, C // self.groups, self.block_size, self.block_size).contiguous()
        q = q.view(B, C, self.block_size, self.block_size)
        p = self.resize_mat(p, H // self.block_size)
        q = self.resize_mat(q, W // self.block_size)
        y = p.matmul(x)
        y = y.matmul(q)

        y = self.conv2(y)
        return y


class BatNonLocalAttn(nn.Module):
    """ BAT
    Adapted from: https://github.com/BA-Transform/BAT-Image-Classification
    """

    def __init__(
            self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8,
            drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_):
        super().__init__()
        if rd_channels is None:
            rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor)
        self.conv1 = ConvBnAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
        self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer)
        self.conv2 = ConvBnAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
        self.dropout = nn.Dropout2d(p=drop_rate)

    def forward(self, x):
        xl = self.conv1(x)
        y = self.ba(xl)
        y = self.conv2(y)
        y = self.dropout(y)
        return y + x

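# Illustrative sketch (not part of the original commit): BAT requires the spatial dims to be
# multiples of block_size (see the assert in BilinearAttnTransform.forward).
if __name__ == '__main__':
    _bat = BatNonLocalAttn(64, block_size=7, groups=2)
    _x = torch.randn(2, 64, 14, 14)  # 14 is a multiple of block_size=7
    assert _bat(_x).shape == (2, 64, 14, 14)
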
@@ -1,50 +0,0 @@
from torch import nn as nn
import torch.nn.functional as F

from .create_act import create_act_layer
from .helpers import make_divisible


class SEModule(nn.Module):
    """ SE Module as defined in original SE-Nets with a few additions
    Additions include:
        * min_channels can be specified to keep reduced channel count at a minimum (default: 8)
        * divisor can be specified to keep channels rounded to specified values (default: 1)
        * reduction channels can be specified directly by arg (if reduction_channels is set)
        * reduction channels can be specified by float ratio (if reduction_ratio is set)
    """
    def __init__(self, channels, reduction=16, act_layer=nn.ReLU, gate_layer='sigmoid',
                 reduction_ratio=None, reduction_channels=None, min_channels=8, divisor=1):
        super(SEModule, self).__init__()
        if reduction_channels is not None:
            reduction_channels = reduction_channels  # direct specification highest priority, no rounding/min done
        elif reduction_ratio is not None:
            reduction_channels = make_divisible(channels * reduction_ratio, divisor, min_channels)
        else:
            reduction_channels = make_divisible(channels // reduction, divisor, min_channels)
        self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, bias=True)
        self.act = act_layer(inplace=True)
        self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, bias=True)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        x_se = x.mean((2, 3), keepdim=True)
        x_se = self.fc1(x_se)
        x_se = self.act(x_se)
        x_se = self.fc2(x_se)
        return x * self.gate(x_se)


class EffectiveSEModule(nn.Module):
    """ 'Effective Squeeze-Excitation'
    From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
    """
    def __init__(self, channels, gate_layer='hard_sigmoid'):
        super(EffectiveSEModule, self).__init__()
        self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0)
        self.gate = create_act_layer(gate_layer, inplace=True)

    def forward(self, x):
        x_se = x.mean((2, 3), keepdim=True)
        x_se = self.fc(x_se)
        return x * self.gate(x_se)
@@ -0,0 +1,74 @@
""" Squeeze-and-Excitation Channel Attention

An SE implementation originally based on PyTorch SE-Net impl.
Has since evolved with additional functionality / configuration.

Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507

Also included is Effective Squeeze-Excitation (ESE).
Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667

Hacked together by / Copyright 2021 Ross Wightman
"""
from torch import nn as nn

from .create_act import create_act_layer
from .helpers import make_divisible


class SEModule(nn.Module):
    """ SE Module as defined in original SE-Nets with a few additions
    Additions include:
        * divisor can be specified to keep channels % div == 0 (default: 8)
        * reduction channels can be specified directly by arg (if rd_channels is set)
        * reduction channels can be specified by float rd_ratio (default: 1/16)
        * global max pooling can be added to the squeeze aggregation
        * customizable activation, normalization, and gate layer
    """
    def __init__(
            self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False,
            act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'):
        super(SEModule, self).__init__()
        self.add_maxpool = add_maxpool
        if not rd_channels:
            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
        self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=True)
        self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity()
        self.act = create_act_layer(act_layer, inplace=True)
        self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=True)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        x_se = x.mean((2, 3), keepdim=True)
        if self.add_maxpool:
            # experimental codepath, may remove or change
            x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True)
        x_se = self.fc1(x_se)
        x_se = self.act(self.bn(x_se))
        x_se = self.fc2(x_se)
        return x * self.gate(x_se)


SqueezeExcite = SEModule  # alias


class EffectiveSEModule(nn.Module):
    """ 'Effective Squeeze-Excitation'
    From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
    """
    def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_):
        super(EffectiveSEModule, self).__init__()
        self.add_maxpool = add_maxpool
        self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        x_se = x.mean((2, 3), keepdim=True)
        if self.add_maxpool:
            # experimental codepath, may remove or change
            x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True)
        x_se = self.fc(x_se)
        return x * self.gate(x_se)


EffectiveSqueezeExcite = EffectiveSEModule  # alias

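# Illustrative sketch (not part of the original commit): with the defaults, rd_channels is
# derived as make_divisible(channels * 1/16, 8), e.g. 256 channels -> 16 reduced channels.
if __name__ == '__main__':
    import torch
    _se = SEModule(256)            # fc1: 256 -> 16, fc2: 16 -> 256
    _ese = EffectiveSEModule(256)  # single 1x1 conv, no reduction
    _x = torch.randn(2, 256, 8, 8)
    assert _se(_x).shape == _ese(_x).shape == (2, 256, 8, 8)
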
@@ -0,0 +1,570 @@
""" LeViT

Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference`
    - https://arxiv.org/abs/2104.01136

@article{graham2021levit,
  title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference},
  author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze},
  journal={arXiv preprint arXiv:2104.01136},
  year={2021}
}

Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright below.

This version combines both conv/linear models and fixes torchscript compatibility.

Modifications by / Copyright 2021 Ross Wightman
"""

# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.

# Modified from
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# Copyright 2020 Ross Wightman, Apache-2.0 License
import itertools
from copy import deepcopy
from functools import partial
from typing import Dict

import torch
import torch.nn as nn

from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN
from .helpers import build_model_with_cfg, overlay_external_default_cfg
from .layers import to_ntuple, get_act_layer
from .vision_transformer import trunc_normal_
from .registry import register_model

def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.0.c', 'classifier': ('head.l', 'head_dist.l'),
        **kwargs
    }


default_cfgs = dict(
    levit_128s=_cfg(
        url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth'
    ),
    levit_128=_cfg(
        url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth'
    ),
    levit_192=_cfg(
        url='https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth'
    ),
    levit_256=_cfg(
        url='https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth'
    ),
    levit_384=_cfg(
        url='https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth'
    ),
)

model_cfgs = dict(
    levit_128s=dict(
        embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)),
    levit_128=dict(
        embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)),
    levit_192=dict(
        embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)),
    levit_256=dict(
        embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)),
    levit_384=dict(
        embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)),
)

__all__ = ['Levit']

@register_model
def levit_128s(pretrained=False, fuse=False, distillation=True, use_conv=False, **kwargs):
    return create_levit(
        'levit_128s', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_128(pretrained=False, fuse=False, distillation=True, use_conv=False, **kwargs):
    return create_levit(
        'levit_128', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_192(pretrained=False, fuse=False, distillation=True, use_conv=False, **kwargs):
    return create_levit(
        'levit_192', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_256(pretrained=False, fuse=False, distillation=True, use_conv=False, **kwargs):
    return create_levit(
        'levit_256', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_384(pretrained=False, fuse=False, distillation=True, use_conv=False, **kwargs):
    return create_levit(
        'levit_384', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_c_128s(pretrained=False, fuse=False, distillation=True, use_conv=True, **kwargs):
    return create_levit(
        'levit_128s', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_c_128(pretrained=False, fuse=False, distillation=True, use_conv=True, **kwargs):
    return create_levit(
        'levit_128', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_c_192(pretrained=False, fuse=False, distillation=True, use_conv=True, **kwargs):
    return create_levit(
        'levit_192', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_c_256(pretrained=False, fuse=False, distillation=True, use_conv=True, **kwargs):
    return create_levit(
        'levit_256', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)


@register_model
def levit_c_384(pretrained=False, fuse=False, distillation=True, use_conv=True, **kwargs):
    return create_levit(
        'levit_384', pretrained=pretrained, fuse=fuse, distillation=distillation, use_conv=use_conv, **kwargs)

class ConvNorm(nn.Sequential):
    def __init__(
            self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1, resolution=-10000):
        super().__init__()
        self.add_module('c', nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False))
        bn = nn.BatchNorm2d(b)
        nn.init.constant_(bn.weight, bn_weight_init)
        nn.init.constant_(bn.bias, 0)
        self.add_module('bn', bn)

    @torch.no_grad()
    def fuse(self):
        c, bn = self._modules.values()
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = c.weight * w[:, None, None, None]
        b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
        m = nn.Conv2d(
            w.size(1), w.size(0), w.shape[2:], stride=self.c.stride,
            padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m

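# Illustrative check (not part of the original commit): fusing folds the BatchNorm affine
# transform into the conv weights, so in eval mode the fused conv matches conv+bn output.
if __name__ == '__main__':
    _cn = ConvNorm(8, 16, ks=3, pad=1).eval()
    _fused = _cn.fuse()
    _x = torch.randn(2, 8, 14, 14)
    assert torch.allclose(_cn(_x), _fused(_x), atol=1e-5)
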
class LinearNorm(nn.Sequential):
    def __init__(self, a, b, bn_weight_init=1, resolution=-100000):
        super().__init__()
        self.add_module('c', nn.Linear(a, b, bias=False))
        bn = nn.BatchNorm1d(b)
        nn.init.constant_(bn.weight, bn_weight_init)
        nn.init.constant_(bn.bias, 0)
        self.add_module('bn', bn)

    @torch.no_grad()
    def fuse(self):
        l, bn = self._modules.values()
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = l.weight * w[:, None]
        b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
        m = nn.Linear(w.size(1), w.size(0))
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m

    def forward(self, x):
        x = self.c(x)
        return self.bn(x.flatten(0, 1)).reshape_as(x)


class NormLinear(nn.Sequential):
    def __init__(self, a, b, bias=True, std=0.02):
        super().__init__()
        self.add_module('bn', nn.BatchNorm1d(a))
        l = nn.Linear(a, b, bias=bias)
        trunc_normal_(l.weight, std=std)
        if bias:
            nn.init.constant_(l.bias, 0)
        self.add_module('l', l)

    @torch.no_grad()
    def fuse(self):
        bn, l = self._modules.values()
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = l.weight * w[None, :]
        if l.bias is None:
            b = b @ self.l.weight.T
        else:
            b = (l.weight @ b[:, None]).view(-1) + self.l.bias
        m = nn.Linear(w.size(1), w.size(0))
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m


def stem_b16(in_chs, out_chs, activation, resolution=224):
    return nn.Sequential(
        ConvNorm(in_chs, out_chs // 8, 3, 2, 1, resolution=resolution),
        activation(),
        ConvNorm(out_chs // 8, out_chs // 4, 3, 2, 1, resolution=resolution // 2),
        activation(),
        ConvNorm(out_chs // 4, out_chs // 2, 3, 2, 1, resolution=resolution // 4),
        activation(),
        ConvNorm(out_chs // 2, out_chs, 3, 2, 1, resolution=resolution // 8))


class Residual(nn.Module):
    def __init__(self, m, drop):
        super().__init__()
        self.m = m
        self.drop = drop

    def forward(self, x):
        if self.training and self.drop > 0:
            return x + self.m(x) * torch.rand(
                x.size(0), 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach()
        else:
            return x + self.m(x)

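# Illustrative note (not part of the original commit): Residual implements per-sample
# stochastic depth; surviving samples are scaled by 1/(1-drop) so the expectation matches.
if __name__ == '__main__':
    _res = Residual(nn.Identity(), drop=0.5).eval()
    _x = torch.randn(2, 4, 8)
    assert torch.allclose(_res(_x), 2 * _x)  # eval path: x + m(x) with m = identity
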
class Subsample(nn.Module):
    def __init__(self, stride, resolution):
        super().__init__()
        self.stride = stride
        self.resolution = resolution

    def forward(self, x):
        B, N, C = x.shape
        x = x.view(B, self.resolution, self.resolution, C)[:, ::self.stride, ::self.stride]
        return x.reshape(B, -1, C)


class Attention(nn.Module):
    ab: Dict[str, torch.Tensor]

    def __init__(
            self, dim, key_dim, num_heads=8, attn_ratio=4, act_layer=None, resolution=14, use_conv=False):
        super().__init__()

        self.num_heads = num_heads
        self.scale = key_dim ** -0.5
        self.key_dim = key_dim
        self.nh_kd = nh_kd = key_dim * num_heads
        self.d = int(attn_ratio * key_dim)
        self.dh = int(attn_ratio * key_dim) * num_heads
        self.attn_ratio = attn_ratio
        self.use_conv = use_conv
        ln_layer = ConvNorm if self.use_conv else LinearNorm
        h = self.dh + nh_kd * 2
        self.qkv = ln_layer(dim, h, resolution=resolution)
        self.proj = nn.Sequential(
            act_layer(),
            ln_layer(self.dh, dim, bn_weight_init=0, resolution=resolution))

        points = list(itertools.product(range(resolution), range(resolution)))
        N = len(points)
        attention_offsets = {}
        idxs = []
        for p1 in points:
            for p2 in points:
                offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
        self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N))
        self.ab = {}  # per-device attention_biases cache

    @torch.no_grad()
    def train(self, mode=True):
        super().train(mode)
        if mode and self.ab:
            self.ab = {}  # clear ab cache

    def get_attention_biases(self, device: torch.device) -> torch.Tensor:
        if self.training:
            return self.attention_biases[:, self.attention_bias_idxs]
        else:
            device_key = str(device)
            if device_key not in self.ab:
                self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs]
            return self.ab[device_key]

    def forward(self, x):  # x (B,C,H,W) if use_conv else (B,N,C)
        if self.use_conv:
            B, C, H, W = x.shape
            q, k, v = self.qkv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.d], dim=2)

            attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device)
            attn = attn.softmax(dim=-1)

            x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
        else:
            B, N, C = x.shape
            qkv = self.qkv(x)
            q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3)
            q = q.permute(0, 2, 1, 3)
            k = k.permute(0, 2, 1, 3)
            v = v.permute(0, 2, 1, 3)

            attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device)
            attn = attn.softmax(dim=-1)

            x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
        x = self.proj(x)
        return x

class AttentionSubsample(nn.Module):
    ab: Dict[str, torch.Tensor]

    def __init__(
            self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=2,
            act_layer=None, stride=2, resolution=14, resolution_=7, use_conv=False):
        super().__init__()
        self.num_heads = num_heads
        self.scale = key_dim ** -0.5
        self.key_dim = key_dim
        self.nh_kd = nh_kd = key_dim * num_heads
        self.d = int(attn_ratio * key_dim)
        self.dh = self.d * self.num_heads
        self.attn_ratio = attn_ratio
        self.resolution_ = resolution_
        self.resolution_2 = resolution_ ** 2
        self.use_conv = use_conv
        if self.use_conv:
            ln_layer = ConvNorm
            sub_layer = partial(nn.AvgPool2d, kernel_size=1, padding=0)
        else:
            ln_layer = LinearNorm
            sub_layer = partial(Subsample, resolution=resolution)

        h = self.dh + nh_kd
        self.kv = ln_layer(in_dim, h, resolution=resolution)
        self.q = nn.Sequential(
            sub_layer(stride=stride),
            ln_layer(in_dim, nh_kd, resolution=resolution_))
        self.proj = nn.Sequential(
            act_layer(),
            ln_layer(self.dh, out_dim, resolution=resolution_))

        self.stride = stride
        self.resolution = resolution
        points = list(itertools.product(range(resolution), range(resolution)))
        points_ = list(itertools.product(range(resolution_), range(resolution_)))
        N = len(points)
        N_ = len(points_)
        attention_offsets = {}
        idxs = []
        for p1 in points_:
            for p2 in points:
                size = 1
                offset = (
                    abs(p1[0] * stride - p2[0] + (size - 1) / 2),
                    abs(p1[1] * stride - p2[1] + (size - 1) / 2))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
        self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N_, N))
        self.ab = {}  # per-device attention_biases cache

    @torch.no_grad()
    def train(self, mode=True):
        super().train(mode)
        if mode and self.ab:
            self.ab = {}  # clear ab cache

    def get_attention_biases(self, device: torch.device) -> torch.Tensor:
        if self.training:
            return self.attention_biases[:, self.attention_bias_idxs]
        else:
            device_key = str(device)
            if device_key not in self.ab:
                self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs]
            return self.ab[device_key]

    def forward(self, x):
        if self.use_conv:
            B, C, H, W = x.shape
            k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.d], dim=2)
            q = self.q(x).view(B, self.num_heads, self.key_dim, self.resolution_2)

            attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device)
            attn = attn.softmax(dim=-1)

            x = (v @ attn.transpose(-2, -1)).reshape(B, -1, self.resolution_, self.resolution_)
        else:
            B, N, C = x.shape
            k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.d], dim=3)
            k = k.permute(0, 2, 1, 3)  # BHNC
            v = v.permute(0, 2, 1, 3)  # BHNC
            q = self.q(x).view(B, self.resolution_2, self.num_heads, self.key_dim).permute(0, 2, 1, 3)

            attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device)
            attn = attn.softmax(dim=-1)

            x = (attn @ v).transpose(1, 2).reshape(B, -1, self.dh)
        x = self.proj(x)
        return x

class Levit(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """

    def __init__(
            self,
            img_size=224,
            patch_size=16,
            in_chans=3,
            num_classes=1000,
            embed_dim=(192,),
            key_dim=64,
            depth=(12,),
            num_heads=(3,),
            attn_ratio=2,
            mlp_ratio=2,
            hybrid_backbone=None,
            down_ops=None,
            act_layer='hard_swish',
            attn_act_layer='hard_swish',
            distillation=True,
            use_conv=False,
            drop_path=0):
        super().__init__()
        act_layer = get_act_layer(act_layer)
        attn_act_layer = get_act_layer(attn_act_layer)
        if isinstance(img_size, tuple):
            # FIXME origin impl passes single img/res dim through whole hierarchy,
            # not sure this model will be used enough to spend time fixing it.
            assert img_size[0] == img_size[1]
            img_size = img_size[0]
        self.num_classes = num_classes
        self.num_features = embed_dim[-1]
        self.embed_dim = embed_dim
        N = len(embed_dim)
        assert len(depth) == len(num_heads) == N
        key_dim = to_ntuple(N)(key_dim)
        attn_ratio = to_ntuple(N)(attn_ratio)
        mlp_ratio = to_ntuple(N)(mlp_ratio)
        down_ops = down_ops or (
            # ('Subsample', key_dim, num_heads, attn_ratio, mlp_ratio, stride)
            ('Subsample', key_dim[0], embed_dim[0] // key_dim[0], 4, 2, 2),
            ('Subsample', key_dim[0], embed_dim[1] // key_dim[1], 4, 2, 2),
            ('',)
        )
        self.distillation = distillation
        self.use_conv = use_conv
        ln_layer = ConvNorm if self.use_conv else LinearNorm

        self.patch_embed = hybrid_backbone or stem_b16(in_chans, embed_dim[0], activation=act_layer)

        self.blocks = []
        resolution = img_size // patch_size
        for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate(
                zip(embed_dim, key_dim, depth, num_heads, attn_ratio, mlp_ratio, down_ops)):
            for _ in range(dpth):
                self.blocks.append(
                    Residual(
                        Attention(
                            ed, kd, nh, attn_ratio=ar, act_layer=attn_act_layer,
                            resolution=resolution, use_conv=use_conv),
                        drop_path))
                if mr > 0:
                    h = int(ed * mr)
                    self.blocks.append(
                        Residual(nn.Sequential(
                            ln_layer(ed, h, resolution=resolution),
                            act_layer(),
                            ln_layer(h, ed, bn_weight_init=0, resolution=resolution),
                        ), drop_path))
            if do[0] == 'Subsample':
                # ('Subsample', key_dim, num_heads, attn_ratio, mlp_ratio, stride)
                resolution_ = (resolution - 1) // do[5] + 1
                self.blocks.append(
                    AttentionSubsample(
                        *embed_dim[i:i + 2], key_dim=do[1], num_heads=do[2],
                        attn_ratio=do[3], act_layer=attn_act_layer, stride=do[5],
                        resolution=resolution, resolution_=resolution_, use_conv=use_conv))
                resolution = resolution_
                if do[4] > 0:  # mlp_ratio
                    h = int(embed_dim[i + 1] * do[4])
                    self.blocks.append(
                        Residual(nn.Sequential(
                            ln_layer(embed_dim[i + 1], h, resolution=resolution),
                            act_layer(),
                            ln_layer(h, embed_dim[i + 1], bn_weight_init=0, resolution=resolution),
                        ), drop_path))
        self.blocks = nn.Sequential(*self.blocks)

        # Classifier head
        self.head = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()
        if distillation:
            self.head_dist = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()
        else:
            self.head_dist = None

    @torch.jit.ignore
    def no_weight_decay(self):
        return {x for x in self.state_dict().keys() if 'attention_biases' in x}

    def forward(self, x):
        x = self.patch_embed(x)
        if not self.use_conv:
            x = x.flatten(2).transpose(1, 2)
        x = self.blocks(x)
        x = x.mean((-2, -1)) if self.use_conv else x.mean(1)
        if self.head_dist is not None:
            x, x_dist = self.head(x), self.head_dist(x)
            if self.training and not torch.jit.is_scripting():
                return x, x_dist
            else:
                # during inference, return the average of both classifier predictions
                return (x + x_dist) / 2
        else:
            x = self.head(x)
        return x

def checkpoint_filter_fn(state_dict, model):
    if 'model' in state_dict:
        # For deit models
        state_dict = state_dict['model']
    D = model.state_dict()
    for k in state_dict.keys():
        if D[k].ndim == 4 and state_dict[k].ndim == 2:
            state_dict[k] = state_dict[k][:, :, None, None]
    return state_dict


def create_levit(variant, pretrained=False, default_cfg=None, fuse=False, **kwargs):
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    model_cfg = dict(**model_cfgs[variant], **kwargs)
    model = build_model_with_cfg(
        Levit, variant, pretrained,
        default_cfg=default_cfgs[variant],
        pretrained_filter_fn=checkpoint_filter_fn,
        **model_cfg)
    # if fuse:
    #     utils.replace_batchnorm(model)
    return model

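# Illustrative sketch (not part of the original commit): in training mode a distilled LeViT
# returns a (class_head, distill_head) tuple; in eval the two head outputs are averaged.
if __name__ == '__main__':
    _levit = levit_128s(pretrained=False)
    _x = torch.randn(2, 3, 224, 224)
    _out, _out_dist = _levit.train()(_x)         # two heads during training
    assert _levit.eval()(_x).shape == (2, 1000)  # averaged heads at inference
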
@ -0,0 +1,420 @@
""" Twins
A PyTorch impl of: `Twins: Revisiting the Design of Spatial Attention in Vision Transformers`
    - https://arxiv.org/pdf/2104.13840.pdf

Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below

"""
# --------------------------------------------------------
# Twins
# Copyright (c) 2021 Meituan
# Licensed under The Apache 2.0 License [see LICENSE for details]
# Written by Xinjie Li, Xiangxiang Chu
# --------------------------------------------------------
import math
from copy import deepcopy
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .layers import Mlp, DropPath, to_2tuple, trunc_normal_
from .registry import register_model
from .vision_transformer import Attention
from .helpers import build_model_with_cfg, overlay_external_default_cfg


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embeds.0.proj', 'classifier': 'head',
        **kwargs
    }


default_cfgs = {
    'twins_pcpvt_small': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth',
        ),
    'twins_pcpvt_base': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_base-e5ecb09b.pth',
        ),
    'twins_pcpvt_large': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_large-d273f802.pth',
        ),
    'twins_svt_small': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_small-42e5f78c.pth',
        ),
    'twins_svt_base': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_base-c2265010.pth',
        ),
    'twins_svt_large': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_large-90f6aaa9.pth',
        ),
}

Size_ = Tuple[int, int]

class LocallyGroupedAttn(nn.Module):
    """ LSA: self attention within a group
    """
    def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1):
        assert ws != 1
        super(LocallyGroupedAttn, self).__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=True)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.ws = ws

    def forward(self, x, size: Size_):
        # There are two possible implementations here: zero padding or masking. We observed
        # no obvious accuracy difference between them, so the padding version below is used
        # because it is simpler. The masking implementation (kept commented out below for
        # reference) is arguably more correct, since it prevents padded positions from
        # taking part in attention.
        B, N, C = x.shape
        H, W = size
        x = x.view(B, H, W, C)
        pad_l = pad_t = 0
        pad_r = (self.ws - W % self.ws) % self.ws
        pad_b = (self.ws - H % self.ws) % self.ws
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape
        _h, _w = Hp // self.ws, Wp // self.ws
        # partition into a (_h, _w) grid of non-overlapping ws x ws token windows
        x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)
        qkv = self.qkv(x).reshape(
            B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
        x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
        if pad_r > 0 or pad_b > 0:
            x = x[:, :H, :W, :].contiguous()
        x = x.reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    # def forward_mask(self, x, size: Size_):
    #     B, N, C = x.shape
    #     H, W = size
    #     x = x.view(B, H, W, C)
    #     pad_l = pad_t = 0
    #     pad_r = (self.ws - W % self.ws) % self.ws
    #     pad_b = (self.ws - H % self.ws) % self.ws
    #     x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
    #     _, Hp, Wp, _ = x.shape
    #     _h, _w = Hp // self.ws, Wp // self.ws
    #     mask = torch.zeros((1, Hp, Wp), device=x.device)
    #     mask[:, -pad_b:, :].fill_(1)
    #     mask[:, :, -pad_r:].fill_(1)
    #
    #     x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)  # B, _h, _w, ws, ws, C
    #     mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws)
    #     attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3)  # 1, _h*_w, ws*ws, ws*ws
    #     attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0))
    #     qkv = self.qkv(x).reshape(
    #         B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
    #     # n_h, B, _w*_h, nhead, ws*ws, dim
    #     q, k, v = qkv[0], qkv[1], qkv[2]  # B, _h*_w, n_head, ws*ws, dim_head
    #     attn = (q @ k.transpose(-2, -1)) * self.scale  # B, _h*_w, n_head, ws*ws, ws*ws
    #     attn = attn + attn_mask.unsqueeze(2)
    #     attn = attn.softmax(dim=-1)
    #     attn = self.attn_drop(attn)  # attn @v -> B, _h*_w, n_head, ws*ws, dim_head
    #     attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
    #     x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
    #     if pad_r > 0 or pad_b > 0:
    #         x = x[:, :H, :W, :].contiguous()
    #     x = x.reshape(B, N, C)
    #     x = self.proj(x)
    #     x = self.proj_drop(x)
    #     return x

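    # Shape sketch for the window partition above (illustrative, hypothetical sizes):
    # with B=1, H=W=14, C=4 and ws=7 there is no padding, _h=_w=2, i.e. a 2x2 grid
    # of 7x7-token windows, and the partition round-trips exactly:
    #
    #   x = torch.randn(1, 14, 14, 4)
    #   win = x.reshape(1, 2, 7, 2, 7, 4).transpose(2, 3)   # (1, 2, 2, 7, 7, 4)
    #   back = win.transpose(2, 3).reshape(1, 14, 14, 4)
    #   assert torch.equal(back, x)
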
class GlobalSubSampleAttn(nn.Module):
    """ GSA: global sub-sampled attention. Keys and values are computed from a spatially
    reduced copy of the feature map, so each group of tokens is summarized into a single
    key/value and global attention stays efficient.
    """
    def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5

        self.q = nn.Linear(dim, dim, bias=True)
        self.kv = nn.Linear(dim, dim * 2, bias=True)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # strided conv performs the spatial reduction of keys/values
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)
        else:
            self.sr = None
            self.norm = None

    def forward(self, x, size: Size_):
        B, N, C = x.shape
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        if self.sr is not None:
            x = x.permute(0, 2, 1).reshape(B, C, *size)
            x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
            x = self.norm(x)
        kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x

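# Cost sketch for GSA (illustrative): with sr_ratio=8 on a 56x56 stage, the 3136
# queries attend to only 7 * 7 = 49 summarized key/value tokens, so the attention
# matrix is 3136x49 instead of 3136x3136.
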
class Block(nn.Module):

    def __init__(self, dim, num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        if ws is None:
            self.attn = Attention(dim, num_heads, False, None, attn_drop, drop)
        elif ws == 1:
            self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, drop, sr_ratio)
        else:
            self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, drop, ws)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, size: Size_):
        x = x + self.drop_path(self.attn(self.norm1(x), size))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

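# Note on attention selection in Block: ws=None selects the vanilla ViT Attention
# (unused by the configs below), ws=1 selects GlobalSubSampleAttn, and ws>1 selects
# LocallyGroupedAttn. The SVT variants alternate LSA and GSA blocks by forcing ws=1
# on every other block (see the `wss` / `i % 2` logic in Twins.__init__ below).
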
class PosConv(nn.Module):
    # PEG from https://arxiv.org/abs/2102.10882
    def __init__(self, in_chans, embed_dim=768, stride=1):
        super(PosConv, self).__init__()
        self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), )
        self.stride = stride

    def forward(self, x, size: Size_):
        B, N, C = x.shape
        cnn_feat_token = x.transpose(1, 2).view(B, C, *size)
        x = self.proj(cnn_feat_token)
        if self.stride == 1:
            x += cnn_feat_token
        x = x.flatten(2).transpose(1, 2)
        return x

    def no_weight_decay(self):
        return ['proj.%d.weight' % i for i in range(4)]

class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)

        self.img_size = img_size
        self.patch_size = patch_size
        assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
            f"img_size {img_size} should be divided by patch_size {patch_size}."
        self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
        self.num_patches = self.H * self.W
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x) -> Tuple[torch.Tensor, Size_]:
        B, C, H, W = x.shape

        x = self.proj(x).flatten(2).transpose(1, 2)
        x = self.norm(x)
        out_size = (H // self.patch_size[0], W // self.patch_size[1])

        return x, out_size

class Twins(nn.Module):
    """ Twins Vision Transformer (Revisiting Spatial Attention)

    Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git
    """
    def __init__(
            self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dims=(64, 128, 256, 512),
            num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
            norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None,
            block_cls=Block):
        super().__init__()
        self.num_classes = num_classes
        self.depths = depths
        self.embed_dims = embed_dims  # kept so reset_classifier can size a new head

        img_size = to_2tuple(img_size)
        prev_chs = in_chans
        self.patch_embeds = nn.ModuleList()
        self.pos_drops = nn.ModuleList()
        for i in range(len(depths)):
            self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i]))
            self.pos_drops.append(nn.Dropout(p=drop_rate))
            prev_chs = embed_dims[i]
            img_size = tuple(t // patch_size for t in img_size)
            patch_size = 2

        self.blocks = nn.ModuleList()
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        cur = 0
        for k in range(len(depths)):
            _block = nn.ModuleList([block_cls(
                dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k],
                ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])])
            self.blocks.append(_block)
            cur += depths[k]

        self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims])

        self.norm = norm_layer(embed_dims[-1])

        # classification head
        self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()

        # init weights
        self.apply(self._init_weights)

    @torch.jit.ignore
    def no_weight_decay(self):
        return set(['pos_block.' + n for n, p in self.pos_block.named_parameters()])

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        # NOTE: sized from the final stage width (there is no single self.embed_dim attribute)
        self.head = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1.0)
            m.bias.data.zero_()

    def forward_features(self, x):
        B = x.shape[0]
        for i, (embed, drop, blocks, pos_blk) in enumerate(
                zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)):
            x, size = embed(x)
            x = drop(x)
            for j, blk in enumerate(blocks):
                x = blk(x, size)
                if j == 0:
                    x = pos_blk(x, size)  # PEG here
            if i < len(self.depths) - 1:
                x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()
        x = self.norm(x)
        return x.mean(dim=1)  # GAP here

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x

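# Stage geometry sketch (illustrative): with img_size=224 and patch_size=4, the
# stage feature maps are 56x56, 28x28, 14x14 and 7x7 (each later stage re-embeds
# with patch_size=2), i.e. overall strides of 4, 8, 16 and 32.
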
def _create_twins(variant, pretrained=False, **kwargs):
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    model = build_model_with_cfg(
        Twins, variant, pretrained,
        default_cfg=default_cfgs[variant],
        **kwargs)
    return model


@register_model
def twins_pcpvt_small(pretrained=False, **kwargs):
    model_kwargs = dict(
        patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
        depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
    return _create_twins('twins_pcpvt_small', pretrained=pretrained, **model_kwargs)


@register_model
def twins_pcpvt_base(pretrained=False, **kwargs):
    model_kwargs = dict(
        patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
        depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
    return _create_twins('twins_pcpvt_base', pretrained=pretrained, **model_kwargs)


@register_model
def twins_pcpvt_large(pretrained=False, **kwargs):
    model_kwargs = dict(
        patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
        depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
    return _create_twins('twins_pcpvt_large', pretrained=pretrained, **model_kwargs)


@register_model
def twins_svt_small(pretrained=False, **kwargs):
    model_kwargs = dict(
        patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4],
        depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)
    return _create_twins('twins_svt_small', pretrained=pretrained, **model_kwargs)


@register_model
def twins_svt_base(pretrained=False, **kwargs):
    model_kwargs = dict(
        patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4],
        depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)
    return _create_twins('twins_svt_base', pretrained=pretrained, **model_kwargs)


@register_model
def twins_svt_large(pretrained=False, **kwargs):
    model_kwargs = dict(
        patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4],
        depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)
    return _create_twins('twins_svt_large', pretrained=pretrained, **model_kwargs)
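
# Usage sketch (illustrative only): once this module is imported, the variants
# registered above are available through the timm factory, e.g.
#
#   import torch, timm
#   model = timm.create_model('twins_svt_small', pretrained=False).eval()
#   with torch.no_grad():
#       logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000)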
@ -0,0 +1,405 @@
""" Visformer

Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533

From original at https://github.com/danczs/Visformer

"""
from copy import deepcopy

import torch
import torch.nn as nn
import torch.nn.functional as F

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg, overlay_external_default_cfg
from .layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d
from .registry import register_model


__all__ = ['Visformer']


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'stem.0', 'classifier': 'head',
        **kwargs
    }


default_cfgs = dict(
    visformer_tiny=_cfg(),
    visformer_small=_cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/visformer_small-839e1f5b.pth'
    ),
)

class SpatialMlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None,
                 act_layer=nn.GELU, drop=0., group=8, spatial_conv=False):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.in_features = in_features
        self.out_features = out_features
        self.spatial_conv = spatial_conv
        if self.spatial_conv:
            if group < 2:  # net setting
                hidden_features = in_features * 5 // 6
            else:
                hidden_features = in_features * 2
        self.hidden_features = hidden_features
        self.group = group
        self.drop = nn.Dropout(drop)
        self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False)
        self.act1 = act_layer()
        if self.spatial_conv:
            # extra 3x3 grouped conv mixes spatial information inside the MLP
            self.conv2 = nn.Conv2d(
                hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False)
            self.act2 = act_layer()
        else:
            self.conv2 = None
            self.act2 = None
        self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False)

    def forward(self, x):
        x = self.conv1(x)
        x = self.act1(x)
        x = self.drop(x)
        if self.conv2 is not None:
            x = self.conv2(x)
            x = self.act2(x)
        x = self.conv3(x)
        x = self.drop(x)
        return x

class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        head_dim = round(dim // num_heads * head_dim_ratio)
        self.head_dim = head_dim
        self.scale = head_dim ** -0.5
        # 1x1 convs act as per-token linear projections on the (B, C, H, W) layout
        self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, C, H, W = x.shape
        x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3)
        q, k, v = x[0], x[1], x[2]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v

        x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

class Block(nn.Module):
    def __init__(self, dim, num_heads, head_dim_ratio=1., mlp_ratio=4.,
                 drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm2d,
                 group=8, attn_disabled=False, spatial_conv=False):
        super().__init__()
        self.spatial_conv = spatial_conv
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        if attn_disabled:
            self.norm1 = None
            self.attn = None
        else:
            self.norm1 = norm_layer(dim)
            self.attn = Attention(
                dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=drop)

        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = SpatialMlp(
            in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop,
            group=group, spatial_conv=spatial_conv)  # new setting

    def forward(self, x):
        if self.attn is not None:
            x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

class Visformer(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384,
                 depth=12, num_heads=6, mlp_ratio=4., drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
                 norm_layer=LayerNorm2d, attn_stage='111', pos_embed=True, spatial_conv='111',
                 vit_stem=False, group=8, pool=True, conv_init=False, embed_norm=None):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        self.init_channels = init_channels
        self.img_size = img_size
        self.vit_stem = vit_stem
        self.pool = pool
        self.conv_init = conv_init
        if isinstance(depth, (list, tuple)):
            self.stage_num1, self.stage_num2, self.stage_num3 = depth
            depth = sum(depth)
        else:
            self.stage_num1 = self.stage_num3 = depth // 3
            self.stage_num2 = depth - self.stage_num1 - self.stage_num3
        self.pos_embed = pos_embed
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]

        # stage 1
        if self.vit_stem:
            self.stem = None
            self.patch_embed1 = PatchEmbed(
                img_size=img_size, patch_size=patch_size, in_chans=in_chans,
                embed_dim=embed_dim, norm_layer=embed_norm, flatten=False)
            img_size //= 16
        else:
            if self.init_channels is None:
                self.stem = None
                self.patch_embed1 = PatchEmbed(
                    img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans,
                    embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False)
                img_size //= 8
            else:
                self.stem = nn.Sequential(
                    nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False),
                    nn.BatchNorm2d(self.init_channels),
                    nn.ReLU(inplace=True)
                )
                img_size //= 2
                self.patch_embed1 = PatchEmbed(
                    img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels,
                    embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False)
                img_size //= 4

        if self.pos_embed:
            if self.vit_stem:
                self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, img_size, img_size))
            else:
                self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim // 2, img_size, img_size))
            self.pos_drop = nn.Dropout(p=drop_rate)
        self.stage1 = nn.ModuleList([
            Block(
                dim=embed_dim // 2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                group=group, attn_disabled=(attn_stage[0] == '0'), spatial_conv=(spatial_conv[0] == '1')
            )
            for i in range(self.stage_num1)
        ])

        # stage 2
        if not self.vit_stem:
            self.patch_embed2 = PatchEmbed(
                img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2,
                embed_dim=embed_dim, norm_layer=embed_norm, flatten=False)
            img_size //= 2
        if self.pos_embed:
            self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, img_size, img_size))
        self.stage2 = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                group=group, attn_disabled=(attn_stage[1] == '0'), spatial_conv=(spatial_conv[1] == '1')
            )
            for i in range(self.stage_num1, self.stage_num1 + self.stage_num2)
        ])

        # stage 3
        if not self.vit_stem:
            self.patch_embed3 = PatchEmbed(
                img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim,
                embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False)
            img_size //= 2
        if self.pos_embed:
            self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim * 2, img_size, img_size))
        self.stage3 = nn.ModuleList([
            Block(
                dim=embed_dim * 2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                group=group, attn_disabled=(attn_stage[2] == '0'), spatial_conv=(spatial_conv[2] == '1')
            )
            for i in range(self.stage_num1 + self.stage_num2, depth)
        ])

        # head
        if self.pool:
            self.global_pooling = nn.AdaptiveAvgPool2d(1)
        head_dim = embed_dim if self.vit_stem else embed_dim * 2
        self.norm = norm_layer(head_dim)
        self.head = nn.Linear(head_dim, num_classes)

        # weights init
        if self.pos_embed:
            trunc_normal_(self.pos_embed1, std=0.02)
            if not self.vit_stem:
                trunc_normal_(self.pos_embed2, std=0.02)
                trunc_normal_(self.pos_embed3, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            if self.conv_init:
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            else:
                trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.)

    def forward(self, x):
        if self.stem is not None:
            x = self.stem(x)

        # stage 1
        x = self.patch_embed1(x)
        if self.pos_embed:
            x = x + self.pos_embed1
            x = self.pos_drop(x)
        for b in self.stage1:
            x = b(x)

        # stage 2
        if not self.vit_stem:
            x = self.patch_embed2(x)
            if self.pos_embed:
                x = x + self.pos_embed2
                x = self.pos_drop(x)
        for b in self.stage2:
            x = b(x)

        # stage 3
        if not self.vit_stem:
            x = self.patch_embed3(x)
            if self.pos_embed:
                x = x + self.pos_embed3
                x = self.pos_drop(x)
        for b in self.stage3:
            x = b(x)

        # head
        x = self.norm(x)
        if self.pool:
            x = self.global_pooling(x)
        else:
            x = x[:, :, 0, 0]

        x = self.head(x.view(x.size(0), -1))
        return x

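# Stage geometry sketch for the default (non-ViT-stem) path, illustrative: a 224px
# input passes the stride-2 stem to 112px, then patch_embed1 (stride 4) to 28x28
# for stage 1 at embed_dim/2; patch_embed2 and patch_embed3 (stride 2 each) give
# 14x14 and 7x7 maps at embed_dim and embed_dim*2.
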
def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs):
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    model = build_model_with_cfg(
        Visformer, variant, pretrained,
        default_cfg=default_cfgs[variant],
        **kwargs)
    return model


@register_model
def visformer_tiny(pretrained=False, **kwargs):
    model_cfg = dict(
        img_size=224, init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8,
        attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
        embed_norm=nn.BatchNorm2d, **kwargs)
    model = _create_visformer('visformer_tiny', pretrained=pretrained, **model_cfg)
    return model


@register_model
def visformer_small(pretrained=False, **kwargs):
    model_cfg = dict(
        img_size=224, init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8,
        attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
        embed_norm=nn.BatchNorm2d, **kwargs)
    model = _create_visformer('visformer_small', pretrained=pretrained, **model_cfg)
    return model

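# Note on the stage config strings used above: attn_stage='011' disables self
# attention in stage 1 and enables it in stages 2-3, while spatial_conv='100'
# uses the grouped 3x3 SpatialMlp conv only in stage 1, giving the conv-early /
# attention-late layout the Visformer paper argues is vision-friendly.
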
# @register_model
# def visformer_net1(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
#         spatial_conv='000', vit_stem=True, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net2(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
#         spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net3(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
#         spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net4(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
#         spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net5(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
#         spatial_conv='111', vit_stem=False, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net6(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
#         pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net7(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000',
#         pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model

@ -1 +1 @@
__version__ = '0.4.9'
__version__ = '0.4.11'