from torch import nn as nn

from .create_act import get_act_fn


class SEModule(nn.Module):
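    """ Squeeze-and-Excitation channel attention module
    From `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
    """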
    def __init__(self, channels, reduction=16, act_layer=nn.ReLU, min_channels=8, reduction_channels=None,
                 gate_fn='sigmoid'):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # reduced width: explicit reduction_channels wins, else channels // reduction floored at min_channels
        reduction_channels = reduction_channels or max(channels // reduction, min_channels)
        self.fc1 = nn.Conv2d(
            channels, reduction_channels, kernel_size=1, padding=0, bias=True)
        self.act = act_layer(inplace=True)
        self.fc2 = nn.Conv2d(
            reduction_channels, channels, kernel_size=1, padding=0, bias=True)
        self.gate_fn = get_act_fn(gate_fn)

    def forward(self, x):
        # squeeze: global average pool to (B, C, 1, 1)
        x_se = self.avg_pool(x)
        # excite: 1x1 conv bottleneck with non-linearity
        x_se = self.fc1(x_se)
        x_se = self.act(x_se)
        x_se = self.fc2(x_se)
        # scale: re-weight input channels by the gated attention
        return x * self.gate_fn(x_se)
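
# Example usage (illustrative sketch, not part of the original module; channel
# count and input size below are arbitrary placeholders):
#
#   import torch
#   se = SEModule(channels=64, reduction=16)
#   x = torch.randn(2, 64, 56, 56)   # NCHW feature map
#   out = se(x)                      # same shape as x; each channel scaled by its sigmoid gate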


class EffectiveSEModule(nn.Module):
    """ 'Effective Squeeze-Excitation'
    From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
    """
    def __init__(self, channel, gate_fn='hard_sigmoid'):
        super(EffectiveSEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # single full-width 1x1 conv, no channel reduction (the 'effective' simplification of SE)
        self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
        self.gate_fn = get_act_fn(gate_fn)

    def forward(self, x):
        # squeeze to (B, C, 1, 1), project, then gate the input channels in-place
        x_se = self.avg_pool(x)
        x_se = self.fc(x_se)
        return x * self.gate_fn(x_se, inplace=True)
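
# Example usage (illustrative sketch, not part of the original module):
#
#   import torch
#   ese = EffectiveSEModule(channel=64)
#   x = torch.randn(2, 64, 56, 56)
#   out = ese(x)   # same shape as x; hard-sigmoid gate, no channel reduction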