@@ -13,35 +13,12 @@ import torch
 import torch.nn as nn
 
 
-@torch.jit.script
-def evo_batch_jit(
-        x: torch.Tensor, v: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, running_var: torch.Tensor,
-        momentum: float, training: bool, nonlin: bool, eps: float):
-    x_type = x.dtype
-    running_var = running_var.detach()  # FIXME why is this needed, it's a buffer?
-    if training:
-        var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)  # FIXME biased, unbiased?
-        running_var.copy_(momentum * var + (1 - momentum) * running_var)
-    else:
-        var = running_var.clone()
-
-    if nonlin:
-        # FIXME biased, unbiased?
-        d = (x * v.to(x_type)) + x.var(dim=(2, 3), unbiased=False, keepdim=True).add_(eps).sqrt_().to(dtype=x_type)
-        d = d.max(var.add(eps).sqrt_().to(dtype=x_type))
-        x = x / d
-        return x.mul_(weight).add_(bias)
-    else:
-        return x.mul(weight).add_(bias)
-
-
 class EvoNormBatch2d(nn.Module):
-    def __init__(self, num_features, momentum=0.1, nonlin=True, eps=1e-5, jit=True):
+    def __init__(self, num_features, momentum=0.1, nonlin=True, eps=1e-5):
         super(EvoNormBatch2d, self).__init__()
         self.momentum = momentum
         self.nonlin = nonlin
         self.eps = eps
-        self.jit = jit
         param_shape = (1, num_features, 1, 1)
         self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True)
         self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True)
@@ -58,50 +35,29 @@ class EvoNormBatch2d(nn.Module):
 
     def forward(self, x):
         assert x.dim() == 4, 'expected 4D input'
-        if self.jit:
-            return evo_batch_jit(
-                x, self.v, self.weight, self.bias, self.running_var, self.momentum,
-                self.training, self.nonlin, self.eps)
         x_type = x.dtype
         if self.training:
-            var = x.var(dim=(0, 2, 3), keepdim=True)
-            self.running_var.copy_(self.momentum * var + (1 - self.momentum) * self.running_var)
+            var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)
+            self.running_var.copy_(self.momentum * var.detach() + (1 - self.momentum) * self.running_var)
         else:
             var = self.running_var.clone()
 
         if self.nonlin:
             v = self.v.to(dtype=x_type)
-            d = (x * v) + x.var(dim=(2, 3), keepdim=True).add_(self.eps).sqrt_().to(dtype=x_type)
-            d = d.max(var.add(self.eps).sqrt_().to(dtype=x_type))
+            d = (x * v) + x.var(dim=(2, 3), unbiased=False, keepdim=True).add_(self.eps).sqrt_().to(dtype=x_type)
+            d = d.max(var.add_(self.eps).sqrt_().to(dtype=x_type))
             x = x / d
             return x.mul_(self.weight).add_(self.bias)
         else:
             return x.mul(self.weight).add_(self.bias)
 
 
-@torch.jit.script
-def evo_sample_jit(
-        x: torch.Tensor, v: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor,
-        groups: int, nonlin: bool, eps: float):
-    B, C, H, W = x.shape
-    assert C % groups == 0
-    if nonlin:
-        n = (x * v).sigmoid_().reshape(B, groups, -1)
-        x = x.reshape(B, groups, -1)
-        x = n / x.var(dim=-1, unbiased=False, keepdim=True).add_(eps).sqrt_()
-        x = x.reshape(B, C, H, W)
-        return x.mul_(weight).add_(bias)
-    else:
-        return x.mul(weight).add_(bias)
-
-
 class EvoNormSample2d(nn.Module):
-    def __init__(self, num_features, nonlin=True, groups=8, eps=1e-5, jit=True):
+    def __init__(self, num_features, nonlin=True, groups=8, eps=1e-5):
         super(EvoNormSample2d, self).__init__()
         self.nonlin = nonlin
         self.groups = groups
         self.eps = eps
-        self.jit = jit
         param_shape = (1, num_features, 1, 1)
         self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True)
         self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True)
@@ -117,18 +73,13 @@ class EvoNormSample2d(nn.Module):
 
     def forward(self, x):
         assert x.dim() == 4, 'expected 4D input'
-        if self.jit:
-            return evo_sample_jit(
-                x, self.v, self.weight, self.bias, self.groups, self.nonlin, self.eps)
         B, C, H, W = x.shape
         assert C % self.groups == 0
         if self.nonlin:
             n = (x * self.v).sigmoid().reshape(B, self.groups, -1)
             x = x.reshape(B, self.groups, -1)
-            x = n / (x.std(dim=-1, unbiased=False, keepdim=True) + self.eps)
+            x = n / x.var(dim=-1, unbiased=False, keepdim=True).add_(self.eps).sqrt_()
             x = x.reshape(B, C, H, W)
             return x.mul_(self.weight).add_(self.bias)
         else:
             return x.mul(self.weight).add_(self.bias)
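
For reference, a minimal smoke test of the two layers after this change. This is a hypothetical snippet, not part of the diff; it assumes the surrounding file also defines the `v` parameter and registers the `running_var` buffer that these hunks reference but do not show:

```python
import torch

# Hypothetical smoke test for EvoNormBatch2d / EvoNormSample2d (not part of the diff).
# Assumes the full file defines self.v and the running_var buffer used in forward().
x = torch.randn(4, 32, 16, 16)

bn = EvoNormBatch2d(32)
bn.train()
y = bn(x)            # training path: batch variance is folded into running_var
assert y.shape == x.shape

bn.eval()
y_eval = bn(x)       # eval path: normalizes with the stored running_var instead

sn = EvoNormSample2d(32, groups=8)
y_s = sn(x)          # per-sample group statistics, no running state
assert y_s.shape == x.shape
```

With the `jit` flag gone, both layers always take the eager path, so the train/eval behavior above covers the only remaining code path.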