diff --git a/README.md b/README.md
index 32a34883..7630eed7 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
 * Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs, all `tn` changed to `t` and `t` models removed (`seresnext26t_32x4d` only model w/ weights that was removed).
 * Support model default_cfgs with separate train vs test resolution `test_input_size`
 
-### Jan 30, 2012
+### Jan 30, 2021
 * Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692)
 
 ### Jan 25, 2021
diff --git a/timm/models/layers/activations_me.py b/timm/models/layers/activations_me.py
index 0441f7c4..9a12bb7e 100644
--- a/timm/models/layers/activations_me.py
+++ b/timm/models/layers/activations_me.py
@@ -30,6 +30,9 @@ class SwishJitAutoFn(torch.autograd.Function):
     Inspired by conversation btw Jeremy Howard & Adam Pazske
     https://twitter.com/jeremyphoward/status/1188251041835315200
     """
+    @staticmethod
+    def symbolic(g, x):
+        return g.op("Mul", x, g.op("Sigmoid", x))
 
     @staticmethod
     def forward(ctx, x):
@@ -152,6 +155,13 @@ class HardSwishJitAutoFn(torch.autograd.Function):
         x = ctx.saved_tensors[0]
         return hard_swish_jit_bwd(x, grad_output)
 
+    @staticmethod
+    def symbolic(g, self):
+        input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float)))
+        hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
+        hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
+        return g.op("Mul", self, hardtanh_)
+
 
 def hard_swish_me(x, inplace=False):
     return HardSwishJitAutoFn.apply(x)
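
Not part of the patch itself, but a minimal sketch of what the added `symbolic()` methods enable: tracing the memory-efficient activations through `torch.onnx.export`, which previously could not map these custom autograd Functions to ONNX ops. `TinyNet` and the in-memory buffer are illustrative only; opset 11 is assumed because the HardSwish graph uses the `Clip` form that takes min/max as graph inputs.

```python
import io

import torch
import torch.nn as nn

# Import path taken from the file modified in this diff.
from timm.models.layers.activations_me import hard_swish_me


class TinyNet(nn.Module):
    # Hypothetical toy module, used only to route a tensor through the
    # memory-efficient HardSwish autograd Function.
    def forward(self, x):
        return hard_swish_me(x)


model = TinyNet().eval()
dummy = torch.randn(1, 3, 224, 224)

buf = io.BytesIO()
# The ONNX exporter replaces HardSwishJitAutoFn with the graph built in its
# symbolic() method: Add(3) -> Clip(0, 6) -> Div(6) -> Mul. Opset 11+ is
# assumed here so that Clip accepts min/max as inputs.
torch.onnx.export(model, dummy, buf, opset_version=11)
print(f"exported {len(buf.getvalue())} bytes of ONNX")
```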