From 214c84a2359b8021e3d2f170d18e734b78538d18 Mon Sep 17 00:00:00 2001
From: Ross Wightman
Date: Wed, 1 Dec 2021 14:58:09 -0800
Subject: [PATCH] Disable use of timm nn.Linear wrapper since AMP autocast + torchscript use appears fixed

---
 timm/models/layers/classifier.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/timm/models/layers/classifier.py b/timm/models/layers/classifier.py
index 2b745413..798748da 100644
--- a/timm/models/layers/classifier.py
+++ b/timm/models/layers/classifier.py
@@ -6,7 +6,6 @@ from torch import nn as nn
 from torch.nn import functional as F
 
 from .adaptive_avgmax_pool import SelectAdaptivePool2d
-from .linear import Linear
 
 
 def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False):
@@ -26,8 +25,7 @@ def _create_fc(num_features, num_classes, use_conv=False):
     if num_classes <= 0:
         fc = nn.Identity()  # pass-through (no classifier)
     elif use_conv:
         fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
     else:
-        # NOTE: using my Linear wrapper that fixes AMP + torchscript casting issue
-        fc = Linear(num_features, num_classes, bias=True)
+        fc = nn.Linear(num_features, num_classes, bias=True)
     return fc
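
For context: the wrapper removed by this patch existed because a torchscript-ed module bypasses AMP autocast's automatic dtype handling, so a scripted classifier head could receive a float16 input while its weight and bias stayed float32, and F.linear would fail on the dtype mismatch. A minimal sketch of that workaround, modeled on the removed timm/models/layers/linear.py module (illustrative, not guaranteed to match the upstream source verbatim):

    import torch
    from torch import nn
    from torch.nn import functional as F


    class Linear(nn.Linear):
        """nn.Linear drop-in that tolerates AMP autocast under torchscript.

        In eager mode autocast inserts the dtype casts at the F.linear call,
        so the default path is kept. When scripted, those casts don't apply,
        so weight and bias are cast to the input dtype explicitly.
        """

        def forward(self, input: torch.Tensor) -> torch.Tensor:
            if torch.jit.is_scripting():
                # Explicit cast: scripted modules bypass autocast's handling.
                bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None
                return F.linear(input, self.weight.to(dtype=input.dtype), bias)
            else:
                return F.linear(input, self.weight, self.bias)

With the autocast + torchscript interaction fixed in PyTorch, the explicit casts are no longer needed and plain nn.Linear suffices, which is what this patch restores.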