Merge remote-tracking branch 'origin/norm_norm_norm' into bits_and_tpu

Branch: pull/1239/head
Author: Ross Wightman
Commit: 0e212e8fe5

--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -386,9 +386,6 @@ def test_model_forward_fx(model_name, batch_size):
     assert not torch.isnan(outputs).any(), 'Output included NaNs'
-if 'GITHUB_ACTIONS' not in os.environ:
-    # FIXME this test is causing GitHub actions to run out of RAM and abruptly kill the test process
 @pytest.mark.timeout(120)
 @pytest.mark.parametrize('model_name', list_models(
     exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS, name_matches_cfg=True))
@@ -422,6 +419,9 @@ if 'GITHUB_ACTIONS' not in os.environ:
         assert not torch.isnan(outputs).any(), 'Output included NaNs'
+if 'GITHUB_ACTIONS' not in os.environ:
+    # FIXME this test is causing GitHub actions to run out of RAM and abruptly kill the test process
     # reason: model is scripted after fx tracing, but beit has torch.jit.is_scripting() control flow
     EXCLUDE_FX_JIT_FILTERS = [
         'deit_*_distilled_patch16_224',

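A note on the `# reason` comment in the hunk above: `torch.fx.symbolic_trace` evaluates `torch.jit.is_scripting()` at trace time (where it returns False), so only the eager branch survives in the traced graph; scripting that GraphModule afterwards then scripts exactly the path the guard was written to keep away from TorchScript. Below is a minimal sketch of that interaction, not timm test code; the `Toy` module is invented for illustration.

```python
import torch
import torch.fx
import torch.nn as nn


class Toy(nn.Module):
    """Toy stand-in (not the real BEiT code) for a module that branches on
    torch.jit.is_scripting() inside forward()."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if torch.jit.is_scripting():
            # path written with TorchScript in mind
            return x.relu()
        else:
            # eager / tracing path
            return torch.relu(x)


m = Toy()

# Scripting the original module keeps both branches and resolves
# is_scripting() at runtime.
scripted = torch.jit.script(m)

# FX symbolic tracing calls torch.jit.is_scripting() while tracing, gets
# False, and bakes only the eager branch into the GraphModule.
traced = torch.fx.symbolic_trace(m)
print(traced.code)  # the is_scripting() branch is gone from the generated code

# Scripting the traced GraphModule therefore scripts only the eager path.
scripted_after_fx = torch.jit.script(traced)
```

For a model like beit, the branch that gets baked in is the one written for eager/FX use, so the post-trace `torch.jit.script` step can fail or change behaviour, which is presumably why those models sit in `EXCLUDE_FX_JIT_FILTERS`.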
--- a/timm/models/layers/classifier.py
+++ b/timm/models/layers/classifier.py
@@ -6,7 +6,6 @@ from torch import nn as nn
 from torch.nn import functional as F
 from .adaptive_avgmax_pool import SelectAdaptivePool2d
-from .linear import Linear
 def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False):
@@ -26,8 +25,7 @@ def _create_fc(num_features, num_classes, use_conv=False):
     elif use_conv:
         fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
     else:
-        # NOTE: using my Linear wrapper that fixes AMP + torchscript casting issue
-        fc = Linear(num_features, num_classes, bias=True)
+        fc = nn.Linear(num_features, num_classes, bias=True)
     return fc

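On the classifier.py change: `_create_fc` now builds a plain `nn.Linear`, and the `from .linear import Linear` import goes away with it. For context, the removed NOTE referred to a wrapper along the lines of the sketch below, a hedged reconstruction rather than code copied from this commit: the idea is to sidestep an AMP + TorchScript dtype mismatch by casting weight and bias to the input dtype when running scripted.

```python
import torch
from torch import nn
from torch.nn import functional as F


class Linear(nn.Linear):
    """Sketch of an AMP + TorchScript friendly Linear (hedged reconstruction,
    not necessarily the exact wrapper this commit removes).

    When running as a scripted module, autocast may not cover the underlying
    matmul, so weight/bias are cast to the input dtype explicitly; in eager
    mode it behaves exactly like nn.Linear.
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if torch.jit.is_scripting():
            bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None
            return F.linear(input, self.weight.to(dtype=input.dtype), bias)
        else:
            return F.linear(input, self.weight, self.bias)
```

If the wrapper matched this sketch, its eager path was identical to `nn.Linear`, so the switch in `_create_fc` only matters when the head is scripted under AMP.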