From 84fd045e4dbe3b91f25c68b6a0416f6b92692b0b Mon Sep 17 00:00:00 2001
From: Aman Arora
Date: Mon, 12 Apr 2021 04:09:01 -0400
Subject: [PATCH] only run test_forward_features

---
 tests/test_models.py | 212 +++++++++++++++++++++----------------------
 1 file changed, 106 insertions(+), 106 deletions(-)

diff --git a/tests/test_models.py b/tests/test_models.py
index 76f4ed3a..2eefdf46 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -31,98 +31,98 @@ MAX_BWD_SIZE = 128
 MAX_FWD_FEAT_SIZE = 448


-@pytest.mark.timeout(120)
-@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS[:-NUM_NON_STD]))
-@pytest.mark.parametrize('batch_size', [1])
-def test_model_forward(model_name, batch_size):
-    """Run a single forward pass with each model"""
-    model = create_model(model_name, pretrained=False)
-    model.eval()
-
-    input_size = model.default_cfg['input_size']
-    if any([x > MAX_FWD_SIZE for x in input_size]):
-        # cap forward test at max res 448 * 448 to keep resource down
-        input_size = tuple([min(x, MAX_FWD_SIZE) for x in input_size])
-    inputs = torch.randn((batch_size, *input_size))
-    outputs = model(inputs)
-
-    assert outputs.shape[0] == batch_size
-    assert not torch.isnan(outputs).any(), 'Output included NaNs'
-
-
-@pytest.mark.timeout(120)
-@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
-@pytest.mark.parametrize('batch_size', [2])
-def test_model_backward(model_name, batch_size):
-    """Run a single forward pass with each model"""
-    model = create_model(model_name, pretrained=False, num_classes=42)
-    num_params = sum([x.numel() for x in model.parameters()])
-    model.eval()
-
-    input_size = model.default_cfg['input_size']
-    if any([x > MAX_BWD_SIZE for x in input_size]):
-        # cap backward test at 128 * 128 to keep resource usage down
-        input_size = tuple([min(x, MAX_BWD_SIZE) for x in input_size])
-    inputs = torch.randn((batch_size, *input_size))
-    outputs = model(inputs)
-    outputs.mean().backward()
-    for n, x in model.named_parameters():
-        assert x.grad is not None, f'No gradient for {n}'
-    num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])
-
-    assert outputs.shape[-1] == 42
-    assert num_params == num_grad, 'Some parameters are missing gradients'
-    assert not torch.isnan(outputs).any(), 'Output included NaNs'
-
-
-@pytest.mark.timeout(120)
-@pytest.mark.parametrize('model_name', list_models(exclude_filters=NON_STD_FILTERS))
-@pytest.mark.parametrize('batch_size', [1])
-def test_model_default_cfgs(model_name, batch_size):
-    """Run a single forward pass with each model"""
-    model = create_model(model_name, pretrained=False)
-    model.eval()
-    state_dict = model.state_dict()
-    cfg = model.default_cfg
-
-    classifier = cfg['classifier']
-    pool_size = cfg['pool_size']
-    input_size = model.default_cfg['input_size']
-
-    if all([x <= MAX_FWD_FEAT_SIZE for x in input_size]) and \
-            not any([fnmatch.fnmatch(model_name, x) for x in EXCLUDE_FILTERS]):
-        # output sizes only checked if default res <= 448 * 448 to keep resource down
-        input_size = tuple([min(x, MAX_FWD_FEAT_SIZE) for x in input_size])
-        input_tensor = torch.randn((batch_size, *input_size))
-
-        # test forward_features (always unpooled)
-        outputs = model.forward_features(input_tensor)
-        assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
-
-        # test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features
-        model.reset_classifier(0)
-        outputs = model.forward(input_tensor)
-        assert len(outputs.shape) == 2
-        assert outputs.shape[-1] == model.num_features
-
-        # test model forward without pooling and classifier
-        model.reset_classifier(0, '')  # reset classifier and set global pooling to pass-through
-        outputs = model.forward(input_tensor)
-        assert len(outputs.shape) == 4
-        if not isinstance(model, timm.models.MobileNetV3):
-            # FIXME mobilenetv3 forward_features vs removed pooling differ
-            assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
-
-    # check classifier name matches default_cfg
-    assert classifier + ".weight" in state_dict.keys(), f'{classifier} not in model params'
-
-    # check first conv(s) names match default_cfg
-    first_conv = cfg['first_conv']
-    if isinstance(first_conv, str):
-        first_conv = (first_conv,)
-    assert isinstance(first_conv, (tuple, list))
-    for fc in first_conv:
-        assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'
+# @pytest.mark.timeout(120)
+# @pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS[:-NUM_NON_STD]))
+# @pytest.mark.parametrize('batch_size', [1])
+# def test_model_forward(model_name, batch_size):
+#     """Run a single forward pass with each model"""
+#     model = create_model(model_name, pretrained=False)
+#     model.eval()
+
+#     input_size = model.default_cfg['input_size']
+#     if any([x > MAX_FWD_SIZE for x in input_size]):
+#         # cap forward test at max res 448 * 448 to keep resource down
+#         input_size = tuple([min(x, MAX_FWD_SIZE) for x in input_size])
+#     inputs = torch.randn((batch_size, *input_size))
+#     outputs = model(inputs)
+
+#     assert outputs.shape[0] == batch_size
+#     assert not torch.isnan(outputs).any(), 'Output included NaNs'
+
+
+# @pytest.mark.timeout(120)
+# @pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
+# @pytest.mark.parametrize('batch_size', [2])
+# def test_model_backward(model_name, batch_size):
+#     """Run a single forward pass with each model"""
+#     model = create_model(model_name, pretrained=False, num_classes=42)
+#     num_params = sum([x.numel() for x in model.parameters()])
+#     model.eval()
+
+#     input_size = model.default_cfg['input_size']
+#     if any([x > MAX_BWD_SIZE for x in input_size]):
+#         # cap backward test at 128 * 128 to keep resource usage down
+#         input_size = tuple([min(x, MAX_BWD_SIZE) for x in input_size])
+#     inputs = torch.randn((batch_size, *input_size))
+#     outputs = model(inputs)
+#     outputs.mean().backward()
+#     for n, x in model.named_parameters():
+#         assert x.grad is not None, f'No gradient for {n}'
+#     num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])
+
+#     assert outputs.shape[-1] == 42
+#     assert num_params == num_grad, 'Some parameters are missing gradients'
+#     assert not torch.isnan(outputs).any(), 'Output included NaNs'
+
+
+# @pytest.mark.timeout(120)
+# @pytest.mark.parametrize('model_name', list_models(exclude_filters=NON_STD_FILTERS))
+# @pytest.mark.parametrize('batch_size', [1])
+# def test_model_default_cfgs(model_name, batch_size):
+#     """Run a single forward pass with each model"""
+#     model = create_model(model_name, pretrained=False)
+#     model.eval()
+#     state_dict = model.state_dict()
+#     cfg = model.default_cfg
+
+#     classifier = cfg['classifier']
+#     pool_size = cfg['pool_size']
+#     input_size = model.default_cfg['input_size']
+
+#     if all([x <= MAX_FWD_FEAT_SIZE for x in input_size]) and \
+#             not any([fnmatch.fnmatch(model_name, x) for x in EXCLUDE_FILTERS]):
+#         # output sizes only checked if default res <= 448 * 448 to keep resource down
+#         input_size = tuple([min(x, MAX_FWD_FEAT_SIZE) for x in input_size])
+#         input_tensor = torch.randn((batch_size, *input_size))
+
+#         # test forward_features (always unpooled)
+#         outputs = model.forward_features(input_tensor)
+#         assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
+
+#         # test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features
+#         model.reset_classifier(0)
+#         outputs = model.forward(input_tensor)
+#         assert len(outputs.shape) == 2
+#         assert outputs.shape[-1] == model.num_features
+
+#         # test model forward without pooling and classifier
+#         model.reset_classifier(0, '')  # reset classifier and set global pooling to pass-through
+#         outputs = model.forward(input_tensor)
+#         assert len(outputs.shape) == 4
+#         if not isinstance(model, timm.models.MobileNetV3):
+#             # FIXME mobilenetv3 forward_features vs removed pooling differ
+#             assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
+
+#     # check classifier name matches default_cfg
+#     assert classifier + ".weight" in state_dict.keys(), f'{classifier} not in model params'
+
+#     # check first conv(s) names match default_cfg
+#     first_conv = cfg['first_conv']
+#     if isinstance(first_conv, str):
+#         first_conv = (first_conv,)
+#     assert isinstance(first_conv, (tuple, list))
+#     for fc in first_conv:
+#         assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'


 if 'GITHUB_ACTIONS' not in os.environ:
@@ -147,20 +147,20 @@ EXCLUDE_JIT_FILTERS = [
 ]


-@pytest.mark.timeout(120)
-@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS))
-@pytest.mark.parametrize('batch_size', [1])
-def test_model_forward_torchscript(model_name, batch_size):
-    """Run a single forward pass with each model"""
-    with set_scriptable(True):
-        model = create_model(model_name, pretrained=False)
-    model.eval()
-    input_size = (3, 128, 128)  # jit compile is already a bit slow and we've tested normal res already...
-    model = torch.jit.script(model)
-    outputs = model(torch.randn((batch_size, *input_size)))
-
-    assert outputs.shape[0] == batch_size
-    assert not torch.isnan(outputs).any(), 'Output included NaNs'
+# @pytest.mark.timeout(120)
+# @pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS))
+# @pytest.mark.parametrize('batch_size', [1])
+# def test_model_forward_torchscript(model_name, batch_size):
+#     """Run a single forward pass with each model"""
+#     with set_scriptable(True):
+#         model = create_model(model_name, pretrained=False)
+#     model.eval()
+#     input_size = (3, 128, 128)  # jit compile is already a bit slow and we've tested normal res already...
+#     model = torch.jit.script(model)
+#     outputs = model(torch.randn((batch_size, *input_size)))
+
+#     assert outputs.shape[0] == batch_size
+#     assert not torch.isnan(outputs).any(), 'Output included NaNs'


 EXCLUDE_FEAT_FILTERS = [
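With this patch applied, every test other than the forward-features one is commented out, so a plain `pytest tests/test_models.py` run exercises only that test. As a rough sketch of an alternative that does not require editing the file, pytest's `-k` expression filter can select the same test; this assumes the remaining test's name contains `forward_features` (per the commit subject) and that pytest plus timm's test dependencies (e.g. pytest-timeout) are installed.

```python
# Sketch: run only the forward-features test via pytest's -k filter instead of
# commenting the other tests out. The "forward_features" substring is assumed
# from the commit subject; adjust the expression if the test is named differently.
import pytest

if __name__ == "__main__":
    # Equivalent CLI invocation: pytest tests/test_models.py -k "forward_features" -v
    raise SystemExit(pytest.main(["tests/test_models.py", "-k", "forward_features", "-v"]))
```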