From e72c98997360a5ee8f271615ba02f8122b45fde0 Mon Sep 17 00:00:00 2001 From: nateraw Date: Wed, 1 Sep 2021 18:14:28 -0600 Subject: [PATCH 01/19] :sparkles: add ability to push to hf hub --- timm/models/hub.py | 120 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 118 insertions(+), 2 deletions(-) diff --git a/timm/models/hub.py b/timm/models/hub.py index 9a9b5530..9e0ac18a 100644 --- a/timm/models/hub.py +++ b/timm/models/hub.py @@ -1,6 +1,7 @@ import json import logging import os +from pathlib import Path from functools import partial from typing import Union, Optional @@ -13,8 +14,7 @@ except ImportError: from timm import __version__ try: - from huggingface_hub import hf_hub_url - from huggingface_hub import cached_download + from huggingface_hub import cached_download, hf_hub_url, HfFolder, HfApi, Repository cached_download = partial(cached_download, library_name="timm", library_version=__version__) except ImportError: hf_hub_url = None @@ -94,3 +94,119 @@ def load_state_dict_from_hf(model_id: str): cached_file = _download_from_hf(model_id, 'pytorch_model.bin') state_dict = torch.load(cached_file, map_location='cpu') return state_dict + + +def save_pretrained_for_hf(model, save_directory, **config_kwargs): + assert has_hf_hub(True) + save_directory = Path(save_directory) + save_directory.mkdir(exist_ok=True, parents=True) + + weights_path = save_directory / 'pytorch_model.bin' + torch.save(model.state_dict(), weights_path) + + config_path = save_directory / 'config.json' + config = model.default_cfg + config.update(config_kwargs) + + with config_path.open('w') as f: + json.dump(config, f, indent=4) + + +def push_to_hf_hub( + model, + repo_path_or_name: Optional[str] = None, + repo_url: Optional[str] = None, + commit_message: Optional[str] = "Add model", + organization: Optional[str] = None, + private: Optional[bool] = None, + api_endpoint: Optional[str] = None, + use_auth_token: Optional[Union[bool, str]] = None, + git_user: Optional[str] = None, + 
git_email: Optional[str] = None, + config: Optional[dict] = None, +): + """ + Upload model checkpoint and config to the 🤗 Model Hub while synchronizing a local clone of the repo in + :obj:`repo_path_or_name`. + + Parameters: + repo_path_or_name (:obj:`str`, `optional`): + Can either be a repository name for your model or tokenizer in the Hub or a path to a local folder (in + which case the repository will have the name of that local folder). If not specified, will default to + the name given by :obj:`repo_url` and a local directory with that name will be created. + repo_url (:obj:`str`, `optional`): + Specify this in case you want to push to an existing repository in the hub. If unspecified, a new + repository will be created in your namespace (unless you specify an :obj:`organization`) with + :obj:`repo_name`. + commit_message (:obj:`str`, `optional`): + Message to commit while pushing. Will default to :obj:`"add config"`, :obj:`"add tokenizer"` or + :obj:`"add model"` depending on the type of the class. + organization (:obj:`str`, `optional`): + Organization in which you want to push your model or tokenizer (you must be a member of this + organization). + private (:obj:`bool`, `optional`): + Whether or not the repository created should be private (requires a paying subscription). + api_endpoint (:obj:`str`, `optional`): + The API endpoint to use when pushing the model to the hub. + use_auth_token (:obj:`bool` or :obj:`str`, `optional`): + The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token + generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). Will default to + :obj:`True` if :obj:`repo_url` is not specified. + git_user (``str``, `optional`): + will override the ``git config user.name`` for committing and pushing files to the hub. + git_email (``str``, `optional`): + will override the ``git config user.email`` for committing and pushing files to the hub. 
+ config (:obj:`dict`, `optional`): + Configuration object to be saved alongside the model weights. + + + Returns: + The url of the commit of your model in the given repository. + """ + assert has_hf_hub(True) + if repo_path_or_name is None and repo_url is None: + raise ValueError( + "You need to specify a `repo_path_or_name` or a `repo_url`." + ) + + if use_auth_token is None and repo_url is None: + token = HfFolder.get_token() + if token is None: + raise ValueError( + "You must login to the Hugging Face hub on this computer by typing `transformers-cli login` and " + "entering your credentials to use `use_auth_token=True`. Alternatively, you can pass your own " + "token as the `use_auth_token` argument." + ) + elif isinstance(use_auth_token, str): + token = use_auth_token + else: + token = None + + if repo_path_or_name is None: + repo_path_or_name = repo_url.split("/")[-1] + + # If no URL is passed and there's no path to a directory containing files, create a repo + if repo_url is None and not os.path.exists(repo_path_or_name): + repo_name = Path(repo_path_or_name).name + repo_url = HfApi(endpoint=api_endpoint).create_repo( + token, + repo_name, + organization=organization, + private=private, + repo_type=None, + exist_ok=True, + ) + + repo = Repository( + repo_path_or_name, + clone_from=repo_url, + use_auth_token=use_auth_token, + git_user=git_user, + git_email=git_email, + ) + repo.git_pull(rebase=True) + + save_config = model.default_cfg + save_config.update(config or {}) + with repo.commit(commit_message): + save_pretrained_for_hf(model, repo.local_dir, **save_config) From 28d2841acfad0dbdd720b9335f361bcc80cde1a8 Mon Sep 17 00:00:00 2001 From: nateraw Date: Wed, 1 Sep 2021 18:15:08 -0600 Subject: [PATCH 02/19] :lipstick: apply isort --- timm/models/hub.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/timm/models/hub.py b/timm/models/hub.py index 9e0ac18a..f5b041a6 100644 --- a/timm/models/hub.py +++ b/timm/models/hub.py @@ -1,20 
+1,24 @@ import json import logging import os -from pathlib import Path from functools import partial -from typing import Union, Optional +from pathlib import Path +from typing import Optional, Union import torch -from torch.hub import load_state_dict_from_url, download_url_to_file, urlparse, HASH_REGEX +from torch.hub import (HASH_REGEX, download_url_to_file, + load_state_dict_from_url, urlparse) + try: from torch.hub import get_dir except ImportError: from torch.hub import _get_torch_home as get_dir from timm import __version__ + try: - from huggingface_hub import cached_download, hf_hub_url, HfFolder, HfApi, Repository + from huggingface_hub import (HfApi, HfFolder, Repository, cached_download, + hf_hub_url) cached_download = partial(cached_download, library_name="timm", library_version=__version__) except ImportError: hf_hub_url = None From abf9d51bc3d3b2ca69f569b9f226a3cdf4ec1d10 Mon Sep 17 00:00:00 2001 From: nateraw Date: Tue, 7 Sep 2021 18:39:26 -0600 Subject: [PATCH 03/19] :construction: wip --- timm/models/hub.py | 118 +++++++++++++-------------------------------- 1 file changed, 33 insertions(+), 85 deletions(-) diff --git a/timm/models/hub.py b/timm/models/hub.py index f5b041a6..4cdca7f0 100644 --- a/timm/models/hub.py +++ b/timm/models/hub.py @@ -3,11 +3,10 @@ import logging import os from functools import partial from pathlib import Path -from typing import Optional, Union +from typing import Union import torch -from torch.hub import (HASH_REGEX, download_url_to_file, - load_state_dict_from_url, urlparse) +from torch.hub import HASH_REGEX, download_url_to_file, urlparse, load_state_dict_from_url try: from torch.hub import get_dir @@ -17,8 +16,7 @@ except ImportError: from timm import __version__ try: - from huggingface_hub import (HfApi, HfFolder, Repository, cached_download, - hf_hub_url) + from huggingface_hub import HfApi, HfFolder, Repository, cached_download, hf_hub_url cached_download = partial(cached_download, library_name="timm", 
library_version=__version__) except ImportError: hf_hub_url = None @@ -110,107 +108,57 @@ def save_pretrained_for_hf(model, save_directory, **config_kwargs): config_path = save_directory / 'config.json' config = model.default_cfg + config['num_classes'] = config_kwargs.pop('num_classes', model.num_classes) + config['num_features'] = config_kwargs.pop('num_features', model.num_features) + config['labels'] = config_kwargs.pop('labels', [f"LABEL_{i}" for i in range(config['num_classes'])]) config.update(config_kwargs) with config_path.open('w') as f: - json.dump(config, f, indent=4) + json.dump(config, f, indent=2) def push_to_hf_hub( model, - repo_path_or_name: Optional[str] = None, - repo_url: Optional[str] = None, - commit_message: Optional[str] = "Add model", - organization: Optional[str] = None, - private: Optional[bool] = None, - api_endpoint: Optional[str] = None, - use_auth_token: Optional[Union[bool, str]] = None, - git_user: Optional[str] = None, - git_email: Optional[str] = None, - config: Optional[dict] = None, + local_dir, + repo_namespace_or_url=None, + commit_message='Add model', + use_auth_token=True, + git_email=None, + git_user=None, + revision=None, + **config_kwargs ): - """ - Upload model checkpoint and config to the 🤗 Model Hub while synchronizing a local clone of the repo in - :obj:`repo_path_or_name`. - - Parameters: - repo_path_or_name (:obj:`str`, `optional`): - Can either be a repository name for your model or tokenizer in the Hub or a path to a local folder (in - which case the repository will have the name of that local folder). If not specified, will default to - the name given by :obj:`repo_url` and a local directory with that name will be created. - repo_url (:obj:`str`, `optional`): - Specify this in case you want to push to an existing repository in the hub. If unspecified, a new - repository will be created in your namespace (unless you specify an :obj:`organization`) with - :obj:`repo_name`. 
- commit_message (:obj:`str`, `optional`): - Message to commit while pushing. Will default to :obj:`"add config"`, :obj:`"add tokenizer"` or - :obj:`"add model"` depending on the type of the class. - organization (:obj:`str`, `optional`): - Organization in which you want to push your model or tokenizer (you must be a member of this - organization). - private (:obj:`bool`, `optional`): - Whether or not the repository created should be private (requires a paying subscription). - api_endpoint (:obj:`str`, `optional`): - The API endpoint to use when pushing the model to the hub. - use_auth_token (:obj:`bool` or :obj:`str`, `optional`): - The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token - generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). Will default to - :obj:`True` if :obj:`repo_url` is not specified. - git_user (``str``, `optional`): - will override the ``git config user.name`` for committing and pushing files to the hub. - git_email (``str``, `optional`): - will override the ``git config user.email`` for committing and pushing files to the hub. - config (:obj:`dict`, `optional`): - Configuration object to be saved alongside the model weights. - - - Returns: - The url of the commit of your model in the given repository. - """ - assert has_hf_hub(True) - if repo_path_or_name is None and repo_url is None: - raise ValueError( - "You need to specify a `repo_path_or_name` or a `repo_url`." - ) - if use_auth_token is None and repo_url is None: - token = HfFolder.get_token() + if repo_namespace_or_url: + repo_owner, repo_name = repo_namespace_or_url.rstrip('/').split('/')[-2:] + else: + if isinstance(use_auth_token, str): + token = use_auth_token + else: + token = HfFolder.get_token() + if token is None: raise ValueError( "You must login to the Hugging Face hub on this computer by typing `transformers-cli login` and " "entering your credentials to use `use_auth_token=True`. 
Alternatively, you can pass your own " "token as the `use_auth_token` argument." ) - elif isinstance(use_auth_token, str): - token = use_auth_token - else: - token = None - - if repo_path_or_name is None: - repo_path_or_name = repo_url.split("/")[-1] - - # If no URL is passed and there's no path to a directory containing files, create a repo - if repo_url is None and not os.path.exists(repo_path_or_name): - repo_name = Path(repo_path_or_name).name - repo_url = HfApi(endpoint=api_endpoint).create_repo( - token, - repo_name, - organization=organization, - private=private, - repo_type=None, - exist_ok=True, - ) + + repo_owner = HfApi().whoami(token)['name'] + repo_name = Path(local_dir).name + + repo_url = f'https://huggingface.co/{repo_owner}/{repo_name}' repo = Repository( - repo_path_or_name, + local_dir, clone_from=repo_url, use_auth_token=use_auth_token, git_user=git_user, git_email=git_email, + revision=revision, ) - repo.git_pull(rebase=True) - save_config = model.default_cfg - save_config.update(config or {}) with repo.commit(commit_message): - save_pretrained_for_hf(model, repo.local_dir, **save_config) + save_pretrained_for_hf(model, repo.local_dir, **config_kwargs) + + return repo.git_remote_url() From 2b6ade24b3479a641f43aec7bded20bfbb19603e Mon Sep 17 00:00:00 2001 From: nateraw Date: Mon, 13 Sep 2021 23:31:28 -0400 Subject: [PATCH 04/19] :art: write model card to enable inference --- timm/models/hub.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/timm/models/hub.py b/timm/models/hub.py index 4cdca7f0..184e9b85 100644 --- a/timm/models/hub.py +++ b/timm/models/hub.py @@ -159,6 +159,13 @@ def push_to_hf_hub( ) with repo.commit(commit_message): + # Save model weights and config save_pretrained_for_hf(model, repo.local_dir, **config_kwargs) + # Save a model card if it doesn't exist, enabling inference. 
+ readme_path = Path(repo.local_dir) / 'README.md' + readme_txt = f'---\ntags:\n- image-classification\n- timm\nlibrary_tag: timm\n---\n# Model card for {repo_name}' + if not readme_path.exists(): + readme_path.write_text(readme_txt) + return repo.git_remote_url() From e65a2cba3d6d7bea6b95c4ed482872f4e2355e33 Mon Sep 17 00:00:00 2001 From: nateraw Date: Tue, 14 Sep 2021 01:07:04 -0400 Subject: [PATCH 05/19] :art: cleanup and add a couple comments --- timm/models/hub.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/timm/models/hub.py b/timm/models/hub.py index 184e9b85..31593f88 100644 --- a/timm/models/hub.py +++ b/timm/models/hub.py @@ -7,14 +7,12 @@ from typing import Union import torch from torch.hub import HASH_REGEX, download_url_to_file, urlparse, load_state_dict_from_url - try: from torch.hub import get_dir except ImportError: from torch.hub import _get_torch_home as get_dir from timm import __version__ - try: from huggingface_hub import HfApi, HfFolder, Repository, cached_download, hf_hub_url cached_download = partial(cached_download, library_name="timm", library_version=__version__) @@ -158,14 +156,15 @@ def push_to_hf_hub( revision=revision, ) + # Prepare a default model card that includes the necessary tags to enable inference. + readme_text = f'---\ntags:\n- image-classification\n- timm\nlibrary_tag: timm\n---\n# Model card for {repo_name}' with repo.commit(commit_message): - # Save model weights and config + # Save model weights and config. save_pretrained_for_hf(model, repo.local_dir, **config_kwargs) - # Save a model card if it doesn't exist, enabling inference. + # Save a model card if it doesn't exist. 
readme_path = Path(repo.local_dir) / 'README.md' - readme_txt = f'---\ntags:\n- image-classification\n- timm\nlibrary_tag: timm\n---\n# Model card for {repo_name}' if not readme_path.exists(): - readme_path.write_text(readme_txt) + readme_path.write_text(readme_text) return repo.git_remote_url() From adcb74f87f09cb9db75b62a8690b53ae1552dda5 Mon Sep 17 00:00:00 2001 From: nateraw Date: Tue, 14 Sep 2021 01:11:40 -0400 Subject: [PATCH 06/19] :art: Import load_state_dict_from_url directly --- timm/models/helpers.py | 4 ++-- timm/models/hub.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/timm/models/helpers.py b/timm/models/helpers.py index 662a7a48..281f2412 100644 --- a/timm/models/helpers.py +++ b/timm/models/helpers.py @@ -11,10 +11,10 @@ from typing import Any, Callable, Optional, Tuple import torch import torch.nn as nn - +from torch.hub import load_state_dict_from_url from .features import FeatureListNet, FeatureDictNet, FeatureHookNet -from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf, load_state_dict_from_url +from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf from .layers import Conv2dSame, Linear diff --git a/timm/models/hub.py b/timm/models/hub.py index 31593f88..a436aff6 100644 --- a/timm/models/hub.py +++ b/timm/models/hub.py @@ -6,7 +6,7 @@ from pathlib import Path from typing import Union import torch -from torch.hub import HASH_REGEX, download_url_to_file, urlparse, load_state_dict_from_url +from torch.hub import HASH_REGEX, download_url_to_file, urlparse try: from torch.hub import get_dir except ImportError: From f0507f6da6c3eb93960e1d712e8a40b81fd8d8fb Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Mon, 22 Nov 2021 09:37:36 -0800 Subject: [PATCH 07/19] Fix k_decay default arg != 1.0 in poly scheduler --- timm/scheduler/poly_lr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/timm/scheduler/poly_lr.py b/timm/scheduler/poly_lr.py index 0c1e63b7..9c351be6 
100644 --- a/timm/scheduler/poly_lr.py +++ b/timm/scheduler/poly_lr.py @@ -37,7 +37,7 @@ class PolyLRScheduler(Scheduler): noise_pct=0.67, noise_std=1.0, noise_seed=42, - k_decay=.5, + k_decay=1.0, initialize=True) -> None: super().__init__( optimizer, param_group_field="lr", From 1e51c2d02e77373e4e4248f5d826f519379ebdff Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Mon, 22 Nov 2021 09:46:43 -0800 Subject: [PATCH 08/19] More FX test tweaks --- tests/test_models.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/tests/test_models.py b/tests/test_models.py index 68939a14..c3642eb9 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -348,6 +348,7 @@ if 'GITHUB_ACTIONS' in os.environ: 'vgg*', 'vit_large*', 'xcit_large*', + 'mixer_l*', ] @@ -368,15 +369,16 @@ def test_model_forward_fx(model_name, batch_size): input_size = _get_input_size(model=model, target=TARGET_FWD_FX_SIZE) if max(input_size) > MAX_FWD_FX_SIZE: pytest.skip("Fixed input size model > limit.") - inputs = torch.randn((batch_size, *input_size)) - outputs = model(inputs) - if isinstance(outputs, tuple): - outputs = torch.cat(outputs) + with torch.no_grad(): + inputs = torch.randn((batch_size, *input_size)) + outputs = model(inputs) + if isinstance(outputs, tuple): + outputs = torch.cat(outputs) - model = _create_fx_model(model) - fx_outputs = tuple(model(inputs).values()) - if isinstance(fx_outputs, tuple): - fx_outputs = torch.cat(fx_outputs) + model = _create_fx_model(model) + fx_outputs = tuple(model(inputs).values()) + if isinstance(fx_outputs, tuple): + fx_outputs = torch.cat(fx_outputs) assert torch.all(fx_outputs == outputs) assert outputs.shape[0] == batch_size @@ -440,9 +442,10 @@ def test_model_forward_fx_torchscript(model_name, batch_size): model.eval() model = torch.jit.script(_create_fx_model(model)) - outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) - if isinstance(outputs, tuple): - outputs = 
torch.cat(outputs) + with torch.no_grad(): + outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) + if isinstance(outputs, tuple): + outputs = torch.cat(outputs) assert outputs.shape[0] == batch_size assert not torch.isnan(outputs).any(), 'Output included NaNs' From ce76a810c2f6f8719c2a3bd1325b24da78928848 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Mon, 22 Nov 2021 11:48:40 -0800 Subject: [PATCH 09/19] New FX test strategy, filter based on param count --- tests/test_models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_models.py b/tests/test_models.py index c3642eb9..9fb826c5 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -399,8 +399,10 @@ def test_model_backward_fx(model_name, batch_size): pytest.skip("Fixed input size model > limit.") model = create_model(model_name, pretrained=False, num_classes=42) - num_params = sum([x.numel() for x in model.parameters()]) model.train() + num_params = sum([x.numel() for x in model.parameters()]) + if 'GITHUB_ACTIONS' in os.environ and num_params > 100e6: + pytest.skip("Skipping FX backward test on model with more than 100M params.") model = _create_fx_model(model, train=True) outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) From 79bf4f163f956252a84b6098fa06a81f0f11deff Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Mon, 22 Nov 2021 11:49:05 -0800 Subject: [PATCH 10/19] Add mention of ResNet50 weights w/ RSB recipe ingredients and lower train res for better @ 224 results (but worse res scaling beyond). Not changing default. 
--- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index e398ac1b..e300cc5b 100644 --- a/README.md +++ b/README.md @@ -23,10 +23,11 @@ I'm fortunate to be able to dedicate significant time and money of my own suppor ## What's New -### Nov 19, 2021 +### Nov 22, 2021 * A number of updated weights anew new model defs * `eca_halonext26ts` - 79.5 @ 256 * `resnet50_gn` (new) - 80.1 @ 224, 81.3 @ 288 + * `resnet50` - 80.68 @ 224, 80.9 @ 288 (trained at 176, not replacing current a1 weights as default since these don't scale as well to higher res, [weights](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth)) * `resnext50_32x4d` - 81.1 @ 224, 82.0 @ 288 * `sebotnet33ts_256` (new) - 81.2 @ 224 * `lamhalobotnet50ts_256` - 81.5 @ 256 From 9bb4c80d2a61812f4ca2a0d665f6147978feed39 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Mon, 22 Nov 2021 12:08:46 -0800 Subject: [PATCH 11/19] Update README.md, missed recent regnetz results --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index e300cc5b..619cffb4 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ I'm fortunate to be able to dedicate significant time and money of my own suppor * A number of updated weights anew new model defs * `eca_halonext26ts` - 79.5 @ 256 * `resnet50_gn` (new) - 80.1 @ 224, 81.3 @ 288 - * `resnet50` - 80.68 @ 224, 80.9 @ 288 (trained at 176, not replacing current a1 weights as default since these don't scale as well to higher res, [weights](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth)) + * `resnet50` - 80.7 @ 224, 80.9 @ 288 (trained at 176, not replacing current a1 weights as default since these don't scale as well to higher res, 
[weights](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth)) * `resnext50_32x4d` - 81.1 @ 224, 82.0 @ 288 * `sebotnet33ts_256` (new) - 81.2 @ 224 * `lamhalobotnet50ts_256` - 81.5 @ 256 @@ -36,6 +36,8 @@ I'm fortunate to be able to dedicate significant time and money of my own suppor * `resnet101` - 82.0 @ 224, 82.8 @ 288 * `resnetv2_101` (new) - 82.1 @ 224, 83.0 @ 288 * `resnet152` - 82.8 @ 224, 83.5 @ 288 + * `regnetz_d8` (new) - 83.5 @ 256, 84.0 @ 320 + * `regnetz_e8` (new) - 84.5 @ 256, 85.0 @ 320 * `vit_base_patch8_224` (85.8 top-1) & `in21k` variant weights added thanks [Martins Bruveris](https://github.com/martinsbruveris) * Groundwork in for FX feature extraction thanks to [Alexander Soare](https://github.com/alexander-soare) * models updated for tracing compatibility (almost full support with some distlled transformer exceptions) From 878bee1d5e3bbc607424e2ef3284bbf7fd219c97 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Mon, 22 Nov 2021 14:00:27 -0800 Subject: [PATCH 12/19] Add patch8 vit model to FX exclusion filter --- tests/test_models.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/test_models.py b/tests/test_models.py index 9fb826c5..c2b151c3 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -339,16 +339,17 @@ EXCLUDE_FX_FILTERS = [] if 'GITHUB_ACTIONS' in os.environ: EXCLUDE_FX_FILTERS += [ 'beit_large*', - 'swin_large*', + 'mixer_l*', + '*nfnet_f2*', '*resnext101_32x32d', 'resnetv2_152x2*', - '*nfnet_f2*', 'resmlp_big*', 'resnetrs270', + 'swin_large*', 'vgg*', 'vit_large*', + 'vit_base_patch8*', 'xcit_large*', - 'mixer_l*', ] From b18c9e323b472dbc01e2334d7c1e98e4d0ef4871 Mon Sep 17 00:00:00 2001 From: Nathan Raw Date: Mon, 22 Nov 2021 23:43:44 -0500 Subject: [PATCH 13/19] Update helpers.py --- timm/models/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/timm/models/helpers.py b/timm/models/helpers.py 
index 4e339e66..fd128252 100644 --- a/timm/models/helpers.py +++ b/timm/models/helpers.py @@ -15,7 +15,7 @@ from torch.hub import load_state_dict_from_url from .features import FeatureListNet, FeatureDictNet, FeatureHookNet from .fx_features import FeatureGraphNet -from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf, load_state_dict_from_url +from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf from .layers import Conv2dSame, Linear From 147e1059a832aaf1bfbe7d530152cba13adf0514 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Tue, 23 Nov 2021 14:32:32 -0800 Subject: [PATCH 14/19] Remove FX backward test from GitHub actions runs for now. --- tests/test_models.py | 60 +++++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/tests/test_models.py b/tests/test_models.py index c2b151c3..18162431 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -386,37 +386,41 @@ def test_model_forward_fx(model_name, batch_size): assert not torch.isnan(outputs).any(), 'Output included NaNs' -@pytest.mark.timeout(120) -@pytest.mark.parametrize('model_name', list_models( - exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS, name_matches_cfg=True)) -@pytest.mark.parametrize('batch_size', [2]) -def test_model_backward_fx(model_name, batch_size): - """Symbolically trace each model and run single backward pass through the resulting GraphModule""" - if not has_fx_feature_extraction: - pytest.skip("Can't test FX. 
Torch >= 1.10 and Torchvision >= 0.11 are required.") - - input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_FX_SIZE) - if max(input_size) > MAX_BWD_FX_SIZE: - pytest.skip("Fixed input size model > limit.") +if 'GITHUB_ACTIONS' not in os.environ: + # FIXME this test is causing GitHub actions to run out of RAM and abruptly kill the test process - model = create_model(model_name, pretrained=False, num_classes=42) - model.train() - num_params = sum([x.numel() for x in model.parameters()]) - if 'GITHUB_ACTIONS' in os.environ and num_params > 100e6: - pytest.skip("Skipping FX backward test on model with more than 100M params.") + @pytest.mark.timeout(120) + @pytest.mark.parametrize('model_name', list_models( + exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS, name_matches_cfg=True)) + @pytest.mark.parametrize('batch_size', [2]) + def test_model_backward_fx(model_name, batch_size): + """Symbolically trace each model and run single backward pass through the resulting GraphModule""" + if not has_fx_feature_extraction: + pytest.skip("Can't test FX. 
Torch >= 1.10 and Torchvision >= 0.11 are required.") + + input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_FX_SIZE) + if max(input_size) > MAX_BWD_FX_SIZE: + pytest.skip("Fixed input size model > limit.") + + model = create_model(model_name, pretrained=False, num_classes=42) + model.train() + num_params = sum([x.numel() for x in model.parameters()]) + if 'GITHUB_ACTIONS' in os.environ and num_params > 100e6: + pytest.skip("Skipping FX backward test on model with more than 100M params.") + + model = _create_fx_model(model, train=True) + outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) + if isinstance(outputs, tuple): + outputs = torch.cat(outputs) + outputs.mean().backward() + for n, x in model.named_parameters(): + assert x.grad is not None, f'No gradient for {n}' + num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None]) - model = _create_fx_model(model, train=True) - outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) - if isinstance(outputs, tuple): - outputs = torch.cat(outputs) - outputs.mean().backward() - for n, x in model.named_parameters(): - assert x.grad is not None, f'No gradient for {n}' - num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None]) + assert outputs.shape[-1] == 42 + assert num_params == num_grad, 'Some parameters are missing gradients' + assert not torch.isnan(outputs).any(), 'Output included NaNs' - assert outputs.shape[-1] == 42 - assert num_params == num_grad, 'Some parameters are missing gradients' - assert not torch.isnan(outputs).any(), 'Output included NaNs' # reason: model is scripted after fx tracing, but beit has torch.jit.is_scripting() control flow EXCLUDE_FX_JIT_FILTERS = [ From d633a014e6362ae354f0aa0227c61033233dc260 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Tue, 23 Nov 2021 16:54:01 -0800 Subject: [PATCH 15/19] Post merge cleanup. 
Fix potential security issue passing kwargs directly through to serialized web data. --- timm/models/helpers.py | 8 ++++---- timm/models/hub.py | 27 ++++++++++++++------------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/timm/models/helpers.py b/timm/models/helpers.py index fd128252..16ce64d0 100644 --- a/timm/models/helpers.py +++ b/timm/models/helpers.py @@ -184,12 +184,12 @@ def load_pretrained(model, default_cfg=None, num_classes=1000, in_chans=3, filte if not pretrained_url and not hf_hub_id: _logger.warning("No pretrained weights exist for this model. Using random initialization.") return - if hf_hub_id and has_hf_hub(necessary=not pretrained_url): - _logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})') - state_dict = load_state_dict_from_hf(hf_hub_id) - else: + if pretrained_url: _logger.info(f'Loading pretrained weights from url ({pretrained_url})') state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu') + elif hf_hub_id and has_hf_hub(necessary=True): + _logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})') + state_dict = load_state_dict_from_hf(hf_hub_id) if filter_fn is not None: # for backwards compat with filter fn that take one arg, try one first, the two try: diff --git a/timm/models/hub.py b/timm/models/hub.py index a436aff6..65e7ba9a 100644 --- a/timm/models/hub.py +++ b/timm/models/hub.py @@ -16,9 +16,10 @@ from timm import __version__ try: from huggingface_hub import HfApi, HfFolder, Repository, cached_download, hf_hub_url cached_download = partial(cached_download, library_name="timm", library_version=__version__) + _has_hf_hub = True except ImportError: - hf_hub_url = None cached_download = None + _has_hf_hub = False _logger = logging.getLogger(__name__) @@ -53,11 +54,11 @@ def download_cached_file(url, check_hash=True, progress=False): def has_hf_hub(necessary=False): - if hf_hub_url is None and necessary: + if not _has_hf_hub and 
necessary: # if no HF Hub module installed and it is necessary to continue, raise error raise RuntimeError( 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') - return hf_hub_url is not None + return _has_hf_hub def hf_split(hf_id): @@ -96,8 +97,9 @@ def load_state_dict_from_hf(model_id: str): return state_dict -def save_pretrained_for_hf(model, save_directory, **config_kwargs): +def save_for_hf(model, save_directory, model_config=None): assert has_hf_hub(True) + model_config = model_config or {} save_directory = Path(save_directory) save_directory.mkdir(exist_ok=True, parents=True) @@ -105,14 +107,14 @@ def save_pretrained_for_hf(model, save_directory, **config_kwargs): torch.save(model.state_dict(), weights_path) config_path = save_directory / 'config.json' - config = model.default_cfg - config['num_classes'] = config_kwargs.pop('num_classes', model.num_classes) - config['num_features'] = config_kwargs.pop('num_features', model.num_features) - config['labels'] = config_kwargs.pop('labels', [f"LABEL_{i}" for i in range(config['num_classes'])]) - config.update(config_kwargs) + hf_config = model.default_cfg + hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes) + hf_config['num_features'] = model_config.pop('num_features', model.num_features) + hf_config['labels'] = model_config.pop('labels', [f"LABEL_{i}" for i in range(hf_config['num_classes'])]) + hf_config.update(model_config) with config_path.open('w') as f: - json.dump(config, f, indent=2) + json.dump(hf_config, f, indent=2) def push_to_hf_hub( @@ -124,9 +126,8 @@ def push_to_hf_hub( git_email=None, git_user=None, revision=None, - **config_kwargs + model_config=None, ): - if repo_namespace_or_url: repo_owner, repo_name = repo_namespace_or_url.rstrip('/').split('/')[-2:] else: @@ -160,7 +161,7 @@ def push_to_hf_hub( readme_text = f'---\ntags:\n- image-classification\n- timm\nlibrary_tag: timm\n---\n# Model card for {repo_name}' with 
repo.commit(commit_message): # Save model weights and config. - save_pretrained_for_hf(model, repo.local_dir, **config_kwargs) + save_for_hf(model, repo.local_dir, model_config=model_config) # Save a model card if it doesn't exist. readme_path = Path(repo.local_dir) / 'README.md' From f83b0b01e3a28f0408431e8e88acda641d22f299 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Tue, 23 Nov 2021 22:24:58 -0800 Subject: [PATCH 16/19] Would like to pass GitHub tests again disabling both FX feature extract backward and torchscript tests --- tests/test_models.py | 56 ++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/tests/test_models.py b/tests/test_models.py index 18162431..4f80612f 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -422,37 +422,37 @@ if 'GITHUB_ACTIONS' not in os.environ: assert not torch.isnan(outputs).any(), 'Output included NaNs' -# reason: model is scripted after fx tracing, but beit has torch.jit.is_scripting() control flow -EXCLUDE_FX_JIT_FILTERS = [ - 'deit_*_distilled_patch16_224', - 'levit*', - 'pit_*_distilled_224', -] + EXCLUDE_FX_FILTERS + # reason: model is scripted after fx tracing, but beit has torch.jit.is_scripting() control flow + EXCLUDE_FX_JIT_FILTERS = [ + 'deit_*_distilled_patch16_224', + 'levit*', + 'pit_*_distilled_224', + ] + EXCLUDE_FX_FILTERS -@pytest.mark.timeout(120) -@pytest.mark.parametrize( - 'model_name', list_models( - exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS + EXCLUDE_FX_JIT_FILTERS, name_matches_cfg=True)) -@pytest.mark.parametrize('batch_size', [1]) -def test_model_forward_fx_torchscript(model_name, batch_size): - """Symbolically trace each model, script it, and run single forward pass""" - if not has_fx_feature_extraction: - pytest.skip("Can't test FX. 
Torch >= 1.10 and Torchvision >= 0.11 are required.") + @pytest.mark.timeout(120) + @pytest.mark.parametrize( + 'model_name', list_models( + exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS + EXCLUDE_FX_JIT_FILTERS, name_matches_cfg=True)) + @pytest.mark.parametrize('batch_size', [1]) + def test_model_forward_fx_torchscript(model_name, batch_size): + """Symbolically trace each model, script it, and run single forward pass""" + if not has_fx_feature_extraction: + pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.") - input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE) - if max(input_size) > MAX_JIT_SIZE: - pytest.skip("Fixed input size model > limit.") + input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE) + if max(input_size) > MAX_JIT_SIZE: + pytest.skip("Fixed input size model > limit.") - with set_scriptable(True): - model = create_model(model_name, pretrained=False) - model.eval() + with set_scriptable(True): + model = create_model(model_name, pretrained=False) + model.eval() - model = torch.jit.script(_create_fx_model(model)) - with torch.no_grad(): - outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) - if isinstance(outputs, tuple): - outputs = torch.cat(outputs) + model = torch.jit.script(_create_fx_model(model)) + with torch.no_grad(): + outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) + if isinstance(outputs, tuple): + outputs = torch.cat(outputs) - assert outputs.shape[0] == batch_size - assert not torch.isnan(outputs).any(), 'Output included NaNs' + assert outputs.shape[0] == batch_size + assert not torch.isnan(outputs).any(), 'Output included NaNs' From 85c5ff26d741b2de29d990e0637b06014ec8ad15 Mon Sep 17 00:00:00 2001 From: Martins Bruveris Date: Wed, 24 Nov 2021 15:02:46 +0200 Subject: [PATCH 17/19] Added DINO pretrained ResMLP models. 
--- timm/models/mlp_mixer.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/timm/models/mlp_mixer.py b/timm/models/mlp_mixer.py index f128b9c9..727b655b 100644 --- a/timm/models/mlp_mixer.py +++ b/timm/models/mlp_mixer.py @@ -128,6 +128,13 @@ default_cfgs = dict( url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_12_224_dino=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224_dino=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + gmlp_ti16_224=_cfg(), gmlp_s16_224=_cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', @@ -589,6 +596,33 @@ def resmlp_big_24_224_in22ft1k(pretrained=False, **kwargs): return model +@register_model +def resmlp_12_224_dino(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + + Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224_dino', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224_dino(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + + Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224_dino', pretrained=pretrained, 
**model_args) + return model + + @register_model def gmlp_ti16_224(pretrained=False, **kwargs): """ gMLP-Tiny From 480c676ffad49092e1a76a58554b20bf7513e75e Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Wed, 24 Nov 2021 09:24:47 -0800 Subject: [PATCH 18/19] Fix FX breaking assert in evonorm --- timm/models/layers/evo_norm.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/timm/models/layers/evo_norm.py b/timm/models/layers/evo_norm.py index 8c08e49f..6ef0c881 100644 --- a/timm/models/layers/evo_norm.py +++ b/timm/models/layers/evo_norm.py @@ -34,18 +34,17 @@ class EvoNormBatch2d(nn.Module): nn.init.ones_(self.v) def forward(self, x): - assert x.dim() == 4, 'expected 4D input' + _assert(x.dim() == 4, 'expected 4D input') x_type = x.dtype - running_var = self.running_var.view(1, -1, 1, 1) - if self.training: - var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True) - n = x.numel() / x.shape[1] - running_var = var.detach() * self.momentum * (n / (n - 1)) + running_var * (1 - self.momentum) - self.running_var.copy_(running_var.view(self.running_var.shape)) - else: - var = running_var - if self.v is not None: + running_var = self.running_var.view(1, -1, 1, 1) + if self.training: + var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True) + n = x.numel() / x.shape[1] + running_var = var.detach() * self.momentum * (n / (n - 1)) + running_var * (1 - self.momentum) + self.running_var.copy_(running_var.view(self.running_var.shape)) + else: + var = running_var v = self.v.to(dtype=x_type).reshape(1, -1, 1, 1) d = x * v + (x.var(dim=(2, 3), unbiased=False, keepdim=True) + self.eps).sqrt().to(dtype=x_type) d = d.max((var + self.eps).sqrt().to(dtype=x_type)) From f7d210d759beb00a3d0834a3ce2d93f6e17f3d38 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Wed, 24 Nov 2021 13:21:03 -0800 Subject: [PATCH 19/19] Remove evonorm models from FX tests --- tests/test_models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_models.py 
b/tests/test_models.py index 4f80612f..80b1101f 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -350,6 +350,7 @@ if 'GITHUB_ACTIONS' in os.environ: 'vit_large*', 'vit_base_patch8*', 'xcit_large*', + '*evob', '*evos', # remove experimental evonorm models; they seem to cause issues with dtype manipulation ]