# Big Transfer (BiT)
**Big Transfer (BiT)** is a pretraining recipe that pre-trains on a large supervised source dataset and then fine-tunes the weights on the target task. The models in this collection are pre-trained on the JFT-300M dataset and fine-tuned on ImageNet.
<h2 id="how-do-i-use-this-model-on-an-image">How do I use this model on an image?</h2>
<p>To load a pretrained model:</p>
<div class="highlight"><pre><span></span><code><span class="kn">import</span> <span class="nn">timm</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">timm</span><span class="o">.</span><span class="n">create_model</span><span class="p">(</span><span class="s1">&#39;resnetv2_101x1_bitm&#39;</span><span class="p">,</span> <span class="n">pretrained</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">model</span><span class="o">.</span><span class="n">eval</span><span class="p">()</span>
</code></pre></div>
To load and preprocess the image:

```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0)  # transform and add batch dimension
```
To get the model predictions:

```python
import torch
with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```
To get the top-5 predictions class names:

```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)

with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `resnetv2_101x1_bitm`. You can find the IDs in the model summaries at the top of this page.
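To see which BiT variants your installed timm version actually ships with, you can also query the model registry; a minimal sketch (the wildcard pattern here is just an illustration):

```python
import timm

# List pretrained ResNetV2 BiT variants registered in this timm version
print(timm.list_models('resnetv2_*bitm*', pretrained=True))
```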
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
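For example, a minimal sketch of multi-scale feature extraction via timm's `features_only` interface (assuming this variant exposes intermediate feature maps through that interface, as most timm backbones do):

```python
import timm
import torch

# Build a backbone that returns intermediate feature maps instead of logits
backbone = timm.create_model('resnetv2_101x1_bitm', pretrained=True, features_only=True)
backbone.eval()

with torch.no_grad():
    feature_maps = backbone(torch.randn(1, 3, 480, 480))  # dummy input at the model's native 480x480 size
for fm in feature_maps:
    print(fm.shape)  # one tensor per feature stage
```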
<h2 id="how-do-i-finetune-this-model">How do I finetune this model?</h2>
<p>You can finetune any of the pre-trained models just by changing the classifier (the last layer).
<div class="highlight"><pre><span></span><code><span class="n">model</span> <span class="o">=</span> <span class="n">timm</span><span class="o">.</span><span class="n">create_model</span><span class="p">(</span><span class="s1">&#39;resnetv2_101x1_bitm&#39;</span><span class="p">,</span> <span class="n">pretrained</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">num_classes</span><span class="o">=</span><span class="n">NUM_FINETUNE_CLASSES</span><span class="p">)</span>
</code></pre></div>
To finetune on your own dataset, you have to write a training loop or adapt <a href="https://github.com/rwightman/pytorch-image-models/blob/master/train.py">timm's training
script</a> to use your dataset.</p>
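If you write the loop yourself, something like the following is enough to get started. `NUM_FINETUNE_CLASSES`, the hyperparameters, and the stand-in random data are all placeholders; swap in a `DataLoader` over your own dataset and tune the learning rate for your task:

```python
import timm
import torch
from torch.utils.data import DataLoader, TensorDataset

NUM_FINETUNE_CLASSES = 10  # placeholder: set to your dataset's class count

model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
model.train()

optimizer = torch.optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
criterion = torch.nn.CrossEntropyLoss()

# Stand-in data so the sketch runs end to end; replace with your own dataset
train_loader = DataLoader(
    TensorDataset(torch.randn(8, 3, 480, 480), torch.randint(0, NUM_FINETUNE_CLASSES, (8,))),
    batch_size=4,
)

for images, labels in train_loader:
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()
```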
<h2 id="how-do-i-train-this-model">How do I train this model?</h2>
<p>You can follow the <a href="https://rwightman.github.io/pytorch-image-models/scripts/">timm recipe scripts</a> for training a new model afresh.</p>
<h2 id="citation">Citation</h2>
<div class="highlight"><pre><span></span><code><span class="nc">@misc</span><span class="p">{</span><span class="nl">kolesnikov2020big</span><span class="p">,</span>
<span class="w"> </span><span class="na">title</span><span class="p">=</span><span class="s">{Big Transfer (BiT): General Visual Representation Learning}</span><span class="p">,</span><span class="w"> </span>
<span class="w"> </span><span class="na">author</span><span class="p">=</span><span class="s">{Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}</span><span class="p">,</span>
<span class="w"> </span><span class="na">year</span><span class="p">=</span><span class="s">{2020}</span><span class="p">,</span>
<span class="w"> </span><span class="na">eprint</span><span class="p">=</span><span class="s">{1912.11370}</span><span class="p">,</span>
<span class="w"> </span><span class="na">archivePrefix</span><span class="p">=</span><span class="s">{arXiv}</span><span class="p">,</span>
<span class="w"> </span><span class="na">primaryClass</span><span class="p">=</span><span class="s">{cs.CV}</span>
<span class="p">}</span>
</code></pre></div>
<!--
Type: model-index
Collections:
- Name: Big Transfer
  Paper:
    Title: 'Big Transfer (BiT): General Visual Representation Learning'
    URL: https://paperswithcode.com/paper/large-scale-learning-of-general-visual
Models:
- Name: resnetv2_101x1_bitm
  In Collection: Big Transfer
  Metadata:
    FLOPs: 5330896
    Parameters: 44540000
    File Size: 178256468
    Architecture:
    - 1x1 Convolution
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Group Normalization
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Weight Standardization
    Tasks:
    - Image Classification
    Training Techniques:
    - Mixup
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    - JFT-300M
    Training Resources: Cloud TPUv3-512
    ID: resnetv2_101x1_bitm
    LR: 0.03
    Epochs: 90
    Layers: 101
    Crop Pct: '1.0'
    Momentum: 0.9
    Batch Size: 4096
    Image Size: '480'
    Weight Decay: 0.0001
    Interpolation: bilinear
    Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L444
    Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 82.21%
      Top 5 Accuracy: 96.47%
- Name: resnetv2_101x3_bitm
  In Collection: Big Transfer
  Metadata:
    FLOPs: 15988688
    Parameters: 387930000
    File Size: 1551830100
    Architecture:
    - 1x1 Convolution
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Group Normalization
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Weight Standardization
    Tasks:
    - Image Classification
    Training Techniques:
    - Mixup
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    - JFT-300M
    Training Resources: Cloud TPUv3-512
    ID: resnetv2_101x3_bitm
    LR: 0.03
    Epochs: 90
    Layers: 101
    Crop Pct: '1.0'
    Momentum: 0.9
    Batch Size: 4096
    Image Size: '480'
    Weight Decay: 0.0001
    Interpolation: bilinear
    Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L451
    Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 84.38%
      Top 5 Accuracy: 97.37%
- Name: resnetv2_152x2_bitm
  In Collection: Big Transfer
  Metadata:
    FLOPs: 10659792
    Parameters: 236340000
    File Size: 945476668
    Architecture:
    - 1x1 Convolution
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Group Normalization
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Weight Standardization
    Tasks:
    - Image Classification
    Training Techniques:
    - Mixup
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    - JFT-300M
    ID: resnetv2_152x2_bitm
    Crop Pct: '1.0'
    Image Size: '480'
    Interpolation: bilinear
    Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L458
    Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 84.4%
      Top 5 Accuracy: 97.43%
- Name: resnetv2_152x4_bitm
  In Collection: Big Transfer
  Metadata:
    FLOPs: 21317584
    Parameters: 936530000
    File Size: 3746270104
    Architecture:
    - 1x1 Convolution
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Group Normalization
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Weight Standardization
    Tasks:
    - Image Classification
    Training Techniques:
    - Mixup
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    - JFT-300M
    Training Resources: Cloud TPUv3-512
    ID: resnetv2_152x4_bitm
    Crop Pct: '1.0'
    Image Size: '480'
    Interpolation: bilinear
    Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L465
    Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 84.95%
      Top 5 Accuracy: 97.45%
- Name: resnetv2_50x1_bitm
  In Collection: Big Transfer
  Metadata:
    FLOPs: 5330896
    Parameters: 25550000
    File Size: 102242668
    Architecture:
    - 1x1 Convolution
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Group Normalization
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Weight Standardization
    Tasks:
    - Image Classification
    Training Techniques:
    - Mixup
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    - JFT-300M
    Training Resources: Cloud TPUv3-512
    ID: resnetv2_50x1_bitm
    LR: 0.03
    Epochs: 90
    Layers: 50
    Crop Pct: '1.0'
    Momentum: 0.9
    Batch Size: 4096
    Image Size: '480'
    Weight Decay: 0.0001
    Interpolation: bilinear
    Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L430
    Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 80.19%
      Top 5 Accuracy: 95.63%
- Name: resnetv2_50x3_bitm
  In Collection: Big Transfer
  Metadata:
    FLOPs: 15988688
    Parameters: 217320000
    File Size: 869321580
    Architecture:
    - 1x1 Convolution
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Group Normalization
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Weight Standardization
    Tasks:
    - Image Classification
    Training Techniques:
    - Mixup
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    - JFT-300M
    Training Resources: Cloud TPUv3-512
    ID: resnetv2_50x3_bitm
    LR: 0.03
    Epochs: 90
    Layers: 50
    Crop Pct: '1.0'
    Momentum: 0.9
    Batch Size: 4096
    Image Size: '480'
    Weight Decay: 0.0001
    Interpolation: bilinear
    Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L437
    Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 83.75%
      Top 5 Accuracy: 97.12%
-->