<!doctype html>
<html lang="en" class="no-js">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width,initial-scale=1">
  <!-- fixed typo: "Pretained" -> "Pretrained" -->
  <meta name="description" content="Pretrained Image Recognition Models">
  <link rel="icon" href="../../assets/images/favicon.png">
  <meta name="generator" content="mkdocs-1.3.0, mkdocs-material-8.2.9">
  <title>(Gluon) ResNet - Pytorch Image Models</title>
  <link rel="stylesheet" href="../../assets/stylesheets/main.120efc48.min.css">
  <link rel="stylesheet" href="../../assets/stylesheets/palette.9647289d.min.css">
  <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
  <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300i,400,400i,700,700i%7CRoboto+Mono:400,400i,700,700i&amp;display=fallback">
  <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
  <!-- mkdocs-material localStorage helpers, scoped to the site root -->
  <script>__md_scope=new URL("../..",location),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
</head>
<body dir="ltr" data-md-color-scheme="" data-md-color-primary="none" data-md-color-accent="none">
  <!-- CSS-only drawer/search toggles used by the material theme -->
  <input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer" autocomplete="off">
  <input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search" autocomplete="off">
  <label class="md-overlay" for="__drawer"></label>
  <div data-md-component="skip">
    <a href="#gluon-resnet" class="md-skip">
      Skip to content
    </a>
  </div>
  <div data-md-component="announce">
  </div>
<header class="md-header" data-md-component="header">
  <nav class="md-header__inner md-grid" aria-label="Header">
    <a href="../.." title="Pytorch Image Models" class="md-header__button md-logo" aria-label="Pytorch Image Models" data-md-component="logo">
      <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 8a3 3 0 0 0 3-3 3 3 0 0 0-3-3 3 3 0 0 0-3 3 3 3 0 0 0 3 3m0 3.54C9.64 9.35 6.5 8 3 8v11c3.5 0 6.64 1.35 9 3.54 2.36-2.19 5.5-3.54 9-3.54V8c-3.5 0-6.64 1.35-9 3.54Z"/></svg>
    </a>
    <!-- hamburger: toggles the CSS-only drawer checkbox -->
    <label class="md-header__button md-icon" for="__drawer">
      <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M3 6h18v2H3V6m0 5h18v2H3v-2m0 5h18v2H3v-2Z"/></svg>
    </label>
    <div class="md-header__title" data-md-component="header-title">
      <div class="md-header__ellipsis">
        <div class="md-header__topic">
          <span class="md-ellipsis">
            Pytorch Image Models
          </span>
        </div>
        <div class="md-header__topic" data-md-component="header-topic">
          <span class="md-ellipsis">
            (Gluon) ResNet
          </span>
        </div>
      </div>
    </div>
    <label class="md-header__button md-icon" for="__search">
      <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg>
    </label>
    <div class="md-search" data-md-component="search" role="dialog">
      <label class="md-search__overlay" for="__search"></label>
      <div class="md-search__inner" role="search">
        <form class="md-search__form" name="search">
          <input type="text" class="md-search__input" name="query" aria-label="Search" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="search-query" required>
          <label class="md-search__icon md-icon" for="__search">
            <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg>
            <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg>
          </label>
          <nav class="md-search__options" aria-label="Search">
            <button type="reset" class="md-search__icon md-icon" aria-label="Clear" tabindex="-1">
              <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M19 6.41 17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41Z"/></svg>
            </button>
          </nav>
        </form>
        <div class="md-search__output">
          <div class="md-search__scrollwrap" data-md-scrollfix>
            <div class="md-search-result" data-md-component="search-result">
              <div class="md-search-result__meta">
                Initializing search
              </div>
              <ol class="md-search-result__list"></ol>
            </div>
          </div>
        </div>
      </div>
    </div>
    <div class="md-header__source">
      <a href="https://github.com/rwightman/pytorch-image-models" title="Go to repository" class="md-source" data-md-component="source">
        <div class="md-source__icon md-icon">
          <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.1.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M439.55 236.05 244 40.45a28.87 28.87 0 0 0-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 0 1-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 0 0 0 40.81l195.61 195.6a28.86 28.86 0 0 0 40.8 0l194.69-194.69a28.86 28.86 0 0 0 0-40.81z"/></svg>
        </div>
        <div class="md-source__repository">
          rwightman/pytorch-image-models
        </div>
      </a>
    </div>
  </nav>
</header>
<div class="md-container" data-md-component="container">
  <main class="md-main" data-md-component="main">
    <div class="md-main__inner md-grid">
      <div class="md-sidebar md-sidebar--primary" data-md-component="sidebar" data-md-type="navigation">
        <div class="md-sidebar__scrollwrap">
          <div class="md-sidebar__inner">
            <nav class="md-nav md-nav--primary" aria-label="Navigation" data-md-level="0">
              <label class="md-nav__title" for="__drawer">
                <a href="../.." title="Pytorch Image Models" class="md-nav__button md-logo" aria-label="Pytorch Image Models" data-md-component="logo">
                  <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 8a3 3 0 0 0 3-3 3 3 0 0 0-3-3 3 3 0 0 0-3 3 3 3 0 0 0 3 3m0 3.54C9.64 9.35 6.5 8 3 8v11c3.5 0 6.64 1.35 9 3.54 2.36-2.19 5.5-3.54 9-3.54V8c-3.5 0-6.64 1.35-9 3.54Z"/></svg>
                </a>
                Pytorch Image Models
              </label>
              <div class="md-nav__source">
                <a href="https://github.com/rwightman/pytorch-image-models" title="Go to repository" class="md-source" data-md-component="source">
                  <div class="md-source__icon md-icon">
                    <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.1.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M439.55 236.05 244 40.45a28.87 28.87 0 0 0-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 0 1-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 0 0 0 40.81l195.61 195.6a28.86 28.86 0 0 0 40.8 0l194.69-194.69a28.86 28.86 0 0 0 0-40.81z"/></svg>
                  </div>
                  <div class="md-source__repository">
                    rwightman/pytorch-image-models
                  </div>
                </a>
              </div>
<ul class="md-nav__list" data-md-scrollfix>
  <li class="md-nav__item"><a href="../.." class="md-nav__link">Getting Started</a></li>
  <li class="md-nav__item"><a href="../" class="md-nav__link">Model Summaries</a></li>
  <li class="md-nav__item md-nav__item--active md-nav__item--nested">
    <!-- "checked" keeps the Model Pages section expanded because the active page lives inside it -->
    <input class="md-nav__toggle md-toggle" data-md-toggle="__nav_3" type="checkbox" id="__nav_3" checked>
    <label class="md-nav__link" for="__nav_3">
      Model Pages
      <span class="md-nav__icon md-icon"></span>
    </label>
    <nav class="md-nav" aria-label="Model Pages" data-md-level="1">
      <label class="md-nav__title" for="__nav_3">
        <span class="md-nav__icon md-icon"></span>
        Model Pages
      </label>
      <ul class="md-nav__list" data-md-scrollfix>
        <li class="md-nav__item"><a href="../adversarial-inception-v3/" class="md-nav__link">Adversarial Inception v3</a></li>
        <li class="md-nav__item"><a href="../advprop/" class="md-nav__link">AdvProp (EfficientNet)</a></li>
        <li class="md-nav__item"><a href="../big-transfer/" class="md-nav__link">Big Transfer (BiT)</a></li>
        <li class="md-nav__item"><a href="../csp-darknet/" class="md-nav__link">CSP-DarkNet</a></li>
        <li class="md-nav__item"><a href="../csp-resnet/" class="md-nav__link">CSP-ResNet</a></li>
        <li class="md-nav__item"><a href="../csp-resnext/" class="md-nav__link">CSP-ResNeXt</a></li>
        <li class="md-nav__item"><a href="../densenet/" class="md-nav__link">DenseNet</a></li>
        <li class="md-nav__item"><a href="../dla/" class="md-nav__link">Deep Layer Aggregation</a></li>
        <li class="md-nav__item"><a href="../dpn/" class="md-nav__link">Dual Path Network (DPN)</a></li>
        <li class="md-nav__item"><a href="../ecaresnet/" class="md-nav__link">ECA-ResNet</a></li>
        <li class="md-nav__item"><a href="../efficientnet-pruned/" class="md-nav__link">EfficientNet (Knapsack Pruned)</a></li>
        <li class="md-nav__item"><a href="../efficientnet/" class="md-nav__link">EfficientNet</a></li>
        <li class="md-nav__item"><a href="../ensemble-adversarial/" class="md-nav__link">Ensemble Adversarial Inception ResNet v2</a></li>
        <li class="md-nav__item"><a href="../ese-vovnet/" class="md-nav__link">ESE-VoVNet</a></li>
        <li class="md-nav__item"><a href="../fbnet/" class="md-nav__link">FBNet</a></li>
        <li class="md-nav__item"><a href="../gloun-inception-v3/" class="md-nav__link">(Gluon) Inception v3</a></li>
        <li class="md-nav__item md-nav__item--active">
          <!-- current page: carries the in-page table of contents for mobile -->
          <input class="md-nav__toggle md-toggle" data-md-toggle="toc" type="checkbox" id="__toc">
          <label class="md-nav__link md-nav__link--active" for="__toc">
            (Gluon) ResNet
            <span class="md-nav__icon md-icon"></span>
          </label>
          <a href="./" class="md-nav__link md-nav__link--active">(Gluon) ResNet</a>
          <nav class="md-nav md-nav--secondary" aria-label="Table of contents">
            <label class="md-nav__title" for="__toc">
              <span class="md-nav__icon md-icon"></span>
              Table of contents
            </label>
            <ul class="md-nav__list" data-md-component="toc" data-md-scrollfix>
              <li class="md-nav__item"><a href="#how-do-i-use-this-model-on-an-image" class="md-nav__link">How do I use this model on an image?</a></li>
              <li class="md-nav__item"><a href="#how-do-i-finetune-this-model" class="md-nav__link">How do I finetune this model?</a></li>
              <li class="md-nav__item"><a href="#how-do-i-train-this-model" class="md-nav__link">How do I train this model?</a></li>
              <li class="md-nav__item"><a href="#citation" class="md-nav__link">Citation</a></li>
            </ul>
          </nav>
        </li>
        <li class="md-nav__item"><a href="../gloun-resnext/" class="md-nav__link">(Gluon) ResNeXt</a></li>
        <li class="md-nav__item"><a href="../gloun-senet/" class="md-nav__link">(Gluon) SENet</a></li>
        <li class="md-nav__item"><a href="../gloun-seresnext/" class="md-nav__link">(Gluon) SE-ResNeXt</a></li>
        <li class="md-nav__item"><a href="../gloun-xception/" class="md-nav__link">(Gluon) Xception</a></li>
        <li class="md-nav__item"><a href="../hrnet/" class="md-nav__link">HRNet</a></li>
        <li class="md-nav__item"><a href="../ig-resnext/" class="md-nav__link">Instagram ResNeXt WSL</a></li>
        <li class="md-nav__item"><a href="../inception-resnet-v2/" class="md-nav__link">Inception ResNet v2</a></li>
        <li class="md-nav__item"><a href="../inception-v3/" class="md-nav__link">Inception v3</a></li>
        <li class="md-nav__item"><a href="../inception-v4/" class="md-nav__link">Inception v4</a></li>
        <li class="md-nav__item"><a href="../legacy-se-resnet/" class="md-nav__link">(Legacy) SE-ResNet</a></li>
        <li class="md-nav__item"><a href="../legacy-se-resnext/" class="md-nav__link">(Legacy) SE-ResNeXt</a></li>
        <li class="md-nav__item"><a href="../legacy-senet/" class="md-nav__link">(Legacy) SENet</a></li>
        <li class="md-nav__item"><a href="../mixnet/" class="md-nav__link">MixNet</a></li>
        <li class="md-nav__item"><a href="../mnasnet/" class="md-nav__link">MnasNet</a></li>
        <li class="md-nav__item"><a href="../mobilenet-v2/" class="md-nav__link">MobileNet v2</a></li>
        <li class="md-nav__item"><a href="../mobilenet-v3/" class="md-nav__link">MobileNet v3</a></li>
        <li class="md-nav__item"><a href="../nasnet/" class="md-nav__link">NASNet</a></li>
        <li class="md-nav__item"><a href="../noisy-student/" class="md-nav__link">Noisy Student (EfficientNet)</a></li>
        <li class="md-nav__item"><a href="../pnasnet/" class="md-nav__link">PNASNet</a></li>
        <li class="md-nav__item"><a href="../regnetx/" class="md-nav__link">RegNetX</a></li>
        <li class="md-nav__item"><a href="../regnety/" class="md-nav__link">RegNetY</a></li>
        <li class="md-nav__item"><a href="../res2net/" class="md-nav__link">Res2Net</a></li>
        <li class="md-nav__item"><a href="../res2next/" class="md-nav__link">Res2NeXt</a></li>
        <li class="md-nav__item"><a href="../resnest/" class="md-nav__link">ResNeSt</a></li>
        <li class="md-nav__item"><a href="../resnet-d/" class="md-nav__link">ResNet-D</a></li>
        <li class="md-nav__item"><a href="../resnet/" class="md-nav__link">ResNet</a></li>
        <li class="md-nav__item"><a href="../resnext/" class="md-nav__link">ResNeXt</a></li>
        <li class="md-nav__item"><a href="../rexnet/" class="md-nav__link">RexNet</a></li>
        <li class="md-nav__item"><a href="../se-resnet/" class="md-nav__link">SE-ResNet</a></li>
        <li class="md-nav__item"><a href="../selecsls/" class="md-nav__link">SelecSLS</a></li>
        <li class="md-nav__item"><a href="../seresnext/" class="md-nav__link">SE-ResNeXt</a></li>
        <li class="md-nav__item"><a href="../skresnet/" class="md-nav__link">SK-ResNet</a></li>
        <li class="md-nav__item"><a href="../skresnext/" class="md-nav__link">SK-ResNeXt</a></li>
        <li class="md-nav__item"><a href="../spnasnet/" class="md-nav__link">SPNASNet</a></li>
        <li class="md-nav__item"><a href="../ssl-resnet/" class="md-nav__link">SSL ResNet</a></li>
        <li class="md-nav__item"><a href="../ssl-resnext/" class="md-nav__link">SSL ResNeXT</a></li>
        <li class="md-nav__item"><a href="../swsl-resnet/" class="md-nav__link">SWSL ResNet</a></li>
        <li class="md-nav__item"><a href="../swsl-resnext/" class="md-nav__link">SWSL ResNeXt</a></li>
        <li class="md-nav__item"><a href="../tf-efficientnet-condconv/" class="md-nav__link">(Tensorflow) EfficientNet CondConv</a></li>
        <li class="md-nav__item"><a href="../tf-efficientnet-lite/" class="md-nav__link">(Tensorflow) EfficientNet Lite</a></li>
        <li class="md-nav__item"><a href="../tf-efficientnet/" class="md-nav__link">(Tensorflow) EfficientNet</a></li>
        <li class="md-nav__item"><a href="../tf-inception-v3/" class="md-nav__link">(Tensorflow) Inception v3</a></li>
        <li class="md-nav__item"><a href="../tf-mixnet/" class="md-nav__link">(Tensorflow) MixNet</a></li>
        <li class="md-nav__item"><a href="../tf-mobilenet-v3/" class="md-nav__link">(Tensorflow) MobileNet v3</a></li>
        <li class="md-nav__item"><a href="../tresnet/" class="md-nav__link">TResNet</a></li>
        <li class="md-nav__item"><a href="../vision-transformer/" class="md-nav__link">Vision Transformer (ViT)</a></li>
        <li class="md-nav__item"><a href="../wide-resnet/" class="md-nav__link">Wide ResNet</a></li>
        <li class="md-nav__item"><a href="../xception/" class="md-nav__link">Xception</a></li>
      </ul>
    </nav>
  </li>
  <li class="md-nav__item"><a href="../../results/" class="md-nav__link">Results</a></li>
  <li class="md-nav__item"><a href="../../scripts/" class="md-nav__link">Scripts</a></li>
  <li class="md-nav__item"><a href="../../training_hparam_examples/" class="md-nav__link">Training Examples</a></li>
  <li class="md-nav__item"><a href="../../feature_extraction/" class="md-nav__link">Feature Extraction</a></li>
  <li class="md-nav__item"><a href="../../changes/" class="md-nav__link">Recent Changes</a></li>
  <li class="md-nav__item"><a href="../../archived_changes/" class="md-nav__link">Archived Changes</a></li>
</ul>
< / nav >
< / div >
< / div >
< / div >
< div class = "md-sidebar md-sidebar--secondary" data-md-component = "sidebar" data-md-type = "toc" >
< div class = "md-sidebar__scrollwrap" >
< div class = "md-sidebar__inner" >
< nav class = "md-nav md-nav--secondary" aria-label = "Table of contents" >
< label class = "md-nav__title" for = "__toc" >
< span class = "md-nav__icon md-icon" > < / span >
Table of contents
< / label >
< ul class = "md-nav__list" data-md-component = "toc" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "#how-do-i-use-this-model-on-an-image" class = "md-nav__link" >
How do I use this model on an image?
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#how-do-i-finetune-this-model" class = "md-nav__link" >
How do I finetune this model?
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#how-do-i-train-this-model" class = "md-nav__link" >
How do I train this model?
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#citation" class = "md-nav__link" >
Citation
< / a >
< / li >
< / ul >
< / nav >
< / div >
< / div >
< / div >
<div class="md-content" data-md-component="content">
  <article class="md-content__inner md-typeset">
    <a href="https://github.com/rwightman/pytorch-image-models/edit/master/docs/models/gloun-resnet.md" title="Edit this page" class="md-content__button md-icon">
      <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20.71 7.04c.39-.39.39-1.04 0-1.41l-2.34-2.34c-.37-.39-1.02-.39-1.41 0l-1.84 1.83 3.75 3.75M3 17.25V21h3.75L17.81 9.93l-3.75-3.75L3 17.25Z"/></svg>
    </a>
    <h1 id="gluon-resnet">(Gluon) ResNet</h1>
    <p><strong>Residual Networks</strong>, or <strong>ResNets</strong>, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack <a href="https://paperswithcode.com/method/residual-block">residual blocks</a> on top of each other to form networks: e.g. a ResNet-50 has fifty layers using these blocks.</p>
    <p>The weights from this model were ported from <a href="https://cv.gluon.ai/model_zoo/classification.html">Gluon</a>.</p>
    <h2 id="how-do-i-use-this-model-on-an-image">How do I use this model on an image?</h2>
    <p>To load a pretrained model:</p>
    <div class="highlight"><pre><span></span><code><span class="kn">import</span> <span class="nn">timm</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">timm</span><span class="o">.</span><span class="n">create_model</span><span class="p">(</span><span class="s1">'gluon_resnet101_v1b'</span><span class="p">,</span> <span class="n">pretrained</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">model</span><span class="o">.</span><span class="n">eval</span><span class="p">()</span>
</code></pre></div>
    <!-- fixed: paragraphs closed before each highlight div (a <div> may not nest in a <p>) -->
    <p>To load and preprocess the image:</p>
    <!-- fixed documented snippet: bare "import urllib" does not expose urllib.request -->
    <div class="highlight"><pre><span></span><code><span class="kn">import</span> <span class="nn">urllib.request</span>
<span class="kn">from</span> <span class="nn">PIL</span> <span class="kn">import</span> <span class="n">Image</span>
<span class="kn">from</span> <span class="nn">timm.data</span> <span class="kn">import</span> <span class="n">resolve_data_config</span>
<span class="kn">from</span> <span class="nn">timm.data.transforms_factory</span> <span class="kn">import</span> <span class="n">create_transform</span>
<span class="n">config</span> <span class="o">=</span> <span class="n">resolve_data_config</span><span class="p">({},</span> <span class="n">model</span><span class="o">=</span><span class="n">model</span><span class="p">)</span>
<span class="n">transform</span> <span class="o">=</span> <span class="n">create_transform</span><span class="p">(</span><span class="o">**</span><span class="n">config</span><span class="p">)</span>
<span class="n">url</span><span class="p">,</span> <span class="n">filename</span> <span class="o">=</span> <span class="p">(</span><span class="s2">"https://github.com/pytorch/hub/raw/master/images/dog.jpg"</span><span class="p">,</span> <span class="s2">"dog.jpg"</span><span class="p">)</span>
<span class="n">urllib</span><span class="o">.</span><span class="n">request</span><span class="o">.</span><span class="n">urlretrieve</span><span class="p">(</span><span class="n">url</span><span class="p">,</span> <span class="n">filename</span><span class="p">)</span>
<span class="n">img</span> <span class="o">=</span> <span class="n">Image</span><span class="o">.</span><span class="n">open</span><span class="p">(</span><span class="n">filename</span><span class="p">)</span><span class="o">.</span><span class="n">convert</span><span class="p">(</span><span class="s1">'RGB'</span><span class="p">)</span>
<span class="n">tensor</span> <span class="o">=</span> <span class="n">transform</span><span class="p">(</span><span class="n">img</span><span class="p">)</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span> <span class="c1"># transform and add batch dimension</span>
</code></pre></div>
    <p>To get the model predictions:</p>
    <div class="highlight"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
    <span class="n">out</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">tensor</span><span class="p">)</span>
<span class="n">probabilities</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">functional</span><span class="o">.</span><span class="n">softmax</span><span class="p">(</span><span class="n">out</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">probabilities</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span>
<span class="c1"># prints: torch.Size([1000])</span>
</code></pre></div>
    <p>To get the top-5 predictions class names:</p>
    <div class="highlight"><pre><span></span><code><span class="c1"># Get imagenet class mappings</span>
<span class="n">url</span><span class="p">,</span> <span class="n">filename</span> <span class="o">=</span> <span class="p">(</span><span class="s2">"https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"</span><span class="p">,</span> <span class="s2">"imagenet_classes.txt"</span><span class="p">)</span>
<span class="n">urllib</span><span class="o">.</span><span class="n">request</span><span class="o">.</span><span class="n">urlretrieve</span><span class="p">(</span><span class="n">url</span><span class="p">,</span> <span class="n">filename</span><span class="p">)</span>
<span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="s2">"imagenet_classes.txt"</span><span class="p">,</span> <span class="s2">"r"</span><span class="p">)</span> <span class="k">as</span> <span class="n">f</span><span class="p">:</span>
    <span class="n">categories</span> <span class="o">=</span> <span class="p">[</span><span class="n">s</span><span class="o">.</span><span class="n">strip</span><span class="p">()</span> <span class="k">for</span> <span class="n">s</span> <span class="ow">in</span> <span class="n">f</span><span class="o">.</span><span class="n">readlines</span><span class="p">()]</span>
<span class="c1"># Print top categories per image</span>
<span class="n">top5_prob</span><span class="p">,</span> <span class="n">top5_catid</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">topk</span><span class="p">(</span><span class="n">probabilities</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">top5_prob</span><span class="o">.</span><span class="n">size</span><span class="p">(</span><span class="mi">0</span><span class="p">)):</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">categories</span><span class="p">[</span><span class="n">top5_catid</span><span class="p">[</span><span class="n">i</span><span class="p">]],</span> <span class="n">top5_prob</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">item</span><span class="p">())</span>
<span class="c1"># prints class names and probabilities like:</span>
<span class="c1"># [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]</span>
</code></pre></div>
    <p>Replace the model name with the variant you want to use, e.g. <code>gluon_resnet101_v1b</code>. You can find the IDs in the model summaries at the top of this page.</p>
    <p>To extract image features with this model, follow the <a href="https://rwightman.github.io/pytorch-image-models/feature_extraction/">timm feature extraction examples</a>, just change the name of the model you want to use.</p>
    <h2 id="how-do-i-finetune-this-model">How do I finetune this model?</h2>
    <p>You can finetune any of the pre-trained models just by changing the classifier (the last layer).</p>
    <div class="highlight"><pre><span></span><code><span class="n">model</span> <span class="o">=</span> <span class="n">timm</span><span class="o">.</span><span class="n">create_model</span><span class="p">(</span><span class="s1">'gluon_resnet101_v1b'</span><span class="p">,</span> <span class="n">pretrained</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">num_classes</span><span class="o">=</span><span class="n">NUM_FINETUNE_CLASSES</span><span class="p">)</span>
</code></pre></div>
    <p>To finetune on your own dataset, you have to write a training loop or adapt <a href="https://github.com/rwightman/pytorch-image-models/blob/master/train.py">timm's training script</a> to use your dataset.</p>
    <h2 id="how-do-i-train-this-model">How do I train this model?</h2>
    <p>You can follow the <a href="https://rwightman.github.io/pytorch-image-models/scripts/">timm recipe scripts</a> for training a new model afresh.</p>
    <h2 id="citation">Citation</h2>
< div class = "highlight" > < pre > < span > < / span > < code > < span class = "nc" > @article< / span > < span class = "p" > {< / span > < span class = "nl" > DBLP:journals/corr/HeZRS15< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > author< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {Kaiming He and< / span >
< span class = "s" > Xiangyu Zhang and< / span >
< span class = "s" > Shaoqing Ren and< / span >
< span class = "s" > Jian Sun}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > title< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {Deep Residual Learning for Image Recognition}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > journal< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {CoRR}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > volume< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {abs/1512.03385}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > year< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {2015}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > url< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {http://arxiv.org/abs/1512.03385}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > archivePrefix< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {arXiv}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > eprint< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {1512.03385}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > timestamp< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {Wed, 17 Apr 2019 17:23:45 +0200}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > biburl< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {https://dblp.org/rec/journals/corr/HeZRS15.bib}< / span > < span class = "p" > ,< / span > < span class = "w" > < / span >
< span class = "w" > < / span > < span class = "na" > bibsource< / span > < span class = "w" > < / span > < span class = "p" > =< / span > < span class = "w" > < / span > < span class = "s" > {dblp computer science bibliography, https://dblp.org}< / span > < span class = "w" > < / span >
< span class = "p" > }< / span > < span class = "w" > < / span >
< / code > < / pre > < / div >
<!--
Type: model-index
Collections:
- Name: Gloun ResNet
Paper:
Title: Deep Residual Learning for Image Recognition
URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition
Models:
- Name: gluon_resnet101_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 10068547584
Parameters: 44550000
File Size: 178723172
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L89
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.3%
Top 5 Accuracy: 94.53%
- Name: gluon_resnet101_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 10376567296
Parameters: 44570000
File Size: 178802575
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L113
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.53%
Top 5 Accuracy: 94.59%
- Name: gluon_resnet101_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 10377018880
Parameters: 44570000
File Size: 178802755
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L138
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.4%
Top 5 Accuracy: 95.02%
- Name: gluon_resnet101_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 11805511680
Parameters: 44670000
File Size: 179221777
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L166
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.29%
Top 5 Accuracy: 95.16%
- Name: gluon_resnet152_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 14857660416
Parameters: 60190000
File Size: 241534001
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L97
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.69%
Top 5 Accuracy: 94.73%
- Name: gluon_resnet152_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 15165680128
Parameters: 60210000
File Size: 241613404
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L121
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.91%
Top 5 Accuracy: 94.85%
- Name: gluon_resnet152_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 15166131712
Parameters: 60210000
File Size: 241613584
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L147
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.48%
Top 5 Accuracy: 95.2%
- Name: gluon_resnet152_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 16594624512
Parameters: 60320000
File Size: 242032606
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L175
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.02%
Top 5 Accuracy: 95.42%
- Name: gluon_resnet18_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 2337073152
Parameters: 11690000
File Size: 46816736
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet18_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L65
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 70.84%
Top 5 Accuracy: 89.76%
- Name: gluon_resnet34_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 4718469120
Parameters: 21800000
File Size: 87295112
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet34_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L73
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.59%
Top 5 Accuracy: 92.0%
- Name: gluon_resnet50_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 5282531328
Parameters: 25560000
File Size: 102493763
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L81
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.58%
Top 5 Accuracy: 93.72%
- Name: gluon_resnet50_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 5590551040
Parameters: 25580000
File Size: 102573166
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L105
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.01%
Top 5 Accuracy: 93.99%
- Name: gluon_resnet50_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 5591002624
Parameters: 25580000
File Size: 102573346
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L129
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.06%
Top 5 Accuracy: 94.46%
- Name: gluon_resnet50_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 7019495424
Parameters: 25680000
File Size: 102992368
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L156
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.7%
Top 5 Accuracy: 94.25%
-->
< / article >
< / div >
< / div >
< / main >
< footer class = "md-footer" >
< nav class = "md-footer__inner md-grid" aria-label = "Footer" >
< a href = "../gloun-inception-v3/" class = "md-footer__link md-footer__link--prev" aria-label = "Previous: (Gluon) Inception v3" rel = "prev" >
< div class = "md-footer__button md-icon" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z" / > < / svg >
< / div >
< div class = "md-footer__title" >
< div class = "md-ellipsis" >
< span class = "md-footer__direction" >
Previous
< / span >
(Gluon) Inception v3
< / div >
< / div >
< / a >
< a href = "../gloun-resnext/" class = "md-footer__link md-footer__link--next" aria-label = "Next: (Gluon) ResNeXt" rel = "next" >
< div class = "md-footer__title" >
< div class = "md-ellipsis" >
< span class = "md-footer__direction" >
Next
< / span >
(Gluon) ResNeXt
< / div >
< / div >
< div class = "md-footer__button md-icon" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M4 11v2h12l-5.5 5.5 1.42 1.42L19.84 12l-7.92-7.92L10.5 5.5 16 11H4Z" / > < / svg >
< / div >
< / a >
< / nav >
< div class = "md-footer-meta md-typeset" >
< div class = "md-footer-meta__inner md-grid" >
< div class = "md-copyright" >
Made with
< a href = "https://squidfunk.github.io/mkdocs-material/" target = "_blank" rel = "noopener" >
Material for MkDocs
< / a >
< / div >
< / div >
< / div >
< / footer >
< / div >
< div class = "md-dialog" data-md-component = "dialog" >
< div class = "md-dialog__inner md-typeset" > < / div >
< / div >
< script id = "__config" type = "application/json" > { "base" : "../.." , "features" : [ ] , "search" : "../../assets/javascripts/workers/search.2a1c317c.min.js" , "translations" : { "clipboard.copied" : "Copied to clipboard" , "clipboard.copy" : "Copy to clipboard" , "search.config.lang" : "en" , "search.config.pipeline" : "trimmer, stopWordFilter" , "search.config.separator" : "[\\s\\-]+" , "search.placeholder" : "Search" , "search.result.more.one" : "1 more on this page" , "search.result.more.other" : "# more on this page" , "search.result.none" : "No matching documents" , "search.result.one" : "1 matching document" , "search.result.other" : "# matching documents" , "search.result.placeholder" : "Type to start searching" , "search.result.term.missing" : "Missing" , "select.version.title" : "Select version" } } < / script >
< script src = "../../assets/javascripts/bundle.6e54b5cd.min.js" > < / script >
< script src = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML" > < / script >
< script src = "https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js" > < / script >
< script src = "../../javascripts/tables.js" > < / script >
< / body >
< / html >