<!doctype html>
<html lang="en" class="no-js">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<meta name="description" content="Pretrained Image Recognition Models">
<link rel="icon" href="../../assets/images/favicon.png">
<meta name="generator" content="mkdocs-1.1.2, mkdocs-material-7.0.6">
<title>(Tensorflow) EfficientNet - Pytorch Image Models</title>
<link rel="stylesheet" href="../../assets/stylesheets/main.2c0c5eaf.min.css">
<link rel="stylesheet" href="../../assets/stylesheets/palette.7fa14f5b.min.css">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,400i,700%7CRoboto+Mono&display=fallback">
<style>:root{--md-text-font-family:"Roboto";--md-code-font-family:"Roboto Mono"}</style>
</head>
<body dir="ltr" data-md-color-scheme="" data-md-color-primary="none" data-md-color-accent="none">
<input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer" autocomplete="off">
<input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search" autocomplete="off">
<label class="md-overlay" for="__drawer"></label>
<div data-md-component="skip">
<a href="#tensorflow-efficientnet" class="md-skip">
Skip to content
</a>
</div>
<div data-md-component="announce">
</div>
<header class="md-header" data-md-component="header">
<nav class="md-header__inner md-grid" aria-label="Header">
<a href="../.." title="Pytorch Image Models" class="md-header__button md-logo" aria-label="Pytorch Image Models" data-md-component="logo">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 8a3 3 0 0 0 3-3 3 3 0 0 0-3-3 3 3 0 0 0-3 3 3 3 0 0 0 3 3m0 3.54C9.64 9.35 6.5 8 3 8v11c3.5 0 6.64 1.35 9 3.54 2.36-2.19 5.5-3.54 9-3.54V8c-3.5 0-6.64 1.35-9 3.54z"/></svg>
</a>
<label class="md-header__button md-icon" for="__drawer">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M3 6h18v2H3V6m0 5h18v2H3v-2m0 5h18v2H3v-2z"/></svg>
</label>
<div class="md-header__title" data-md-component="header-title">
<div class="md-header__ellipsis">
<div class="md-header__topic">
<span class="md-ellipsis">
Pytorch Image Models
</span>
</div>
<div class="md-header__topic" data-md-component="header-topic">
<span class="md-ellipsis">
(Tensorflow) EfficientNet
</span>
</div>
</div>
</div>
<div class="md-header__options">
</div>
<label class="md-header__button md-icon" for="__search">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5z"/></svg>
</label>
<div class="md-search" data-md-component="search" role="dialog">
<label class="md-search__overlay" for="__search"></label>
<div class="md-search__inner" role="search">
<form class="md-search__form" name="search">
<input type="text" class="md-search__input" name="query" aria-label="Search" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="search-query" data-md-state="active" required>
<label class="md-search__icon md-icon" for="__search">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12z"/></svg>
</label>
<button type="reset" class="md-search__icon md-icon" aria-label="Clear" tabindex="-1">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41z"/></svg>
</button>
</form>
<div class="md-search__output">
<div class="md-search__scrollwrap" data-md-scrollfix>
<div class="md-search-result" data-md-component="search-result">
<div class="md-search-result__meta">
Initializing search
</div>
<ol class="md-search-result__list"></ol>
</div>
</div>
</div>
</div>
</div>
<div class="md-header__source">
<a href="https://github.com/rwightman/pytorch-image-models/" title="Go to repository" class="md-source" data-md-component="source">
<div class="md-source__icon md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path d="M439.55 236.05L244 40.45a28.87 28.87 0 0 0-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 0 1-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 0 0 0 40.81l195.61 195.6a28.86 28.86 0 0 0 40.8 0l194.69-194.69a28.86 28.86 0 0 0 0-40.81z"/></svg>
</div>
<div class="md-source__repository">
rwightman/pytorch-image-models
</div>
</a>
</div>
</nav>
</header>
< div class = "md-container" data-md-component = "container" >
< main class = "md-main" data-md-component = "main" >
< div class = "md-main__inner md-grid" >
< div class = "md-sidebar md-sidebar--primary" data-md-component = "sidebar" data-md-type = "navigation" >
< div class = "md-sidebar__scrollwrap" >
< div class = "md-sidebar__inner" >
< nav class = "md-nav md-nav--primary" aria-label = "Navigation" data-md-level = "0" >
< label class = "md-nav__title" for = "__drawer" >
< a href = "../.." title = "Pytorch Image Models" class = "md-nav__button md-logo" aria-label = "Pytorch Image Models" data-md-component = "logo" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M12 8a3 3 0 0 0 3-3 3 3 0 0 0-3-3 3 3 0 0 0-3 3 3 3 0 0 0 3 3m0 3.54C9.64 9.35 6.5 8 3 8v11c3.5 0 6.64 1.35 9 3.54 2.36-2.19 5.5-3.54 9-3.54V8c-3.5 0-6.64 1.35-9 3.54z" / > < / svg >
< / a >
Pytorch Image Models
< / label >
< div class = "md-nav__source" >
< a href = "https://github.com/rwightman/pytorch-image-models/" title = "Go to repository" class = "md-source" data-md-component = "source" >
< div class = "md-source__icon md-icon" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 448 512" > < path d = "M439.55 236.05L244 40.45a28.87 28.87 0 0 0-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 0 1-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 0 0 0 40.81l195.61 195.6a28.86 28.86 0 0 0 40.8 0l194.69-194.69a28.86 28.86 0 0 0 0-40.81z" / > < / svg >
< / div >
< div class = "md-source__repository" >
rwightman/pytorch-image-models
< / div >
< / a >
< / div >
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="../.." class="md-nav__link">
Getting Started
</a>
</li>
<li class="md-nav__item">
<a href="../" class="md-nav__link">
Model Summaries
</a>
</li>
<li class="md-nav__item md-nav__item--active md-nav__item--nested">
<input class="md-nav__toggle md-toggle" data-md-toggle="__nav_3" type="checkbox" id="__nav_3" checked>
<label class="md-nav__link" for="__nav_3">
Model Pages
<span class="md-nav__icon md-icon"></span>
</label>
<nav class="md-nav" aria-label="Model Pages" data-md-level="1">
<label class="md-nav__title" for="__nav_3">
<span class="md-nav__icon md-icon"></span>
Model Pages
</label>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="../adversarial-inception-v3/" class="md-nav__link">
Adversarial Inception v3
</a>
</li>
<li class="md-nav__item">
<a href="../advprop/" class="md-nav__link">
AdvProp (EfficientNet)
</a>
</li>
<li class="md-nav__item">
<a href="../big-transfer/" class="md-nav__link">
Big Transfer (BiT)
</a>
</li>
<li class="md-nav__item">
<a href="../csp-darknet/" class="md-nav__link">
CSP-DarkNet
</a>
</li>
<li class="md-nav__item">
<a href="../csp-resnet/" class="md-nav__link">
CSP-ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../csp-resnext/" class="md-nav__link">
CSP-ResNeXt
</a>
</li>
<li class="md-nav__item">
<a href="../densenet/" class="md-nav__link">
DenseNet
</a>
</li>
<li class="md-nav__item">
<a href="../dla/" class="md-nav__link">
Deep Layer Aggregation
</a>
</li>
<li class="md-nav__item">
<a href="../dpn/" class="md-nav__link">
Dual Path Network (DPN)
</a>
</li>
<li class="md-nav__item">
<a href="../ecaresnet/" class="md-nav__link">
ECA-ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../efficientnet-pruned/" class="md-nav__link">
EfficientNet (Knapsack Pruned)
</a>
</li>
<li class="md-nav__item">
<a href="../efficientnet/" class="md-nav__link">
EfficientNet
</a>
</li>
<li class="md-nav__item">
<a href="../ensemble-adversarial/" class="md-nav__link">
Ensemble Adversarial Inception ResNet v2
</a>
</li>
<li class="md-nav__item">
<a href="../ese-vovnet/" class="md-nav__link">
ESE-VoVNet
</a>
</li>
<li class="md-nav__item">
<a href="../fbnet/" class="md-nav__link">
FBNet
</a>
</li>
<li class="md-nav__item">
<a href="../gloun-inception-v3/" class="md-nav__link">
(Gluon) Inception v3
</a>
</li>
<li class="md-nav__item">
<a href="../gloun-resnet/" class="md-nav__link">
(Gluon) ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../gloun-resnext/" class="md-nav__link">
(Gluon) ResNeXt
</a>
</li>
<li class="md-nav__item">
<a href="../gloun-senet/" class="md-nav__link">
(Gluon) SENet
</a>
</li>
<li class="md-nav__item">
<a href="../gloun-seresnext/" class="md-nav__link">
(Gluon) SE-ResNeXt
</a>
</li>
<li class="md-nav__item">
<a href="../gloun-xception/" class="md-nav__link">
(Gluon) Xception
</a>
</li>
<li class="md-nav__item">
<a href="../hrnet/" class="md-nav__link">
HRNet
</a>
</li>
<li class="md-nav__item">
<a href="../ig-resnext/" class="md-nav__link">
Instagram ResNeXt WSL
</a>
</li>
<li class="md-nav__item">
<a href="../inception-resnet-v2/" class="md-nav__link">
Inception ResNet v2
</a>
</li>
<li class="md-nav__item">
<a href="../inception-v3/" class="md-nav__link">
Inception v3
</a>
</li>
<li class="md-nav__item">
<a href="../inception-v4/" class="md-nav__link">
Inception v4
</a>
</li>
<li class="md-nav__item">
<a href="../legacy-se-resnet/" class="md-nav__link">
(Legacy) SE-ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../legacy-se-resnext/" class="md-nav__link">
(Legacy) SE-ResNeXt
</a>
</li>
<li class="md-nav__item">
<a href="../legacy-senet/" class="md-nav__link">
(Legacy) SENet
</a>
</li>
<li class="md-nav__item">
<a href="../mixnet/" class="md-nav__link">
MixNet
</a>
</li>
<li class="md-nav__item">
<a href="../mnasnet/" class="md-nav__link">
MnasNet
</a>
</li>
<li class="md-nav__item">
<a href="../mobilenet-v2/" class="md-nav__link">
MobileNet v2
</a>
</li>
<li class="md-nav__item">
<a href="../mobilenet-v3/" class="md-nav__link">
MobileNet v3
</a>
</li>
<li class="md-nav__item">
<a href="../nasnet/" class="md-nav__link">
NASNet
</a>
</li>
<li class="md-nav__item">
<a href="../noisy-student/" class="md-nav__link">
Noisy Student (EfficientNet)
</a>
</li>
<li class="md-nav__item">
<a href="../pnasnet/" class="md-nav__link">
PNASNet
</a>
</li>
<li class="md-nav__item">
<a href="../regnetx/" class="md-nav__link">
RegNetX
</a>
</li>
<li class="md-nav__item">
<a href="../regnety/" class="md-nav__link">
RegNetY
</a>
</li>
<li class="md-nav__item">
<a href="../res2net/" class="md-nav__link">
Res2Net
</a>
</li>
<li class="md-nav__item">
<a href="../res2next/" class="md-nav__link">
Res2NeXt
</a>
</li>
<li class="md-nav__item">
<a href="../resnest/" class="md-nav__link">
ResNeSt
</a>
</li>
<li class="md-nav__item">
<a href="../resnet-d/" class="md-nav__link">
ResNet-D
</a>
</li>
<li class="md-nav__item">
<a href="../resnet/" class="md-nav__link">
ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../resnext/" class="md-nav__link">
ResNeXt
</a>
</li>
<li class="md-nav__item">
<a href="../rexnet/" class="md-nav__link">
RexNet
</a>
</li>
<li class="md-nav__item">
<a href="../se-resnet/" class="md-nav__link">
SE-ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../selecsls/" class="md-nav__link">
SelecSLS
</a>
</li>
<li class="md-nav__item">
<a href="../seresnext/" class="md-nav__link">
SE-ResNeXt
</a>
</li>
<li class="md-nav__item">
<a href="../skresnet/" class="md-nav__link">
SK-ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../skresnext/" class="md-nav__link">
SK-ResNeXt
</a>
</li>
<li class="md-nav__item">
<a href="../spnasnet/" class="md-nav__link">
SPNASNet
</a>
</li>
<li class="md-nav__item">
<a href="../ssl-resnet/" class="md-nav__link">
SSL ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../ssl-resnext/" class="md-nav__link">
SSL ResNeXT
</a>
</li>
<li class="md-nav__item">
<a href="../swsl-resnet/" class="md-nav__link">
SWSL ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../swsl-resnext/" class="md-nav__link">
SWSL ResNeXt
</a>
</li>
<li class="md-nav__item">
<a href="../tf-efficientnet-condconv/" class="md-nav__link">
(Tensorflow) EfficientNet CondConv
</a>
</li>
<li class="md-nav__item">
<a href="../tf-efficientnet-lite/" class="md-nav__link">
(Tensorflow) EfficientNet Lite
</a>
</li>
<li class="md-nav__item md-nav__item--active">
<input class="md-nav__toggle md-toggle" data-md-toggle="toc" type="checkbox" id="__toc">
<label class="md-nav__link md-nav__link--active" for="__toc">
(Tensorflow) EfficientNet
<span class="md-nav__icon md-icon"></span>
</label>
<a href="./" class="md-nav__link md-nav__link--active">
(Tensorflow) EfficientNet
</a>
<nav class="md-nav md-nav--secondary" aria-label="Table of contents">
<label class="md-nav__title" for="__toc">
<span class="md-nav__icon md-icon"></span>
Table of contents
</label>
<ul class="md-nav__list" data-md-component="toc" data-md-scrollfix>
<li class="md-nav__item">
<a href="#how-do-i-use-this-model-on-an-image" class="md-nav__link">
How do I use this model on an image?
</a>
</li>
<li class="md-nav__item">
<a href="#how-do-i-finetune-this-model" class="md-nav__link">
How do I finetune this model?
</a>
</li>
<li class="md-nav__item">
<a href="#how-do-i-train-this-model" class="md-nav__link">
How do I train this model?
</a>
</li>
<li class="md-nav__item">
<a href="#citation" class="md-nav__link">
Citation
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="../tf-inception-v3/" class="md-nav__link">
(Tensorflow) Inception v3
</a>
</li>
<li class="md-nav__item">
<a href="../tf-mixnet/" class="md-nav__link">
(Tensorflow) MixNet
</a>
</li>
<li class="md-nav__item">
<a href="../tf-mobilenet-v3/" class="md-nav__link">
(Tensorflow) MobileNet v3
</a>
</li>
<li class="md-nav__item">
<a href="../tresnet/" class="md-nav__link">
TResNet
</a>
</li>
<li class="md-nav__item">
<a href="../vision-transformer/" class="md-nav__link">
Vision Transformer (ViT)
</a>
</li>
<li class="md-nav__item">
<a href="../wide-resnet/" class="md-nav__link">
Wide ResNet
</a>
</li>
<li class="md-nav__item">
<a href="../xception/" class="md-nav__link">
Xception
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="../../results/" class="md-nav__link">
Results
</a>
</li>
<li class="md-nav__item">
<a href="../../scripts/" class="md-nav__link">
Scripts
</a>
</li>
<li class="md-nav__item">
<a href="../../training_hparam_examples/" class="md-nav__link">
Training Examples
</a>
</li>
<li class="md-nav__item">
<a href="../../feature_extraction/" class="md-nav__link">
Feature Extraction
</a>
</li>
<li class="md-nav__item">
<a href="../../changes/" class="md-nav__link">
Recent Changes
</a>
</li>
<li class="md-nav__item">
<a href="../../archived_changes/" class="md-nav__link">
Archived Changes
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-sidebar md-sidebar--secondary" data-md-component="sidebar" data-md-type="toc">
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--secondary" aria-label="Table of contents">
<label class="md-nav__title" for="__toc">
<span class="md-nav__icon md-icon"></span>
Table of contents
</label>
<ul class="md-nav__list" data-md-component="toc" data-md-scrollfix>
<li class="md-nav__item">
<a href="#how-do-i-use-this-model-on-an-image" class="md-nav__link">
How do I use this model on an image?
</a>
</li>
<li class="md-nav__item">
<a href="#how-do-i-finetune-this-model" class="md-nav__link">
How do I finetune this model?
</a>
</li>
<li class="md-nav__item">
<a href="#how-do-i-train-this-model" class="md-nav__link">
How do I train this model?
</a>
</li>
<li class="md-nav__item">
<a href="#citation" class="md-nav__link">
Citation
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-content" data-md-component="content">
<article class="md-content__inner md-typeset">
<a href="https://github.com/rwightman/pytorch-image-models/edit/master/docs/models/tf-efficientnet.md" title="Edit this page" class="md-content__button md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20.71 7.04c.39-.39.39-1.04 0-1.41l-2.34-2.34c-.37-.39-1.02-.39-1.41 0l-1.84 1.83 3.75 3.75M3 17.25V21h3.75L17.81 9.93l-3.75-3.75L3 17.25z"/></svg>
</a>
<h1 id="tensorflow-efficientnet">(Tensorflow) EfficientNet</h1>
<p><strong>EfficientNet</strong> is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a <em>compound coefficient</em>. Unlike conventional practice, which scales these factors arbitrarily, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use <span><span class="MathJax_Preview">2^N</span><script type="math/tex">2^N</script></span> times more computational resources, then we can simply increase the network depth by <span><span class="MathJax_Preview">\alpha^N</span><script type="math/tex">\alpha^N</script></span>, width by <span><span class="MathJax_Preview">\beta^N</span><script type="math/tex">\beta^N</script></span>, and image size by <span><span class="MathJax_Preview">\gamma^N</span><script type="math/tex">\gamma^N</script></span>, where <span><span class="MathJax_Preview">\alpha, \beta, \gamma</span><script type="math/tex">\alpha, \beta, \gamma</script></span> are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient <span><span class="MathJax_Preview">\phi</span><script type="math/tex">\phi</script></span> to uniformly scale network width, depth, and resolution in a principled way.</p>
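<p>As a concrete illustration, the sketch below applies the compound scaling rule with the coefficients reported in the EfficientNet paper (<code>alpha=1.2</code>, <code>beta=1.1</code>, <code>gamma=1.15</code>, chosen by grid search subject to <code>alpha * beta**2 * gamma**2 ≈ 2</code>); the helper function is illustrative and not part of timm:</p>
<div class="highlight"><pre><code># Illustrative only: compound scaling arithmetic from the EfficientNet paper.
alpha, beta, gamma = 1.2, 1.1, 1.15  # depth / width / resolution bases

def compound_scale(phi):
    """Return (depth, width, resolution) multipliers for compound coefficient phi."""
    return alpha ** phi, beta ** phi, gamma ** phi

for phi in range(4):
    d, w, r = compound_scale(phi)
    # Each unit increase of phi costs roughly alpha * beta**2 * gamma**2 ~= 2x the FLOPs.
    print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution x{r:.2f}")
</code></pre></div>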
<p>The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.</p>
<p>The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of <a href="https://paperswithcode.com/method/mobilenetv2">MobileNetV2</a>, in addition to <a href="https://paperswithcode.com/method/squeeze-and-excitation-block">squeeze-and-excitation blocks</a>.</p>
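<p>To make the building block concrete, here is a simplified PyTorch sketch of an MBConv-style inverted bottleneck with squeeze-and-excitation. It is a minimal illustration, not timm's actual implementation (the real blocks add stochastic depth, TF "SAME" padding for the <code>tf_</code> variants, and more):</p>
<div class="highlight"><pre><code># Minimal sketch of an MBConv block (inverted bottleneck + squeeze-and-excitation).
import torch
import torch.nn as nn

class SqueezeExcite(nn.Module):
    def __init__(self, chs, se_ratio=0.25):
        super().__init__()
        reduced = max(1, int(chs * se_ratio))
        self.fc1 = nn.Conv2d(chs, reduced, 1)
        self.fc2 = nn.Conv2d(reduced, chs, 1)

    def forward(self, x):
        s = x.mean((2, 3), keepdim=True)               # global average pool
        s = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))
        return x * s                                   # channel-wise reweighting

class MBConv(nn.Module):
    def __init__(self, in_chs, out_chs, stride=1, expand_ratio=6):
        super().__init__()
        mid = in_chs * expand_ratio
        self.use_residual = stride == 1 and in_chs == out_chs
        self.expand = nn.Sequential(nn.Conv2d(in_chs, mid, 1, bias=False),
                                    nn.BatchNorm2d(mid), nn.SiLU())  # SiLU == Swish
        self.dw = nn.Sequential(nn.Conv2d(mid, mid, 3, stride, 1, groups=mid, bias=False),
                                nn.BatchNorm2d(mid), nn.SiLU())
        self.se = SqueezeExcite(mid)
        self.project = nn.Sequential(nn.Conv2d(mid, out_chs, 1, bias=False),
                                     nn.BatchNorm2d(out_chs))        # linear bottleneck

    def forward(self, x):
        out = self.project(self.se(self.dw(self.expand(x))))
        return x + out if self.use_residual else out
</code></pre></div>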
<p>The weights from this model were ported from <a href="https://github.com/tensorflow/tpu">Tensorflow/TPU</a>.</p>
< h2 id = "how-do-i-use-this-model-on-an-image" > How do I use this model on an image?< / h2 >
< p > To load a pretrained model:< / p >
< div class = "highlight" > < pre > < span > < / span > < code > < span class = "kn" > import< / span > < span class = "nn" > timm< / span >
< span class = "n" > model< / span > < span class = "o" > =< / span > < span class = "n" > timm< / span > < span class = "o" > .< / span > < span class = "n" > create_model< / span > < span class = "p" > (< / span > < span class = "s1" > ' tf_efficientnet_b0' < / span > < span class = "p" > ,< / span > < span class = "n" > pretrained< / span > < span class = "o" > =< / span > < span class = "kc" > True< / span > < span class = "p" > )< / span >
< span class = "n" > model< / span > < span class = "o" > .< / span > < span class = "n" > eval< / span > < span class = "p" > ()< / span >
< / code > < / pre > < / div >
<p>To load and preprocess the image:</p>
<div class="highlight"><pre><code>import urllib.request
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0)  # transform and add batch dimension
</code></pre></div>
<p>To get the model predictions:</p>
<div class="highlight"><pre><code>import torch

with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
</code></pre></div>
<p>To get the top-5 predicted class names:</p>
<div class="highlight"><pre><code># Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities, one per line, e.g.:
# Samoyed 0.6425196528434753
# Pomeranian 0.04062102362513542
# keeshond 0.03186424449086189
# white wolf 0.01739676296710968
# Eskimo dog 0.011717947199940681
</code></pre></div>
<p>Replace the model name with the variant you want to use, e.g. <code>tf_efficientnet_b0</code>. You can find the IDs in the model summaries at the top of this page.</p>
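<p>If you are unsure which variant names are available, you can enumerate them with <code>timm.list_models</code>, which accepts a wildcard pattern; a quick sketch (the exact list depends on your installed timm version):</p>
<div class="highlight"><pre><code>import timm

# List TF-ported EfficientNet variants that ship with pretrained weights.
model_names = timm.list_models('tf_efficientnet*', pretrained=True)
print(model_names[:5])  # e.g. starts with 'tf_efficientnet_b0', ...
</code></pre></div>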
<p>To extract image features with this model, follow the <a href="https://rwightman.github.io/pytorch-image-models/feature_extraction/">timm feature extraction examples</a>; just change the name of the model you want to use.</p>
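<p>For instance, a minimal feature-extraction sketch using timm's <code>features_only</code> mode, which returns one feature map per network stage (input size here is the B0 default of 224):</p>
<div class="highlight"><pre><code>import timm
import torch

# Create the model as a feature backbone rather than a classifier.
m = timm.create_model('tf_efficientnet_b0', pretrained=True, features_only=True)
m.eval()
with torch.no_grad():
    feats = m(torch.randn(1, 3, 224, 224))
for f in feats:
    print(f.shape)  # one multi-scale feature map per stage
</code></pre></div>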
< h2 id = "how-do-i-finetune-this-model" > How do I finetune this model?< / h2 >
< p > You can finetune any of the pre-trained models just by changing the classifier (the last layer).
< div class = "highlight" > < pre > < span > < / span > < code > < span class = "n" > model< / span > < span class = "o" > =< / span > < span class = "n" > timm< / span > < span class = "o" > .< / span > < span class = "n" > create_model< / span > < span class = "p" > (< / span > < span class = "s1" > ' tf_efficientnet_b0' < / span > < span class = "p" > ,< / span > < span class = "n" > pretrained< / span > < span class = "o" > =< / span > < span class = "kc" > True< / span > < span class = "p" > ,< / span > < span class = "n" > num_classes< / span > < span class = "o" > =< / span > < span class = "n" > NUM_FINETUNE_CLASSES< / span > < span class = "p" > )< / span >
< / code > < / pre > < / div >
To finetune on your own dataset, you have to write a training loop or adapt < a href = "https://github.com/rwightman/pytorch-image-models/blob/master/train.py" > timm's training
script< / a > to use your dataset.< / p >
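<p>As a starting point, here is a minimal fine-tuning loop sketch; <code>train_loader</code> and <code>NUM_FINETUNE_CLASSES</code> are placeholders you must supply, and the optimizer settings are illustrative rather than timm's training recipe:</p>
<div class="highlight"><pre><code>import timm
import torch

# Assumes train_loader yields (images, labels) batches for your dataset.
model = timm.create_model('tf_efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
model.train()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)  # placeholder hyperparameters
criterion = torch.nn.CrossEntropyLoss()

for images, labels in train_loader:
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()
</code></pre></div>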
< h2 id = "how-do-i-train-this-model" > How do I train this model?< / h2 >
< p > You can follow the < a href = "https://rwightman.github.io/pytorch-image-models/scripts/" > timm recipe scripts< / a > for training a new model afresh.< / p >
< h2 id = "citation" > Citation< / h2 >
< div class = "highlight" > < pre > < span > < / span > < code > < span class = "nc" > @misc< / span > < span class = "p" > {< / span > < span class = "nl" > tan2020efficientnet< / span > < span class = "p" > ,< / span >
< span class = "na" > title< / span > < span class = "p" > =< / span > < span class = "s" > {EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}< / span > < span class = "p" > ,< / span >
< span class = "na" > author< / span > < span class = "p" > =< / span > < span class = "s" > {Mingxing Tan and Quoc V. Le}< / span > < span class = "p" > ,< / span >
< span class = "na" > year< / span > < span class = "p" > =< / span > < span class = "s" > {2020}< / span > < span class = "p" > ,< / span >
< span class = "na" > eprint< / span > < span class = "p" > =< / span > < span class = "s" > {1905.11946}< / span > < span class = "p" > ,< / span >
< span class = "na" > archivePrefix< / span > < span class = "p" > =< / span > < span class = "s" > {arXiv}< / span > < span class = "p" > ,< / span >
< span class = "na" > primaryClass< / span > < span class = "p" > =< / span > < span class = "s" > {cs.LG}< / span >
< span class = "p" > }< / span >
< / code > < / pre > < / div >
<!--
Type: model-index
Collections:
- Name: TF EfficientNet
Paper:
Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for
Models:
- Name: tf_efficientnet_b0
In Collection: TF EfficientNet
Metadata:
FLOPs: 488688572
Parameters: 5290000
File Size: 21383997
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
Training Resources: TPUv3 Cloud TPU
ID: tf_efficientnet_b0
LR: 0.256
Epochs: 350
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 2048
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1241
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.85%
Top 5 Accuracy: 93.23%
- Name: tf_efficientnet_b1
In Collection: TF EfficientNet
Metadata:
FLOPs: 883633200
Parameters: 7790000
File Size: 31512534
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b1
LR: 0.256
Epochs: 350
Crop Pct: '0.882'
Momentum: 0.9
Batch Size: 2048
Image Size: '240'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1251
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.84%
Top 5 Accuracy: 94.2%
- Name: tf_efficientnet_b2
In Collection: TF EfficientNet
Metadata:
FLOPs: 1234321170
Parameters: 9110000
File Size: 36797929
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b2
LR: 0.256
Epochs: 350
Crop Pct: '0.89'
Momentum: 0.9
Batch Size: 2048
Image Size: '260'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1261
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.07%
Top 5 Accuracy: 94.9%
- Name: tf_efficientnet_b3
In Collection: TF EfficientNet
Metadata:
FLOPs: 2275247568
Parameters: 12230000
File Size: 49381362
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b3
LR: 0.256
Epochs: 350
Crop Pct: '0.904'
Momentum: 0.9
Batch Size: 2048
Image Size: '300'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1271
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.65%
Top 5 Accuracy: 95.72%
- Name: tf_efficientnet_b4
In Collection: TF EfficientNet
Metadata:
FLOPs: 5749638672
Parameters: 19340000
File Size: 77989689
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
Training Resources: TPUv3 Cloud TPU
ID: tf_efficientnet_b4
LR: 0.256
Epochs: 350
Crop Pct: '0.922'
Momentum: 0.9
Batch Size: 2048
Image Size: '380'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1281
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.03%
Top 5 Accuracy: 96.3%
- Name: tf_efficientnet_b5
In Collection: TF EfficientNet
Metadata:
FLOPs: 13176501888
Parameters: 30390000
File Size: 122403150
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b5
LR: 0.256
Epochs: 350
Crop Pct: '0.934'
Momentum: 0.9
Batch Size: 2048
Image Size: '456'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1291
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.81%
Top 5 Accuracy: 96.75%
- Name: tf_efficientnet_b6
In Collection: TF EfficientNet
Metadata:
FLOPs: 24180518488
Parameters: 43040000
File Size: 173232007
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b6
LR: 0.256
Epochs: 350
Crop Pct: '0.942'
Momentum: 0.9
Batch Size: 2048
Image Size: '528'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1301
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.11%
Top 5 Accuracy: 96.89%
- Name: tf_efficientnet_b7
In Collection: TF EfficientNet
Metadata:
FLOPs: 48205304880
Parameters: 66349999
File Size: 266850607
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b7
LR: 0.256
Epochs: 350
Crop Pct: '0.949'
Momentum: 0.9
Batch Size: 2048
Image Size: '600'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1312
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.93%
Top 5 Accuracy: 97.2%
- Name: tf_efficientnet_b8
In Collection: TF EfficientNet
Metadata:
FLOPs: 80962956270
Parameters: 87410000
File Size: 351379853
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b8
LR: 0.256
Epochs: 350
Crop Pct: '0.954'
Momentum: 0.9
Batch Size: 2048
Image Size: '672'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1323
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 85.35%
Top 5 Accuracy: 97.39%
- Name: tf_efficientnet_el
In Collection: TF EfficientNet
Metadata:
FLOPs: 9356616096
Parameters: 10590000
File Size: 42800271
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_el
Crop Pct: '0.904'
Image Size: '300'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1551
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.45%
Top 5 Accuracy: 95.17%
- Name: tf_efficientnet_em
In Collection: TF EfficientNet
Metadata:
FLOPs: 3636607040
Parameters: 6900000
File Size: 27933644
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_em
Crop Pct: '0.882'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1541
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.71%
Top 5 Accuracy: 94.33%
- Name: tf_efficientnet_es
In Collection: TF EfficientNet
Metadata:
FLOPs: 2057577472
Parameters: 5440000
File Size: 22008479
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_es
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1531
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.28%
Top 5 Accuracy: 93.6%
- Name: tf_efficientnet_l2_ns_475
In Collection: TF EfficientNet
Metadata:
FLOPs: 217795669644
Parameters: 480310000
File Size: 1925950424
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: TPUv3 Cloud TPU
ID: tf_efficientnet_l2_ns_475
LR: 0.128
Epochs: 350
Dropout: 0.5
Crop Pct: '0.936'
Momentum: 0.9
Batch Size: 2048
Image Size: '475'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1509
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 88.24%
Top 5 Accuracy: 98.55%
-->
</article>
</div>
</div>
</main>
<footer class="md-footer">
<nav class="md-footer__inner md-grid" aria-label="Footer">
<a href="../tf-efficientnet-lite/" class="md-footer__link md-footer__link--prev" rel="prev">
<div class="md-footer__button md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12z"/></svg>
</div>
<div class="md-footer__title">
<div class="md-ellipsis">
<span class="md-footer__direction">
Previous
</span>
(Tensorflow) EfficientNet Lite
</div>
</div>
</a>
<a href="../tf-inception-v3/" class="md-footer__link md-footer__link--next" rel="next">
<div class="md-footer__title">
<div class="md-ellipsis">
<span class="md-footer__direction">
Next
</span>
(Tensorflow) Inception v3
</div>
</div>
<div class="md-footer__button md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M4 11v2h12l-5.5 5.5 1.42 1.42L19.84 12l-7.92-7.92L10.5 5.5 16 11H4z"/></svg>
</div>
</a>
</nav>
<div class="md-footer-meta md-typeset">
<div class="md-footer-meta__inner md-grid">
<div class="md-footer-copyright">
Made with
<a href="https://squidfunk.github.io/mkdocs-material/" target="_blank" rel="noopener">
Material for MkDocs
</a>
</div>
</div>
</div>
</footer>
</div>
<div class="md-dialog" data-md-component="dialog">
<div class="md-dialog__inner md-typeset"></div>
</div>
<script id="__config" type="application/json">{"base": "../..", "features": [], "translations": {"clipboard.copy": "Copy to clipboard", "clipboard.copied": "Copied to clipboard", "search.config.lang": "en", "search.config.pipeline": "trimmer, stopWordFilter", "search.config.separator": "[\\s\\-]+", "search.placeholder": "Search", "search.result.placeholder": "Type to start searching", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.term.missing": "Missing"}, "search": "../../assets/javascripts/workers/search.fb4a9340.min.js", "version": null}</script>
<script src="../../assets/javascripts/bundle.a1c7c35e.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js"></script>
<script src="../../javascripts/tables.js"></script>
</body>
</html>