<!doctype html>
<html lang="en" class="no-js">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<meta name="description" content="Pretrained Image Recognition Models">
<link rel="shortcut icon" href="../assets/images/favicon.png">
<meta name="generator" content="mkdocs-1.1.2, mkdocs-material-5.4.0">
<title>Model Architectures - Pytorch Image Models</title>
<link rel="stylesheet" href="../assets/stylesheets/main.fe0cca5b.min.css">
<link href="https://fonts.gstatic.com" rel="preconnect" crossorigin>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,400i,700%7CRoboto+Mono&display=fallback">
<style>body,input{font-family:"Roboto",-apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif}code,kbd,pre{font-family:"Roboto Mono",SFMono-Regular,Consolas,Menlo,monospace}</style>
</head>
<body dir="ltr">
<input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer" autocomplete="off">
<input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search" autocomplete="off">
<label class="md-overlay" for="__drawer"></label>
<div data-md-component="skip">
<a href="#model-architectures" class="md-skip">
Skip to content
</a>
</div>
<div data-md-component="announce">
</div>
<header class="md-header" data-md-component="header">
<nav class="md-header-nav md-grid" aria-label="Header">
<a href=".." title="Pytorch Image Models" class="md-header-nav__button md-logo" aria-label="Pytorch Image Models">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 8a3 3 0 003-3 3 3 0 00-3-3 3 3 0 00-3 3 3 3 0 003 3m0 3.54C9.64 9.35 6.5 8 3 8v11c3.5 0 6.64 1.35 9 3.54 2.36-2.19 5.5-3.54 9-3.54V8c-3.5 0-6.64 1.35-9 3.54z"/></svg>
</a>
<label class="md-header-nav__button md-icon" for="__drawer">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M3 6h18v2H3V6m0 5h18v2H3v-2m0 5h18v2H3v-2z"/></svg>
</label>
<div class="md-header-nav__title" data-md-component="header-title">
<div class="md-header-nav__ellipsis">
<span class="md-header-nav__topic md-ellipsis">
Pytorch Image Models
</span>
<span class="md-header-nav__topic md-ellipsis">
Model Architectures
</span>
</div>
</div>
<label class="md-header-nav__button md-icon" for="__search">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0116 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 019.5 16 6.5 6.5 0 013 9.5 6.5 6.5 0 019.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5z"/></svg>
</label>
<div class="md-search" data-md-component="search" role="dialog">
<label class="md-search__overlay" for="__search"></label>
<div class="md-search__inner" role="search">
<form class="md-search__form" name="search">
<input type="text" class="md-search__input" name="query" aria-label="Search" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="search-query" data-md-state="active">
<label class="md-search__icon md-icon" for="__search">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0116 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 019.5 16 6.5 6.5 0 013 9.5 6.5 6.5 0 019.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12z"/></svg>
</label>
<button type="reset" class="md-search__icon md-icon" aria-label="Clear" data-md-component="search-reset" tabindex="-1">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41z"/></svg>
</button>
</form>
<div class="md-search__output">
<div class="md-search__scrollwrap" data-md-scrollfix>
<div class="md-search-result" data-md-component="search-result">
<div class="md-search-result__meta">
Initializing search
</div>
<ol class="md-search-result__list"></ol>
</div>
</div>
</div>
</div>
</div>
<div class="md-header-nav__source">
<a href="https://github.com/rwightman/pytorch-image-models/" title="Go to repository" class="md-source">
<div class="md-source__icon md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path d="M439.55 236.05L244 40.45a28.87 28.87 0 00-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 01-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 000 40.81l195.61 195.6a28.86 28.86 0 0040.8 0l194.69-194.69a28.86 28.86 0 000-40.81z"/></svg>
</div>
<div class="md-source__repository">
rwightman/pytorch-image-models
</div>
</a>
</div>
</nav>
</header>
<div class="md-container" data-md-component="container">
<main class="md-main" data-md-component="main">
<div class="md-main__inner md-grid">
<div class="md-sidebar md-sidebar--primary" data-md-component="navigation">
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--primary" aria-label="Navigation" data-md-level="0">
<label class="md-nav__title" for="__drawer">
<a href=".." title="Pytorch Image Models" class="md-nav__button md-logo" aria-label="Pytorch Image Models">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 8a3 3 0 003-3 3 3 0 00-3-3 3 3 0 00-3 3 3 3 0 003 3m0 3.54C9.64 9.35 6.5 8 3 8v11c3.5 0 6.64 1.35 9 3.54 2.36-2.19 5.5-3.54 9-3.54V8c-3.5 0-6.64 1.35-9 3.54z"/></svg>
</a>
Pytorch Image Models
</label>
<div class="md-nav__source">
<a href="https://github.com/rwightman/pytorch-image-models/" title="Go to repository" class="md-source">
<div class="md-source__icon md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path d="M439.55 236.05L244 40.45a28.87 28.87 0 00-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 01-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 000 40.81l195.61 195.6a28.86 28.86 0 0040.8 0l194.69-194.69a28.86 28.86 0 000-40.81z"/></svg>
</div>
<div class="md-source__repository">
rwightman/pytorch-image-models
</div>
</a>
</div>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href=".." title="Getting Started" class="md-nav__link">
Getting Started
</a>
</li>
<li class="md-nav__item md-nav__item--active">
<input class="md-nav__toggle md-toggle" data-md-toggle="toc" type="checkbox" id="__toc">
<label class="md-nav__link md-nav__link--active" for="__toc">
Model Architectures
<span class="md-nav__icon md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M3 9h14V7H3v2m0 4h14v-2H3v2m0 4h14v-2H3v2m16 0h2v-2h-2v2m0-10v2h2V7h-2m0 6h2v-2h-2v2z"/></svg>
</span>
</label>
<a href="./" title="Model Architectures" class="md-nav__link md-nav__link--active">
Model Architectures
</a>
<nav class="md-nav md-nav--secondary" aria-label="Table of contents">
<label class="md-nav__title" for="__toc">
<span class="md-nav__icon md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12z"/></svg>
</span>
Table of contents
</label>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="#cross-stage-partial-networks-cspnetpy" class="md-nav__link">
Cross-Stage Partial Networks [cspnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#densenet-densenetpy" class="md-nav__link">
DenseNet [densenet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#dla-dlapy" class="md-nav__link">
DLA [dla.py]
</a>
</li>
<li class="md-nav__item">
<a href="#dual-path-networks-dpnpy" class="md-nav__link">
Dual-Path Networks [dpn.py]
</a>
</li>
<li class="md-nav__item">
<a href="#hrnet-hrnetpy" class="md-nav__link">
HRNet [hrnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#inception-v3-inception_v3py" class="md-nav__link">
Inception-V3 [inception_v3.py]
</a>
</li>
<li class="md-nav__item">
<a href="#inception-v4-inception_v4py" class="md-nav__link">
Inception-V4 [inception_v4.py]
</a>
</li>
<li class="md-nav__item">
<a href="#inception-resnet-v2-inception_resnet_v2py" class="md-nav__link">
Inception-ResNet-V2 [inception_resnet_v2.py]
</a>
</li>
<li class="md-nav__item">
<a href="#nasnet-a-nasnetpy" class="md-nav__link">
NASNet-A [nasnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#pnasnet-5-pnasnetpy" class="md-nav__link">
PNasNet-5 [pnasnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#efficientnet-efficientnetpy" class="md-nav__link">
EfficientNet [efficientnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#mobilenet-v3-mobilenetv3py" class="md-nav__link">
MobileNet-V3 [mobilenetv3.py]
</a>
</li>
<li class="md-nav__item">
<a href="#regnet-regnetpy" class="md-nav__link">
RegNet [regnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#resnet-resnext-resnetpy" class="md-nav__link">
ResNet, ResNeXt [resnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#res2net-res2netpy" class="md-nav__link">
Res2Net [res2net.py]
</a>
</li>
<li class="md-nav__item">
<a href="#resnest-resnestpy" class="md-nav__link">
ResNeSt [resnest.py]
</a>
</li>
<li class="md-nav__item">
<a href="#rexnet-rexnetpy" class="md-nav__link">
ReXNet [rexnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#selective-kernel-networks-sknetpy" class="md-nav__link">
Selective-Kernel Networks [sknet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#selecsls-selecslspy" class="md-nav__link">
SelecSLS [selecsls.py]
</a>
</li>
<li class="md-nav__item">
<a href="#squeeze-and-excitation-networks-senetpy" class="md-nav__link">
Squeeze-and-Excitation Networks [senet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#tresnet-tresnetpy" class="md-nav__link">
TResNet [tresnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#vovnet-v2-and-v1-vovnetpy" class="md-nav__link">
VovNet V2 and V1 [vovnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#xception-xceptionpy" class="md-nav__link">
Xception [xception.py]
</a>
</li>
<li class="md-nav__item">
<a href="#xception-modified-aligned-gluon-gluon_xceptionpy" class="md-nav__link">
Xception (Modified Aligned, Gluon) [gluon_xception.py]
</a>
</li>
<li class="md-nav__item">
<a href="#xception-modified-aligned-tf-aligned_xceptionpy" class="md-nav__link">
Xception (Modified Aligned, TF) [aligned_xception.py]
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="../results/" title="Results" class="md-nav__link">
Results
</a>
</li>
<li class="md-nav__item">
<a href="../scripts/" title="Scripts" class="md-nav__link">
Scripts
</a>
</li>
<li class="md-nav__item">
<a href="../training_hparam_examples/" title="Training Examples" class="md-nav__link">
Training Examples
</a>
</li>
<li class="md-nav__item">
<a href="../feature_extraction/" title="Feature Extraction" class="md-nav__link">
Feature Extraction
</a>
</li>
<li class="md-nav__item">
<a href="../changes/" title="Recent Changes" class="md-nav__link">
Recent Changes
</a>
</li>
<li class="md-nav__item">
<a href="../archived_changes/" title="Archived Changes" class="md-nav__link">
Archived Changes
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-sidebar md-sidebar--secondary" data-md-component="toc">
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--secondary" aria-label="Table of contents">
<label class="md-nav__title" for="__toc">
<span class="md-nav__icon md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12z"/></svg>
</span>
Table of contents
</label>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="#cross-stage-partial-networks-cspnetpy" class="md-nav__link">
Cross-Stage Partial Networks [cspnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#densenet-densenetpy" class="md-nav__link">
DenseNet [densenet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#dla-dlapy" class="md-nav__link">
DLA [dla.py]
</a>
</li>
<li class="md-nav__item">
<a href="#dual-path-networks-dpnpy" class="md-nav__link">
Dual-Path Networks [dpn.py]
</a>
</li>
<li class="md-nav__item">
<a href="#hrnet-hrnetpy" class="md-nav__link">
HRNet [hrnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#inception-v3-inception_v3py" class="md-nav__link">
Inception-V3 [inception_v3.py]
</a>
</li>
<li class="md-nav__item">
<a href="#inception-v4-inception_v4py" class="md-nav__link">
Inception-V4 [inception_v4.py]
</a>
</li>
<li class="md-nav__item">
<a href="#inception-resnet-v2-inception_resnet_v2py" class="md-nav__link">
Inception-ResNet-V2 [inception_resnet_v2.py]
</a>
</li>
<li class="md-nav__item">
<a href="#nasnet-a-nasnetpy" class="md-nav__link">
NASNet-A [nasnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#pnasnet-5-pnasnetpy" class="md-nav__link">
PNasNet-5 [pnasnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#efficientnet-efficientnetpy" class="md-nav__link">
EfficientNet [efficientnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#mobilenet-v3-mobilenetv3py" class="md-nav__link">
MobileNet-V3 [mobilenetv3.py]
</a>
</li>
<li class="md-nav__item">
<a href="#regnet-regnetpy" class="md-nav__link">
RegNet [regnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#resnet-resnext-resnetpy" class="md-nav__link">
ResNet, ResNeXt [resnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#res2net-res2netpy" class="md-nav__link">
Res2Net [res2net.py]
</a>
</li>
<li class="md-nav__item">
<a href="#resnest-resnestpy" class="md-nav__link">
ResNeSt [resnest.py]
</a>
</li>
<li class="md-nav__item">
<a href="#rexnet-rexnetpy" class="md-nav__link">
ReXNet [rexnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#selective-kernel-networks-sknetpy" class="md-nav__link">
Selective-Kernel Networks [sknet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#selecsls-selecslspy" class="md-nav__link">
SelecSLS [selecsls.py]
</a>
</li>
<li class="md-nav__item">
<a href="#squeeze-and-excitation-networks-senetpy" class="md-nav__link">
Squeeze-and-Excitation Networks [senet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#tresnet-tresnetpy" class="md-nav__link">
TResNet [tresnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#vovnet-v2-and-v1-vovnetpy" class="md-nav__link">
VovNet V2 and V1 [vovnet.py]
</a>
</li>
<li class="md-nav__item">
<a href="#xception-xceptionpy" class="md-nav__link">
Xception [xception.py]
</a>
</li>
<li class="md-nav__item">
<a href="#xception-modified-aligned-gluon-gluon_xceptionpy" class="md-nav__link">
Xception (Modified Aligned, Gluon) [gluon_xception.py]
</a>
</li>
<li class="md-nav__item">
<a href="#xception-modified-aligned-tf-aligned_xceptionpy" class="md-nav__link">
Xception (Modified Aligned, TF) [aligned_xception.py]
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-content">
<article class="md-content__inner md-typeset">
<a href="https://github.com/rwightman/pytorch-image-models/edit/master/docs/models.md" title="Edit this page" class="md-content__button md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20.71 7.04c.39-.39.39-1.04 0-1.41l-2.34-2.34c-.37-.39-1.02-.39-1.41 0l-1.84 1.83 3.75 3.75M3 17.25V21h3.75L17.81 9.93l-3.75-3.75L3 17.25z"/></svg>
</a>
<h1 id="model-architectures">Model Architectures</h1>
<p>The included model architectures come from a wide variety of sources. The sources for each model are listed below: the paper, the original implementation ("reference code") that I rewrote or adapted, and any PyTorch implementation that I leveraged directly ("code").</p>
<p>Most included models have pretrained weights. The weights are either:</p>
<ol>
<li>taken from their original sources</li>
<li>ported by myself from their original implementation in a different framework (e.g. TensorFlow models)</li>
<li>trained from scratch using the included training script</li>
</ol>
<p>The validation results for the pretrained weights can be found <a href="../results/">here</a>.</p>
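<p>For context on how these architectures are consumed, the sketch below shows the typical <code>timm</code> workflow: list the available architectures, create one with its pretrained weights, and run a forward pass. It is a minimal example, not the full API; available model names vary by version, so check <code>timm.list_models()</code> in your install.</p>
<pre><code>import timm
import torch

# List available architectures, optionally filtered by a wildcard pattern.
print(timm.list_models('*resnet*', pretrained=True)[:5])

# Create a model with pretrained weights and run a dummy forward pass.
model = timm.create_model('resnet50', pretrained=True)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000]) for ImageNet-1k weights
</code></pre>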
<h2 id="cross-stage-partial-networks-cspnetpy">Cross-Stage Partial Networks [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py">cspnet.py</a>]</h2>
<ul>
<li>Paper: <code>CSPNet: A New Backbone that can Enhance Learning Capability of CNN</code> - <a href="https://arxiv.org/abs/1911.11929">https://arxiv.org/abs/1911.11929</a></li>
<li>Reference impl: <a href="https://github.com/WongKinYiu/CrossStagePartialNetworks">https://github.com/WongKinYiu/CrossStagePartialNetworks</a></li>
</ul>
<h2 id="densenet-densenetpy">DenseNet [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py">densenet.py</a>]</h2>
<ul>
<li>Paper: <code>Densely Connected Convolutional Networks</code> - <a href="https://arxiv.org/abs/1608.06993">https://arxiv.org/abs/1608.06993</a></li>
<li>Code: <a href="https://github.com/pytorch/vision/tree/master/torchvision/models">https://github.com/pytorch/vision/tree/master/torchvision/models</a></li>
</ul>
<h2 id="dla-dlapy">DLA [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py">dla.py</a>]</h2>
<ul>
<li>Paper: <code>Deep Layer Aggregation</code> - <a href="https://arxiv.org/abs/1707.06484">https://arxiv.org/abs/1707.06484</a></li>
<li>Code: <a href="https://github.com/ucbdrive/dla">https://github.com/ucbdrive/dla</a></li>
</ul>
<h2 id="dual-path-networks-dpnpy">Dual-Path Networks [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py">dpn.py</a>]</h2>
<ul>
<li>Paper: <code>Dual Path Networks</code> - <a href="https://arxiv.org/abs/1707.01629">https://arxiv.org/abs/1707.01629</a></li>
<li>My PyTorch code: <a href="https://github.com/rwightman/pytorch-dpn-pretrained">https://github.com/rwightman/pytorch-dpn-pretrained</a></li>
<li>Reference code: <a href="https://github.com/cypw/DPNs">https://github.com/cypw/DPNs</a></li>
</ul>
<h2 id="hrnet-hrnetpy">HRNet [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py">hrnet.py</a>]</h2>
<ul>
<li>Paper: <code>Deep High-Resolution Representation Learning for Visual Recognition</code> - <a href="https://arxiv.org/abs/1908.07919">https://arxiv.org/abs/1908.07919</a></li>
<li>Code: <a href="https://github.com/HRNet/HRNet-Image-Classification">https://github.com/HRNet/HRNet-Image-Classification</a></li>
</ul>
<h2 id="inception-v3-inception_v3py">Inception-V3 [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py">inception_v3.py</a>]</h2>
<ul>
<li>Paper: <code>Rethinking the Inception Architecture for Computer Vision</code> - <a href="https://arxiv.org/abs/1512.00567">https://arxiv.org/abs/1512.00567</a></li>
<li>Code: <a href="https://github.com/pytorch/vision/tree/master/torchvision/models">https://github.com/pytorch/vision/tree/master/torchvision/models</a></li>
</ul>
<h2 id="inception-v4-inception_v4py">Inception-V4 [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py">inception_v4.py</a>]</h2>
<ul>
<li>Paper: <code>Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</code> - <a href="https://arxiv.org/abs/1602.07261">https://arxiv.org/abs/1602.07261</a></li>
<li>Code: <a href="https://github.com/Cadene/pretrained-models.pytorch">https://github.com/Cadene/pretrained-models.pytorch</a></li>
<li>Reference code: <a href="https://github.com/tensorflow/models/tree/master/research/slim/nets">https://github.com/tensorflow/models/tree/master/research/slim/nets</a></li>
</ul>
<h2 id="inception-resnet-v2-inception_resnet_v2py">Inception-ResNet-V2 [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py">inception_resnet_v2.py</a>]</h2>
<ul>
<li>Paper: <code>Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</code> - <a href="https://arxiv.org/abs/1602.07261">https://arxiv.org/abs/1602.07261</a></li>
<li>Code: <a href="https://github.com/Cadene/pretrained-models.pytorch">https://github.com/Cadene/pretrained-models.pytorch</a></li>
<li>Reference code: <a href="https://github.com/tensorflow/models/tree/master/research/slim/nets">https://github.com/tensorflow/models/tree/master/research/slim/nets</a></li>
</ul>
<h2 id="nasnet-a-nasnetpy">NASNet-A [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py">nasnet.py</a>]</h2>
<ul>
<li>Paper: <code>Learning Transferable Architectures for Scalable Image Recognition</code> - <a href="https://arxiv.org/abs/1707.07012">https://arxiv.org/abs/1707.07012</a></li>
<li>Code: <a href="https://github.com/Cadene/pretrained-models.pytorch">https://github.com/Cadene/pretrained-models.pytorch</a></li>
<li>Reference code: <a href="https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet">https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet</a></li>
</ul>
<h2 id="pnasnet-5-pnasnetpy">PNasNet-5 [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py">pnasnet.py</a>]</h2>
<ul>
<li>Paper: <code>Progressive Neural Architecture Search</code> - <a href="https://arxiv.org/abs/1712.00559">https://arxiv.org/abs/1712.00559</a></li>
<li>Code: <a href="https://github.com/Cadene/pretrained-models.pytorch">https://github.com/Cadene/pretrained-models.pytorch</a></li>
<li>Reference code: <a href="https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet">https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet</a></li>
</ul>
<h2 id="efficientnet-efficientnetpy">EfficientNet [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py">efficientnet.py</a>]</h2>
<ul>
<li>Papers<ul>
<li>EfficientNet NoisyStudent (B0-B7, L2) - <a href="https://arxiv.org/abs/1911.04252">https://arxiv.org/abs/1911.04252</a></li>
<li>EfficientNet AdvProp (B0-B8) - <a href="https://arxiv.org/abs/1911.09665">https://arxiv.org/abs/1911.09665</a></li>
<li>EfficientNet (B0-B7) - <a href="https://arxiv.org/abs/1905.11946">https://arxiv.org/abs/1905.11946</a></li>
<li>EfficientNet-EdgeTPU (S, M, L) - <a href="https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html">https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html</a></li>
<li>MixNet - <a href="https://arxiv.org/abs/1907.09595">https://arxiv.org/abs/1907.09595</a></li>
<li>MNASNet B1, A1 (Squeeze-Excite), and Small - <a href="https://arxiv.org/abs/1807.11626">https://arxiv.org/abs/1807.11626</a></li>
<li>MobileNet-V2 - <a href="https://arxiv.org/abs/1801.04381">https://arxiv.org/abs/1801.04381</a></li>
<li>FBNet-C - <a href="https://arxiv.org/abs/1812.03443">https://arxiv.org/abs/1812.03443</a></li>
<li>Single-Path NAS - <a href="https://arxiv.org/abs/1904.02877">https://arxiv.org/abs/1904.02877</a></li>
</ul>
</li>
<li>My PyTorch code: <a href="https://github.com/rwightman/gen-efficientnet-pytorch">https://github.com/rwightman/gen-efficientnet-pytorch</a></li>
<li>Reference code: <a href="https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet">https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet</a></li>
</ul>
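<p>All of the families above (EfficientNet, MixNet, MNASNet, MobileNet-V2, FBNet, Single-Path NAS) are built by the same generator code in <code>efficientnet.py</code>, so they are created through the same factory call and differ only in model name. A short sketch, with illustrative names that should be verified against <code>timm.list_models()</code> for your installed version:</p>
<pre><code>import timm

# One implementation, many families: only the model name changes.
for name in ['efficientnet_b0', 'mixnet_m', 'mobilenetv2_100', 'fbnetc_100']:
    model = timm.create_model(name, pretrained=True)
    n_params = sum(p.numel() for p in model.parameters())
    print(f'{name}: {n_params / 1e6:.1f}M params')
</code></pre>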
<h2 id="mobilenet-v3-mobilenetv3py">MobileNet-V3 [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py">mobilenetv3.py</a>]</h2>
<ul>
<li>Paper: <code>Searching for MobileNetV3</code> - <a href="https://arxiv.org/abs/1905.02244">https://arxiv.org/abs/1905.02244</a></li>
<li>Reference code: <a href="https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet">https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet</a></li>
</ul>
<h2 id="regnet-regnetpy">RegNet [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py">regnet.py</a>]</h2>
<ul>
<li>Paper: <code>Designing Network Design Spaces</code> - <a href="https://arxiv.org/abs/2003.13678">https://arxiv.org/abs/2003.13678</a></li>
<li>Reference code: <a href="https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py">https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py</a></li>
</ul>
<h2 id="resnet-resnext-resnetpy">ResNet, ResNeXt [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py">resnet.py</a>]</h2>
<ul>
<li>ResNet (V1B)<ul>
<li>Paper: <code>Deep Residual Learning for Image Recognition</code> - <a href="https://arxiv.org/abs/1512.03385">https://arxiv.org/abs/1512.03385</a></li>
<li>Code: <a href="https://github.com/pytorch/vision/tree/master/torchvision/models">https://github.com/pytorch/vision/tree/master/torchvision/models</a></li>
</ul>
</li>
<li>ResNeXt<ul>
<li>Paper: <code>Aggregated Residual Transformations for Deep Neural Networks</code> - <a href="https://arxiv.org/abs/1611.05431">https://arxiv.org/abs/1611.05431</a></li>
<li>Code: <a href="https://github.com/pytorch/vision/tree/master/torchvision/models">https://github.com/pytorch/vision/tree/master/torchvision/models</a></li>
</ul>
</li>
<li>'Bag of Tricks' / Gluon C, D, E, S ResNet variants<ul>
<li>Paper: <code>Bag of Tricks for Image Classification with CNNs</code> - <a href="https://arxiv.org/abs/1812.01187">https://arxiv.org/abs/1812.01187</a></li>
<li>Code: <a href="https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py">https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py</a></li>
</ul>
</li>
<li>Instagram pretrained / ImageNet tuned ResNeXt101<ul>
<li>Paper: <code>Exploring the Limits of Weakly Supervised Pretraining</code> - <a href="https://arxiv.org/abs/1805.00932">https://arxiv.org/abs/1805.00932</a></li>
<li>Weights: <a href="https://pytorch.org/hub/facebookresearch_WSL-Images_resnext">https://pytorch.org/hub/facebookresearch_WSL-Images_resnext</a> (NOTE: CC BY-NC 4.0 License, NOT commercial friendly)</li>
</ul>
</li>
<li>Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts<ul>
<li>Paper: <code>Billion-scale semi-supervised learning for image classification</code> - <a href="https://arxiv.org/abs/1905.00546">https://arxiv.org/abs/1905.00546</a></li>
<li>Weights: <a href="https://github.com/facebookresearch/semi-supervised-ImageNet1K-models">https://github.com/facebookresearch/semi-supervised-ImageNet1K-models</a> (NOTE: CC BY-NC 4.0 License, NOT commercial friendly)</li>
</ul>
</li>
<li>Squeeze-and-Excitation Networks<ul>
<li>Paper: <code>Squeeze-and-Excitation Networks</code> - <a href="https://arxiv.org/abs/1709.01507">https://arxiv.org/abs/1709.01507</a></li>
<li>Code: Added to the ResNet base; this is the current version going forward, and the old <code>senet.py</code> is being deprecated</li>
</ul>
</li>
<li>ECAResNet (ECA-Net)<ul>
<li>Paper: <code>ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks</code> - <a href="https://arxiv.org/abs/1910.03151v4">https://arxiv.org/abs/1910.03151v4</a></li>
<li>Code: Added to the ResNet base; the ECA module was contributed by @VRandme. Reference: <a href="https://github.com/BangguWu/ECANet">https://github.com/BangguWu/ECANet</a></li>
</ul>
</li>
</ul>
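<p>The ResNet / ResNeXt variants above all live in the one <code>resnet.py</code> implementation and are selected by model name, with the weight source encoded in a prefix. A hedged sketch with illustrative names (and note the CC BY-NC 4.0 restriction on the WSL/SSL weights mentioned above):</p>
<pre><code>import timm

# Instagram-pretrained (WSL) ResNeXt101 and a semi-supervised (SSL) ResNet50.
resnext_wsl = timm.create_model('ig_resnext101_32x8d', pretrained=True)
resnet_ssl = timm.create_model('ssl_resnet50', pretrained=True)
</code></pre>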
<h2 id="res2net-res2netpy">Res2Net [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py">res2net.py</a>]</h2>
<ul>
<li>Paper: <code>Res2Net: A New Multi-scale Backbone Architecture</code> - <a href="https://arxiv.org/abs/1904.01169">https://arxiv.org/abs/1904.01169</a></li>
<li>Code: <a href="https://github.com/gasvn/Res2Net">https://github.com/gasvn/Res2Net</a></li>
</ul>
<h2 id="resnest-resnestpy">ResNeSt [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py">resnest.py</a>]</h2>
<ul>
<li>Paper: <code>ResNeSt: Split-Attention Networks</code> - <a href="https://arxiv.org/abs/2004.08955">https://arxiv.org/abs/2004.08955</a></li>
<li>Code: <a href="https://github.com/zhanghang1989/ResNeSt">https://github.com/zhanghang1989/ResNeSt</a></li>
</ul>
<h2 id="rexnet-rexnetpy">ReXNet [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py">rexnet.py</a>]</h2>
<ul>
<li>Paper: <code>ReXNet: Diminishing Representational Bottleneck on CNN</code> - <a href="https://arxiv.org/abs/2007.00992">https://arxiv.org/abs/2007.00992</a></li>
<li>Code: <a href="https://github.com/clovaai/rexnet">https://github.com/clovaai/rexnet</a></li>
</ul>
<h2 id="selective-kernel-networks-sknetpy">Selective-Kernel Networks [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py">sknet.py</a>]</h2>
<ul>
<li>Paper: <code>Selective-Kernel Networks</code> - <a href="https://arxiv.org/abs/1903.06586">https://arxiv.org/abs/1903.06586</a></li>
<li>Code: <a href="https://github.com/implus/SKNet">https://github.com/implus/SKNet</a>, <a href="https://github.com/clovaai/assembled-cnn">https://github.com/clovaai/assembled-cnn</a></li>
</ul>
<h2 id="selecsls-selecslspy">SelecSLS [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py">selecsls.py</a>]</h2>
<ul>
<li>Paper: <code>XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera</code> - <a href="https://arxiv.org/abs/1907.00837">https://arxiv.org/abs/1907.00837</a></li>
<li>Code: <a href="https://github.com/mehtadushy/SelecSLS-Pytorch">https://github.com/mehtadushy/SelecSLS-Pytorch</a></li>
</ul>
<h2 id="squeeze-and-excitation-networks-senetpy">Squeeze-and-Excitation Networks [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py">senet.py</a>]</h2>
<p>NOTE: This version of the networks is deprecated; the new SE variants are part of <code>resnet.py</code>.</p>
<ul>
<li>Paper: <code>Squeeze-and-Excitation Networks</code> - <a href="https://arxiv.org/abs/1709.01507">https://arxiv.org/abs/1709.01507</a></li>
<li>Code: <a href="https://github.com/Cadene/pretrained-models.pytorch">https://github.com/Cadene/pretrained-models.pytorch</a></li>
</ul>
<h2 id="tresnet-tresnetpy">TResNet [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py">tresnet.py</a>]</h2>
<ul>
<li>Paper: <code>TResNet: High Performance GPU-Dedicated Architecture</code> - <a href="https://arxiv.org/abs/2003.13630">https://arxiv.org/abs/2003.13630</a></li>
<li>Code: <a href="https://github.com/mrT23/TResNet">https://github.com/mrT23/TResNet</a></li>
</ul>
<h2 id="vovnet-v2-and-v1-vovnetpy">VovNet V2 and V1 [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py">vovnet.py</a>]</h2>
<ul>
<li>Paper: <code>CenterMask : Real-Time Anchor-Free Instance Segmentation</code> - <a href="https://arxiv.org/abs/1911.06667">https://arxiv.org/abs/1911.06667</a></li>
<li>Reference code: <a href="https://github.com/youngwanLEE/vovnet-detectron2">https://github.com/youngwanLEE/vovnet-detectron2</a></li>
</ul>
<h2 id="xception-xceptionpy">Xception [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py">xception.py</a>]</h2>
<ul>
<li>Paper: <code>Xception: Deep Learning with Depthwise Separable Convolutions</code> - <a href="https://arxiv.org/abs/1610.02357">https://arxiv.org/abs/1610.02357</a></li>
<li>Code: <a href="https://github.com/Cadene/pretrained-models.pytorch">https://github.com/Cadene/pretrained-models.pytorch</a></li>
</ul>
<h2 id="xception-modified-aligned-gluon-gluon_xceptionpy">Xception (Modified Aligned, Gluon) [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py">gluon_xception.py</a>]</h2>
<ul>
<li>Paper: <code>Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation</code> - <a href="https://arxiv.org/abs/1802.02611">https://arxiv.org/abs/1802.02611</a></li>
<li>Reference code: <a href="https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo">https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo</a>, <a href="https://github.com/jfzhang95/pytorch-deeplab-xception/">https://github.com/jfzhang95/pytorch-deeplab-xception/</a></li>
</ul>
<h2 id="xception-modified-aligned-tf-aligned_xceptionpy">Xception (Modified Aligned, TF) [<a href="https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/aligned_xception.py">aligned_xception.py</a>]</h2>
<ul>
<li>Paper: <code>Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation</code> - <a href="https://arxiv.org/abs/1802.02611">https://arxiv.org/abs/1802.02611</a></li>
<li>Reference code: <a href="https://github.com/tensorflow/models/tree/master/research/deeplab">https://github.com/tensorflow/models/tree/master/research/deeplab</a></li>
</ul>
</article>
</div>
</div>
</main>
<footer class="md-footer">
<div class="md-footer-nav">
<nav class="md-footer-nav__inner md-grid" aria-label="Footer">
<a href=".." title="Getting Started" class="md-footer-nav__link md-footer-nav__link--prev" rel="prev">
<div class="md-footer-nav__button md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12z"/></svg>
</div>
<div class="md-footer-nav__title">
<div class="md-ellipsis">
<span class="md-footer-nav__direction">
Previous
</span>
Getting Started
</div>
</div>
</a>
<a href="../results/" title="Results" class="md-footer-nav__link md-footer-nav__link--next" rel="next">
<div class="md-footer-nav__title">
<div class="md-ellipsis">
<span class="md-footer-nav__direction">
Next
</span>
Results
</div>
</div>
<div class="md-footer-nav__button md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M4 11v2h12l-5.5 5.5 1.42 1.42L19.84 12l-7.92-7.92L10.5 5.5 16 11H4z"/></svg>
</div>
</a>
</nav>
</div>
<div class="md-footer-meta md-typeset">
<div class="md-footer-meta__inner md-grid">
<div class="md-footer-copyright">
Made with
<a href="https://squidfunk.github.io/mkdocs-material/" target="_blank" rel="noopener">
Material for MkDocs
</a>
</div>
</div>
</div>
</footer>
</div>
<script src="../assets/javascripts/vendor.d710d30a.min.js"></script>
<script src="../assets/javascripts/bundle.b39636ac.min.js"></script><script id="__lang" type="application/json">{"clipboard.copy": "Copy to clipboard", "clipboard.copied": "Copied to clipboard", "search.config.lang": "en", "search.config.pipeline": "trimmer, stopWordFilter", "search.config.separator": "[\\s\\-]+", "search.result.placeholder": "Type to start searching", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents"}</script>
<script>
app = initialize({
base: "..",
features: [],
search: Object.assign({
worker: "../assets/javascripts/worker/search.a68abb33.min.js"
}, typeof search !== "undefined" && search)
})
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js"></script>
<script src="../javascripts/tables.js"></script>
</body>
</html>