From 5a16c533ff7c0b4053345835f6cda80c34b2ed7e Mon Sep 17 00:00:00 2001
From: Ross Wightman
Date: Wed, 18 Mar 2020 14:43:50 -0700
Subject: [PATCH] Add better resnext50_32x4d weights trained by andravin

---
 README.md             | 9 ++++++++-
 timm/models/resnet.py | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 56112e5c..6f487581 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,7 @@
 
 ### March 18, 2020
 * Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
+* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
 
 ### Feb 29, 2020
 * New MobileNet-V3 Large weights trained from stratch with this code to 75.77% top-1
@@ -183,11 +184,11 @@ I've leveraged the training scripts in this repository to train a few of the mod
 | mixnet_xl | 80.478 (19.522) | 94.932 (5.068) | 11.90M | bicubic | 224 |
 | efficientnet_b2 | 80.402 (19.598) | 95.076 (4.924) | 9.11M | bicubic | 260 |
 | skresnext50d_32x4d | 80.156 (19.844) | 94.642 (5.358) | 27.5M | bicubic | 224 |
+| resnext50_32x4d | 79.762 (20.238) | 94.600 (5.400) | 25M | bicubic | 224 |
 | resnext50d_32x4d | 79.674 (20.326) | 94.868 (5.132) | 25.1M | bicubic | 224 |
 | resnet50 | 79.038 (20.962) | 94.390 (5.610) | 25.6M | bicubic | 224 |
 | mixnet_l | 78.976 (21.024 | 94.184 (5.816) | 7.33M | bicubic | 224 |
 | efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.79M | bicubic | 240 |
-| resnext50_32x4d | 78.512 (21.488) | 94.042 (5.958) | 25M | bicubic | 224 |
 | efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44M | bicubic | 224 |
 | seresnext26t_32x4d | 77.998 (22.002) | 93.708 (6.292) | 16.8M | bicubic | 224 |
 | seresnext26tn_32x4d | 77.986 (22.014) | 93.746 (6.254) | 16.8M | bicubic | 224 |
@@ -388,6 +389,12 @@ Trained by [Andrew Lavin](https://github.com/andravin) with 8 V100 cards. Model
 
 `./distributed_train.sh 2 /imagenet/ --model mobilenetv3_large_100 -b 512 --sched step --epochs 600 --decay-epochs 2.4 --decay-rate .973 --opt rmsproptf --opt-eps .001 -j 7 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 --lr-noise 0.42 0.9`
 
+### ResNeXt-50 32x4d w/ RandAugment - 79.762 top-1, 94.60 top-5
+These params will also work well for SE-ResNeXt-50 and SK-ResNeXt-50, and likely the ResNeXt-101 variants as well. I used them for the SK-ResNeXt-50 32x4d that I trained with 2 GPUs, using a slightly higher LR per effective batch size (lr=0.18, b=192 per GPU). The command line below is tuned for 8-GPU training.
+
+
+`./distributed_train.sh 8 /imagenet --model resnext50_32x4d --lr 0.6 --warmup-epochs 5 --epochs 240 --weight-decay 1e-4 --sched cosine --reprob 0.4 --recount 3 --remode pixel --aa rand-m7-mstd0.5-inc1 -b 192 -j 6 --amp --dist-bn reduce`
+
 **TODO dig up some more**
 
 
diff --git a/timm/models/resnet.py b/timm/models/resnet.py
index 456dc129..0013cbe0 100644
--- a/timm/models/resnet.py
+++ b/timm/models/resnet.py
@@ -53,7 +53,7 @@ default_cfgs = {
     'wide_resnet50_2': _cfg(url='https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth'),
     'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'),
     'resnext50_32x4d': _cfg(
-        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d-068914d1.pth',
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth',
         interpolation='bicubic'),
     'resnext50d_32x4d': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth',
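
Usage note (not part of the patch): a minimal sketch of how the retrained weights are picked up once this change is applied, assuming a local install of the patched timm. `create_model(..., pretrained=True)` resolves the checkpoint URL from the `default_cfgs` entry edited above, and the model's `default_cfg` carries the bicubic interpolation hint that matches the results table.

```python
import torch
import timm  # assumes the patched timm is installed locally

# pretrained=True downloads the checkpoint registered in default_cfgs,
# i.e. resnext50_32x4d_ra-d733960d.pth after this patch.
model = timm.create_model('resnext50_32x4d', pretrained=True)
model.eval()

# The cfg keeps interpolation='bicubic', matching the table entry
# (79.762 top-1 at 224x224, bicubic).
print(model.default_cfg['interpolation'])  # -> 'bicubic'

# Smoke-test forward pass at the eval resolution from the table.
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # -> torch.Size([1, 1000])
```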