Fix some attributions, add copyrights to some file docstrings

pull/175/head
Ross Wightman 4 years ago
parent a69c0e04f0
commit 6c17d57a2c

@@ -9,7 +9,7 @@ For any hope of decent results, the checkpoints should be from the same or child
EMA (exponential moving average) of the model weights or performing SWA (stochastic
weight averaging), but post-training.
-Hacked together by Ross Wightman (https://github.com/rwightman)
+Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
import argparse

@@ -5,7 +5,7 @@ Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, e
and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256
calculation for model zoo compatibility.
-Hacked together by Ross Wightman (https://github.com/rwightman)
+Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
import argparse

@@ -3,7 +3,7 @@
An example inference script that outputs top-k class ids for images in a folder into a csv.
-Hacked together by Ross Wightman (https://github.com/rwightman)
+Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import os
import time

@@ -15,7 +15,7 @@ Papers:
RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import random
import math

@@ -1,3 +1,7 @@
+""" Quick n Simple Image Folder, Tarfile based DataSet
+Hacked together by / Copyright 2020 Ross Wightman
+"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

@@ -1,3 +1,11 @@
+""" Loader Factory, Fast Collate, CUDA Prefetcher
+Prefetcher and Fast Collate inspired by NVIDIA APEX example at
+https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import torch.utils.data
import numpy as np
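
The fast-collate idea credited to NVIDIA APEX in the new docstring is worth a sketch for context: instead of converting every image to a normalized float tensor inside dataloader workers, the collate stacks raw uint8 arrays and leaves float conversion and normalization to the GPU-side prefetcher. A minimal illustration of the technique (names here are illustrative, not this file's actual API):

```python
import numpy as np
import torch

def fast_collate(batch):
    """Collate (HWC uint8 ndarray, label) pairs into an NCHW uint8 batch tensor."""
    targets = torch.tensor([target for _, target in batch], dtype=torch.int64)
    h, w, c = batch[0][0].shape
    tensor = torch.zeros((len(batch), c, h, w), dtype=torch.uint8)
    for i, (img, _) in enumerate(batch):
        # HWC -> CHW without any intermediate float copy
        tensor[i] += torch.from_numpy(np.rollaxis(img, 2))
    return tensor, targets
```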

@@ -1,3 +1,8 @@
+""" Mixup
+Paper: `mixup: Beyond Empirical Risk Minimization` - https://arxiv.org/abs/1710.09412
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import numpy as np
import torch
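
The mixup paper referenced in the new docstring reduces to a few lines. A minimal sketch of the batch-level formulation (illustrative; not this module's exact implementation):

```python
import numpy as np
import torch

def mixup_batch(x, target, alpha=0.2):
    """Blend a batch with a shuffled copy of itself; mix targets via the loss."""
    lam = np.random.beta(alpha, alpha)
    perm = torch.randperm(x.size(0))
    x_mixed = lam * x + (1.0 - lam) * x[perm]
    return x_mixed, target, target[perm], lam

# training loss: lam * criterion(out, y_a) + (1 - lam) * criterion(out, y_b)
```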

@@ -1,3 +1,10 @@
+""" Random Erasing (Cutout)
+Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
+Copyright Zhun Zhong & Liang Zheng
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import random
import math
import torch
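
For readers new to the technique credited above to Zhun Zhong & Liang Zheng: random erasing picks a random rectangle of random area and aspect ratio and overwrites it with noise. A minimal sketch under those assumptions (timm's version adds per-pixel/constant fill modes and device handling):

```python
import math
import random
import torch

def random_erase(img, prob=0.5, area_range=(0.02, 0.33), aspect_range=(0.3, 3.3)):
    """img: CHW float tensor, erased in place with normal noise."""
    if random.random() > prob:
        return img
    c, h, w = img.shape
    for _ in range(10):  # retry until a valid box fits
        target_area = random.uniform(*area_range) * h * w
        log_lo, log_hi = math.log(aspect_range[0]), math.log(aspect_range[1])
        aspect = math.exp(random.uniform(log_lo, log_hi))
        eh = int(round(math.sqrt(target_area * aspect)))
        ew = int(round(math.sqrt(target_area / aspect)))
        if eh < h and ew < w:
            top, left = random.randint(0, h - eh), random.randint(0, w - ew)
            img[:, top:top + eh, left:left + ew] = torch.randn(c, eh, ew)
            break
    return img
```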

@@ -1,3 +1,9 @@
+""" Real labels evaluator for ImageNet
+Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159
+Based on Numpy example at https://github.com/google-research/reassessed-imagenet
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import os
import json
import numpy as np
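
The ReaL-labels idea from the paper cited above: each image may have several acceptable labels, and a top-k prediction counts as correct if it hits any of them. A minimal sketch, assuming a `real_labels` dict mapping filename to a list of valid class ids (the format of the google-research JSON) and `predictions` mapping filename to ranked class ids:

```python
def real_accuracy(real_labels, predictions, topk=1):
    correct = total = 0
    for filename, ranked_ids in predictions.items():
        valid = real_labels.get(filename)
        if not valid:  # images with no remaining valid labels are excluded
            continue
        total += 1
        correct += any(p in valid for p in ranked_ids[:topk])
    return correct / max(total, 1)
```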

@@ -1,3 +1,12 @@
+""" Tensorflow Preprocessing Adapter
+Allows use of Tensorflow preprocessing pipeline in PyTorch Transform
+Copyright of original Tensorflow code below.
+Hacked together by / Copyright 2020 Ross Wightman
+"""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,5 +1,7 @@
""" Transforms Factory
Factory methods for building image transforms for use with TIMM (PyTorch Image Models)
+Hacked together by / Copyright 2020 Ross Wightman
"""
import math

@@ -12,7 +12,7 @@ class JsdCrossEntropy(nn.Module):
    From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty -
    https://arxiv.org/abs/1912.02781
-    Hacked together by Ross Wightman
+    Hacked together by / Copyright 2020 Ross Wightman
    """
    def __init__(self, num_splits=3, alpha=12, smoothing=0.1):
        super().__init__()
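
The Jensen-Shannon consistency term from the AugMix paper cited in this docstring, in sketch form (the cross-entropy on the clean split is added separately in the full loss; this is the idea, not the class's exact code):

```python
import torch
import torch.nn.functional as F

def jsd_consistency(logits_clean, logits_aug1, logits_aug2):
    """JSD across softmax outputs of a clean and two augmented views."""
    probs = [F.softmax(l, dim=1) for l in (logits_clean, logits_aug1, logits_aug2)]
    # log of the clamped mixture distribution M
    log_mean = torch.clamp(torch.stack(probs).mean(dim=0), 1e-7, 1.0).log()
    # mean of KL(p_i || M) over the three views
    return sum(F.kl_div(log_mean, p, reduction='batchmean') for p in probs) / len(probs)
```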

@@ -2,8 +2,9 @@
Based on original MXNet implementation https://github.com/cypw/DPNs with
many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs.
-This implementation is compatible with the pretrained weights
-from cypw's MXNet implementation.
+This implementation is compatible with the pretrained weights from cypw's MXNet implementation.
+Hacked together by / Copyright 2020 Ross Wightman
"""
from __future__ import absolute_import
from __future__ import division

@@ -22,7 +22,7 @@ An implementation of EfficienNet that covers variety of related models with effi
* And likely more...
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn

@@ -1,3 +1,8 @@
+""" EfficientNet, MobileNetV3, etc Blocks
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import torch
import torch.nn as nn
from torch.nn import functional as F

@@ -1,3 +1,11 @@
+""" EfficientNet, MobileNetV3, etc Builder
+Assembles EfficientNet and related network feature blocks from string definitions.
+Handles stride, dilation calculations, and selects feature extraction points.
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import logging
import math
import re

@@ -3,7 +3,10 @@
A collection of classes, functions, modules to help extract features from models
and provide a common interface for describing them.
-Hacked together by Ross Wightman
+The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter
+https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py
+Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict, defaultdict
from copy import deepcopy

@@ -4,7 +4,7 @@ This is a port of the Gluon Xception code and weights, itself ported from a PyTo
Gluon model: (https://gluon-cv.mxnet.io/_modules/gluoncv/model_zoo/xception.html)
Original PyTorch DeepLab impl: https://github.com/jfzhang95/pytorch-deeplab-xception
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict

@@ -1,3 +1,8 @@
+""" Inception-V3
+Originally from torchvision Inception3 model
+Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE
+"""
import torch
import torch.nn as nn
import torch.nn.functional as F

@@ -3,7 +3,7 @@
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch

@@ -7,7 +7,7 @@ All jit scripted activations are lacking in-place variations on purpose, scripte
currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted
versions if they contain in-place ops.
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch

@@ -6,7 +6,7 @@ easily be swapped. All have an `inplace` arg even if not used.
These activations are not compatible with jit scripting or ONNX export of the model, please use either
the JIT or basic versions of the activations.
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch

@@ -7,7 +7,7 @@ Adaptive pooling with the ability to select the type of pooling from:
Both a functional and a nn.Module version of the pooling is provided.
-Author: Ross Wightman (rwightman)
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
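
The "select the type of pooling" behavior this docstring describes amounts to dispatching on a string. A minimal sketch of the idea (timm's real module also supports a concatenated avg+max variant and flattening options):

```python
import torch.nn.functional as F

def adaptive_pool2d(x, pool_type='avg', output_size=1):
    if pool_type == 'avg':
        return F.adaptive_avg_pool2d(x, output_size)
    if pool_type == 'max':
        return F.adaptive_max_pool2d(x, output_size)
    if pool_type == 'avgmax':  # average of the two pooling results
        return 0.5 * (F.adaptive_avg_pool2d(x, output_size) +
                      F.adaptive_max_pool2d(x, output_size))
    raise ValueError(f'Unknown pool type: {pool_type}')
```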

@@ -5,7 +5,7 @@ Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.o
WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on
some tasks, especially fine-grained it seems. I may end up removing this impl.
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch

@@ -3,7 +3,7 @@
Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference
(https://arxiv.org/abs/1904.04971)
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import math

@@ -1,6 +1,6 @@
""" Conv2d w/ Same Padding
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn

@@ -1,6 +1,6 @@
""" Conv2d + BN + Act
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn

@@ -1,3 +1,6 @@
+""" Activation Factory
+Hacked together by / Copyright 2020 Ross Wightman
+"""
from .activations import *
from .activations_jit import *
from .activations_me import *

@@ -1,6 +1,6 @@
""" Select AttentionFactory Method
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from .se import SEModule, EffectiveSEModule

@@ -1,6 +1,6 @@
""" Create Conv2d Factory Method
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
from .mixed_conv2d import MixedConv2d

@@ -1,3 +1,11 @@
+""" NormAct (Normalization + Activation Layer) Factory
+Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
+instances in models. Where these are used it will be possible to swap separate BN + act layers with
+combined modules like IABN or EvoNorms.
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import types
import functools

@@ -12,7 +12,7 @@ DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn

@@ -6,7 +6,7 @@ in terms of memory usage and throughput (roughly 5x mem, 1/2 - 1/3x speed).
Still very much a WIP, fiddling with buffer usage, in-place/jit optimizations, and layouts.
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch

@@ -1,6 +1,6 @@
""" Layer/Module Helpers
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
from itertools import repeat
from torch._six import container_abcs
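
The next hunk swaps torch's private `_pair`/`_quadruple` for `tup_pair`/`tup_quadruple` from this helpers module. Based on the imports shown here, a likely shape for those helpers is the `_ntuple` pattern torch itself uses (a sketch under that assumption; stdlib `collections.abc` stands in for the `torch._six` compat shim):

```python
from itertools import repeat
import collections.abc as container_abcs  # stdlib stand-in for torch._six

def _ntuple(n):
    def parse(x):
        # pass iterables through unchanged, broadcast scalars to an n-tuple
        if isinstance(x, container_abcs.Iterable):
            return tuple(x)
        return tuple(repeat(x, n))
    return parse

tup_pair = _ntuple(2)       # tup_pair(3) -> (3, 3)
tup_quadruple = _ntuple(4)  # tup_quadruple(1) -> (1, 1, 1, 1)
```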

@ -1,8 +1,9 @@
import math """ Median Pool
import torch Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _quadruple from .helpers import tup_pair, tup_quadruple
class MedianPool2d(nn.Module): class MedianPool2d(nn.Module):
@ -16,9 +17,9 @@ class MedianPool2d(nn.Module):
""" """
def __init__(self, kernel_size=3, stride=1, padding=0, same=False): def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
super(MedianPool2d, self).__init__() super(MedianPool2d, self).__init__()
self.k = _pair(kernel_size) self.k = tup_pair(kernel_size)
self.stride = _pair(stride) self.stride = tup_pair(stride)
self.padding = _quadruple(padding) # convert to l, r, t, b self.padding = tup_quadruple(padding) # convert to l, r, t, b
self.same = same self.same = same
def _padding(self, x): def _padding(self, x):

@@ -2,7 +2,7 @@
Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595)
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch

@@ -1,6 +1,6 @@
""" Padding Helpers
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from typing import List, Tuple
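
These padding helpers underpin the `Conv2dSame` and `AvgPool2dSame` modules elsewhere in the diff. The core of TF-style 'SAME' padding is one formula; a minimal sketch of it (illustrative of the idea, not necessarily this file's exact function):

```python
import math

def get_same_padding(in_size: int, kernel_size: int, stride: int, dilation: int = 1) -> int:
    """Total padding needed so output size is ceil(in_size / stride)."""
    out_size = math.ceil(in_size / stride)
    return max((out_size - 1) * stride + (kernel_size - 1) * dilation + 1 - in_size, 0)
```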

@@ -1,6 +1,6 @@
""" AvgPool2d w/ Same Padding
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn

@@ -2,7 +2,7 @@
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn

@@ -1,3 +1,10 @@
+""" Depthwise Separable Conv Modules
+Basic DWS convs. Other variations of DWS exist with batch norm or activations between the
+DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception.
+Hacked together by / Copyright 2020 Ross Wightman
+"""
from torch import nn as nn
from .create_conv2d import create_conv2d

@@ -9,7 +9,7 @@ This allows easily removing the auxiliary BN layers after training to efficientl
achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
'Disentangled Learning via An Auxiliary BN'
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
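
The 'Auxiliary BatchNorm' routing this docstring describes: split each training batch into chunks (e.g. clean vs adversarial samples) and normalize each chunk with its own BN so their statistics stay disentangled. A minimal sketch of the idea (illustrative, not this file's exact class):

```python
import torch
import torch.nn as nn

class SplitBatchNorm2d(nn.Module):
    def __init__(self, num_features, num_splits=2):
        super().__init__()
        self.num_splits = num_splits
        self.bns = nn.ModuleList(nn.BatchNorm2d(num_features) for _ in range(num_splits))

    def forward(self, x):
        if self.training:
            chunks = torch.chunk(x, self.num_splits, dim=0)
            return torch.cat([bn(c) for bn, c in zip(self.bns, chunks)], dim=0)
        return self.bns[0](x)  # eval uses only the primary BN's statistics
```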

@@ -1,6 +1,6 @@
""" Test Time Pooling (Average-Max Pool)
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import logging

@@ -5,7 +5,7 @@ A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl.
Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn

@@ -1,3 +1,7 @@
+""" Model Registry
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import sys
import re
import fnmatch
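
A model registry of this kind is, at its core, a decorator that records constructors by name so factory and listing helpers can find them. A minimal sketch of the pattern (the function and model names here are hypothetical, not this module's API):

```python
_model_entrypoints = {}

def register_model(fn):
    """Decorator: record a model constructor under its function name."""
    _model_entrypoints[fn.__name__] = fn
    return fn

@register_model
def toy_net(pretrained=False, **kwargs):  # hypothetical model constructor
    ...

# factory-style lookup: _model_entrypoints['toy_net'](pretrained=True)
```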

@@ -11,6 +11,7 @@ Weights from original impl have been modified
* removed training specific dict entries from checkpoints and keep model state_dict only
* remap names to match the ones here
+Hacked together by / Copyright 2020 Ross Wightman
"""
import numpy as np
import torch.nn as nn

@@ -4,6 +4,7 @@ This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-C
additional dropout and dynamic global avg/max pool.
ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman
+Copyright 2020 Ross Wightman
"""
import math
import copy

@@ -7,6 +7,7 @@ Adapted from original impl at https://github.com/clovaai/rexnet
Copyright (c) 2020-present NAVER Corp. MIT license
Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman
+Copyright 2020 Ross Wightman
"""
import torch.nn as nn

@@ -6,7 +6,7 @@ This was inspired by reading 'Compounding the Performance Improvements...' (http
and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer
to the original paper with some modifications of my own to better balance param count vs accuracy.
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
import math

@@ -8,7 +8,7 @@ Looked at https://github.com/youngwanLEE/vovnet-detectron2 &
https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py
for some reference, rewrote most of the code.
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import List

@@ -3,7 +3,7 @@
This is a correct, from scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at
https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md
-Hacked together by Ross Wightman
+Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict

@@ -1,6 +1,8 @@
""" Lookahead Optimizer Wrapper.
Implementation modified from: https://github.com/alphadl/lookahead.pytorch
Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610
+Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch.optim.optimizer import Optimizer
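
The 'k steps forward, 1 step back' rule from the paper cited above: run any inner optimizer for k steps, then pull a set of slow weights toward the fast weights by a factor alpha and reset the fast weights to them. A minimal sketch of that update (illustrative; `slow_params` are assumed to be detached copies of `params` made at setup):

```python
import torch

@torch.no_grad()
def lookahead_step(params, slow_params, step_count, k=6, alpha=0.5):
    """Call after each inner-optimizer step; params are updated in place."""
    if step_count % k != 0:
        return
    for p, slow in zip(params, slow_params):
        slow += alpha * (p.data - slow)  # slow <- slow + alpha * (fast - slow)
        p.data.copy_(slow)               # reset fast weights to the slow weights
```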

@@ -1,3 +1,6 @@
+""" Optimizer Factory w/ Custom Weight Decay
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import torch
from torch import optim as optim
from timm.optim import Nadam, RMSpropTF, AdamW, RAdam, NovoGrad, NvNovoGrad, Lookahead

@@ -1,3 +1,12 @@
+""" RMSProp modified to behave like Tensorflow impl
+Originally cut & paste from PyTorch RMSProp
+https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
+Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
+Modifications Copyright 2020 Ross Wightman
+"""
import torch
from torch.optim import Optimizer
@@ -6,7 +15,12 @@ class RMSpropTF(Optimizer):
    """Implements RMSprop algorithm (TensorFlow style epsilon)
    NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
-    to closer match Tensorflow for matching hyper-params.
+    and a few other modifications to closer match Tensorflow for matching hyper-params.
+    Noteworthy changes include:
+    1. Epsilon applied inside square-root
+    2. square_avg initialized to ones
+    3. LR scaling of update accumulated in momentum buffer
    Proposed by G. Hinton in his
    `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
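
The first two changes in the list above are easy to see in update form. A minimal sketch of the momentum-free step (illustrative, not this optimizer's code; PyTorch's stock RMSprop adds eps after the sqrt and starts `square_avg` at zeros):

```python
import torch

def rmsprop_tf_update(param, grad, square_avg, lr=1e-3, alpha=0.9, eps=1e-10):
    # EMA of squared gradients
    square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)
    # change 1: eps applied inside the square-root
    param.addcdiv_(grad, (square_avg + eps).sqrt(), value=-lr)

w = torch.zeros(4)
g = torch.ones(4)
v = torch.ones_like(w)  # change 2: square_avg starts at ones, not zeros
rmsprop_tf_update(w, g, v)
```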

@@ -1,3 +1,9 @@
+""" Cosine Scheduler
+Cosine LR schedule with warmup, cycle/restarts, noise.
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import logging
import math
import numpy as np
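
The schedule shape named in this docstring, minus restarts and noise, is a linear warmup into a cosine decay. A minimal per-epoch sketch (illustrative; parameter names are not the scheduler class's API):

```python
import math

def cosine_lr(epoch, base_lr, warmup_epochs=5, total_epochs=100, min_lr=1e-5):
    if epoch < warmup_epochs:
        return base_lr * (epoch + 1) / warmup_epochs  # linear warmup
    t = (epoch - warmup_epochs) / max(total_epochs - warmup_epochs, 1)
    return min_lr + 0.5 * (base_lr - min_lr) * (1 + math.cos(math.pi * t))
```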

@@ -1,3 +1,9 @@
+""" Plateau Scheduler
+Adapts PyTorch plateau scheduler and allows application of noise, warmup.
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import torch
from .scheduler import Scheduler

@@ -1,3 +1,6 @@
+""" Scheduler Factory
+Hacked together by / Copyright 2020 Ross Wightman
+"""
from .cosine_lr import CosineLRScheduler
from .tanh_lr import TanhLRScheduler
from .step_lr import StepLRScheduler

@@ -1,3 +1,9 @@
+""" Step Scheduler
+Basic step LR schedule with warmup, noise.
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import math
import torch

@@ -1,3 +1,9 @@
+""" TanH Scheduler
+TanH schedule with warmup, cycle/restarts, noise.
+Hacked together by / Copyright 2020 Ross Wightman
+"""
import logging
import math
import numpy as np

@@ -1,3 +1,8 @@
+""" Common training and validation utilities
+Hacked together by / Copyright 2020 Ross Wightman
+"""
from copy import deepcopy
import torch

@@ -12,7 +12,7 @@ This script was started from an early version of the PyTorch ImageNet example
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
-Hacked together by Ross Wightman (https://github.com/rwightman)
+Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time

@@ -5,7 +5,7 @@ This is intended to be a lean and easily modifiable ImageNet validation script f
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
-Hacked together by Ross Wightman (https://github.com/rwightman)
+Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import os
