parent 1577c52976
commit 9d927a389a
@@ -0,0 +1,61 @@
from __future__ import absolute_import
from torchvision.transforms import *

from PIL import Image
import random
import math
import numpy as np
import torch


class RandomErasing:
    """Randomly selects a rectangle region in an image and erases its pixels.

    'Random Erasing Data Augmentation' by Zhong et al.
    See https://arxiv.org/pdf/1708.04896.pdf

    Args:
        probability: The probability that the Random Erasing operation will be performed.
        sl: Minimum proportion of erased area against input image.
        sh: Maximum proportion of erased area against input image.
        min_aspect: Minimum aspect ratio of erased area.
        per_pixel: If True, fill the erased region with per-pixel random values in [pl, ph].
        random: If True, fill the erased region with a single random value per channel in [pl, ph].
        pl: Lower bound of the random fill values.
        ph: Upper bound of the random fill values.
        mean: Per-channel fill value used when neither per_pixel nor random is set.
    """

    def __init__(
            self,
            probability=0.5, sl=0.02, sh=1/3, min_aspect=0.3,
            per_pixel=False, random=False,
            pl=0, ph=1., mean=[0.485, 0.456, 0.406]):
        self.probability = probability
        self.mean = torch.tensor(mean)
        self.sl = sl
        self.sh = sh
        self.min_aspect = min_aspect
        self.pl = pl
        self.ph = ph
        self.per_pixel = per_pixel  # per pixel random, bounded by [pl, ph]
        self.random = random  # per block random, bounded by [pl, ph]

    def __call__(self, img):
        if random.random() > self.probability:
            return img

        chan, img_h, img_w = img.size()
        area = img_h * img_w
        for attempt in range(100):
            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.min_aspect, 1 / self.min_aspect)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            # Fill value: one random value per channel, or the dataset mean; shaped
            # (chan, 1, 1) so it broadcasts over the erased (chan, h, w) region.
            if self.random:
                c = torch.empty((chan, 1, 1)).uniform_(self.pl, self.ph)
            else:
                c = self.mean[:chan].view(-1, 1, 1)
            if w < img_w and h < img_h:
                top = random.randint(0, img_h - h)
                left = random.randint(0, img_w - w)
                if self.per_pixel:
                    img[:, top:top + h, left:left + w] = torch.empty((chan, h, w)).uniform_(self.pl, self.ph)
                else:
                    img[:, top:top + h, left:left + w] = c
                return img

        return img
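
A minimal usage sketch, not part of the commit: RandomErasing expects a CHW tensor, so it has to sit after ToTensor() (and typically after Normalize()); the surrounding transforms and their parameters are illustrative assumptions.

# Hypothetical training pipeline using the RandomErasing transform defined above.
from torchvision import transforms

train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),      # assumed crop size
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),                  # PIL image -> CHW float tensor
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    RandomErasing(probability=0.5, per_pixel=True),  # erase with per-pixel random values
])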
@@ -0,0 +1,118 @@
import math
import torch
from torch.optim import Optimizer


class AdaBound(Optimizer):
    """Implements AdaBound algorithm.

    It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): Adam learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        final_lr (float, optional): final (SGD) learning rate (default: 0.1)
        gamma (float, optional): convergence speed of the bound functions (default: 1e-3)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm

    .. _Adaptive Gradient Methods with Dynamic Bound of Learning Rate:
        https://openreview.net/forum?id=Bkg3g2R9FX
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,
                 eps=1e-8, weight_decay=0, amsbound=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= final_lr:
            raise ValueError("Invalid final learning rate: {}".format(final_lr))
        if not 0.0 <= gamma < 1.0:
            raise ValueError("Invalid gamma parameter: {}".format(gamma))
        defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,
                        weight_decay=weight_decay, amsbound=amsbound)
        super(AdaBound, self).__init__(params, defaults)

        self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))

    def __setstate__(self, state):
        super(AdaBound, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsbound', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group, base_lr in zip(self.param_groups, self.base_lrs):
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'AdaBound does not support sparse gradients, please consider SparseAdam instead')
                amsbound = group['amsbound']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsbound:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsbound:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsbound:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                # Applies bounds on actual learning rate
                # lr_scheduler cannot affect final_lr, this is a workaround to apply lr decay
                final_lr = group['final_lr'] * group['lr'] / base_lr
                lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))
                upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))
                step_size = torch.full_like(denom, step_size)
                step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)

                p.data.add_(-step_size)

        return loss
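
A minimal usage sketch, not part of the commit; the model, loss, and hyper-parameters below are illustrative assumptions, not values from this repository.

# Hypothetical training step with the AdaBound optimizer defined above.
import torch
import torch.nn as nn

model = nn.Linear(10, 2)                              # stand-in model
criterion = nn.CrossEntropyLoss()
optimizer = AdaBound(model.parameters(), lr=1e-3, final_lr=0.1, gamma=1e-3)

inputs = torch.randn(4, 10)                           # dummy batch
targets = torch.tensor([0, 1, 0, 1])

optimizer.zero_grad()
loss = criterion(model(inputs), targets)
loss.backward()
optimizer.step()                                      # Adam-style update, clamped between the dynamic bounds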