Fix some formatting in utils/model.py

pull/933/head
Ross Wightman 3 years ago
parent 0fe4fd3f1f
commit 57992509f9

@@ -2,11 +2,9 @@
 Hacked together by / Copyright 2020 Ross Wightman
 """
-from logging import root
-from typing import Sequence
+import fnmatch
+
 import torch
-import fnmatch
 from torchvision.ops.misc import FrozenBatchNorm2d
 
 from .model_ema import ModelEma
@@ -24,18 +22,21 @@ def get_state_dict(model, unwrap_fn=unwrap_model):
 
 
 def avg_sq_ch_mean(model, input, output):
-    "calculate average channel square mean of output activations"
-    return torch.mean(output.mean(axis=[0,2,3])**2).item()
+    """ calculate average channel square mean of output activations
+    """
+    return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item()
 
 
 def avg_ch_var(model, input, output):
-    "calculate average channel variance of output activations"
-    return torch.mean(output.var(axis=[0,2,3])).item()\
+    """ calculate average channel variance of output activations
+    """
+    return torch.mean(output.var(axis=[0, 2, 3])).item()
 
 
 def avg_ch_var_residual(model, input, output):
-    "calculate average channel variance of output activations"
-    return torch.mean(output.var(axis=[0,2,3])).item()
+    """ calculate average channel variance of output activations
+    """
+    return torch.mean(output.var(axis=[0, 2, 3])).item()
 
 
 class ActivationStatsHook:
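For context: all three helpers above use PyTorch's standard forward-hook signature (module, input, output), so they can also be attached directly with register_forward_hook, independent of the ActivationStatsHook class that follows. A minimal sketch — the toy two-conv model and the make_hook wrapper are illustrative, not part of this file:

    import torch
    import torch.nn as nn
    from timm.utils.model import avg_sq_ch_mean, avg_ch_var

    # Toy model; any module producing NCHW outputs works, since the stat fns
    # reduce over axes [0, 2, 3].
    model = nn.Sequential(
        nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 8, 3, padding=1))
    stats = {'avg_sq_ch_mean': [], 'avg_ch_var': []}

    def make_hook(fn, key):
        # Adapt a stat fn into a forward hook that records its scalar result.
        def hook(module, input, output):
            stats[key].append(fn(module, input, output))
        return hook

    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            m.register_forward_hook(make_hook(avg_sq_ch_mean, 'avg_sq_ch_mean'))
            m.register_forward_hook(make_hook(avg_ch_var, 'avg_ch_var'))

    with torch.no_grad():
        model(torch.randn(8, 3, 224, 224))  # each list now holds one float per conv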
@@ -71,6 +72,7 @@ class ActivationStatsHook:
         def append_activation_stats(module, input, output):
             out = hook_fn(module, input, output)
             self.stats[hook_fn.__name__].append(out)
+
         return append_activation_stats
 
     def register_hook(self, hook_fn_loc, hook_fn):
@@ -80,10 +82,11 @@ class ActivationStatsHook:
             module.register_forward_hook(self._create_hook(hook_fn))
 
 
-def extract_spp_stats(model,
-                      hook_fn_locs,
-                      hook_fns,
-                      input_shape=[8, 3, 224, 224]):
+def extract_spp_stats(
+        model,
+        hook_fn_locs,
+        hook_fns,
+        input_shape=[8, 3, 224, 224]):
     """Extract average square channel mean and variance of activations during
     forward pass to plot Signal Propogation Plots (SPP).
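A hedged usage sketch of extract_spp_stats: given the fnmatch import and ActivationStatsHook.register_hook above, hook_fn_locs are presumably fnmatch-style patterns matched against model.named_modules(), paired one-to-one with hook_fns, and the result is the hook's stats dict keyed by each function's __name__. The model name and patterns below are illustrative only:

    import timm
    from timm.utils.model import extract_spp_stats, avg_sq_ch_mean, avg_ch_var

    model = timm.create_model('resnet50', pretrained=False)
    stats = extract_spp_stats(
        model,
        hook_fn_locs=['layer?.?.bn3', 'layer?.?.bn3'],  # one pattern per hook fn
        hook_fns=[avg_sq_ch_mean, avg_ch_var],
        input_shape=[8, 3, 224, 224])
    # e.g. stats['avg_ch_var'] -> one value per matched module, i.e. one
    # point per residual block when plotting an SPP curve.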
@@ -188,7 +191,7 @@ def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True,
         named_modules = submodules
         submodules = [root_module.get_submodule(m) for m in submodules]
 
-    if not(len(submodules)):
+    if not len(submodules):
         named_modules, submodules = list(zip(*root_module.named_children()))
 
     for n, m in zip(named_modules, submodules):
@@ -203,13 +206,14 @@ def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True,
                     module.get_submodule(split[0]).add_module(split[1], submodule)
                 else:
                     module.add_module(name, submodule)
+
             # Freeze batch norm
             if mode == 'freeze':
                 res = freeze_batch_norm_2d(m)
                 # It's possible that `m` is a type of BatchNorm in itself, in which case `unfreeze_batch_norm_2d` won't
                 # convert it in place, but will return the converted result. In this case `res` holds the converted
                 # result and we may try to re-assign the named module
-                if isinstance(m, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
+                if isinstance(m, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
                     _add_submodule(root_module, n, res)
             # Unfreeze batch norm
             else:
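The comment block above is the subtle part: a conversion helper can replace BatchNorm children in place by reassigning them on their parent module, but when the module passed in is itself a BatchNorm there is no parent to mutate, so the converted module only comes back as the return value and the caller must re-attach it (which is what _add_submodule does here). A sketch of that contract — to_frozen below is a hypothetical stand-in, not timm's freeze_batch_norm_2d:

    import torch.nn as nn
    from torchvision.ops.misc import FrozenBatchNorm2d

    def to_frozen(module):
        if isinstance(module, nn.BatchNorm2d):
            res = FrozenBatchNorm2d(module.num_features)  # buffers only, nothing trainable
            res.weight.data.copy_(module.weight.data)
            res.bias.data.copy_(module.bias.data)
            res.running_mean.data.copy_(module.running_mean)
            res.running_var.data.copy_(module.running_var)
            return res  # cannot convert in place: caller must re-assign
        for name, child in module.named_children():
            module.add_module(name, to_frozen(child))  # children convert in place
        return module

    bn = nn.BatchNorm2d(16)
    print(type(to_frozen(bn)).__name__)  # FrozenBatchNorm2d: re-assignment needed

    seq = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))
    to_frozen(seq)
    print(type(seq[1]).__name__)         # FrozenBatchNorm2d: converted in place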
@@ -267,4 +271,3 @@ def unfreeze(root_module, submodules=[], include_bn_running_stats=True):
     See example in docstring for `freeze`.
     """
     _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze")
-
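Taken together, freeze and unfreeze are designed to round-trip. A hedged usage sketch, assuming a timm model (the submodule names are specific to resnet18 and purely illustrative):

    import timm
    from timm.utils.model import freeze, unfreeze

    model = timm.create_model('resnet18', pretrained=False)

    # Stop gradients for the stem and first two stages; with the default
    # include_bn_running_stats=True their BatchNorm2d layers are also swapped
    # for FrozenBatchNorm2d so running stats stop updating.
    freeze(model, submodules=['conv1', 'bn1', 'layer1', 'layer2'])
    assert all(not p.requires_grad for p in model.layer1.parameters())

    # Later, restore trainability (and, per the helpers above, regular BatchNorm2d).
    unfreeze(model, submodules=['conv1', 'bn1', 'layer1', 'layer2'])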