From 2cb2699dc85a24a3aa8c84bd11c937fbebc48c23 Mon Sep 17 00:00:00 2001
From: Ross Wightman
Date: Fri, 3 Feb 2023 11:28:57 -0800
Subject: [PATCH] Apply fix from #1649 to main

---
 timm/models/maxxvit.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/timm/models/maxxvit.py b/timm/models/maxxvit.py
index f41dba8b..9030f206 100644
--- a/timm/models/maxxvit.py
+++ b/timm/models/maxxvit.py
@@ -36,7 +36,7 @@ Hacked together by / Copyright 2022, Ross Wightman
 
 import math
 from collections import OrderedDict
-from dataclasses import dataclass, replace
+from dataclasses import dataclass, replace, field
 from functools import partial
 from typing import Callable, Optional, Union, Tuple, List
 
@@ -133,8 +133,8 @@ class MaxxVitCfg:
     block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T')
     stem_width: Union[int, Tuple[int, int]] = 64
     stem_bias: bool = False
-    conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg()
-    transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg()
+    conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg)
+    transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg)
     head_hidden_size: int = None
     weight_init: str = 'vit_eff'
 
@@ -2279,4 +2279,4 @@ def maxvit_xlarge_tf_384(pretrained=False, **kwargs):
 
 @register_model
 def maxvit_xlarge_tf_512(pretrained=False, **kwargs):
-    return _create_maxxvit('maxvit_xlarge_tf_512', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
\ No newline at end of file
+    return _create_maxxvit('maxvit_xlarge_tf_512', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
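
For context on the dataclass change: Python 3.11 tightened the dataclasses
mutable-default check, so a non-frozen dataclass instance such as
MaxxVitConvCfg() can no longer be used directly as a field default; the class
definition itself fails with "ValueError: mutable default ... is not allowed:
use default_factory". Wrapping the default in field(default_factory=...) is
the portable form and also gives each config object its own nested instance
instead of one shared across all instances. A minimal sketch of the failure
mode and the fix, using hypothetical ConvCfg/ModelCfg stand-ins rather than
the actual timm classes:

    from dataclasses import dataclass, field


    @dataclass
    class ConvCfg:  # hypothetical stand-in for MaxxVitConvCfg
        kernel_size: int = 3


    @dataclass
    class ModelCfg:  # hypothetical stand-in for MaxxVitCfg
        # On Python >= 3.11 this form fails at class-definition time with
        # "ValueError: mutable default <class 'ConvCfg'> for field conv_cfg
        # is not allowed: use default_factory", because a non-frozen
        # dataclass instance is unhashable and thus treated as mutable:
        #
        #   conv_cfg: ConvCfg = ConvCfg()
        #
        # default_factory defers construction, works on all Python versions,
        # and gives every ModelCfg() a fresh ConvCfg rather than a shared one:
        conv_cfg: ConvCfg = field(default_factory=ConvCfg)


    a, b = ModelCfg(), ModelCfg()
    assert a.conv_cfg is not b.conv_cfg  # each instance owns its sub-config

The pre-fix form happened to import fine on Python <= 3.10, where the check
only rejected list/dict/set defaults, but every instance then shared a single
default config object; default_factory avoids both problems at once.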