From cbcb76d72c74cab6d0ab12915e1cf851605c6f59 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Thu, 18 Mar 2021 23:15:48 -0700 Subject: [PATCH] Should have included Conv2d layers in original weight init. Let's see what the impact is... --- timm/models/vision_transformer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py index 5fb5c7c7..42943fab 100644 --- a/timm/models/vision_transformer.py +++ b/timm/models/vision_transformer.py @@ -476,7 +476,7 @@ class VisionTransformer(nn.Module): def _init_weights_original(m: nn.Module, n: str = ''): - if isinstance(m, nn.Linear): + if isinstance(m, (nn.Conv2d, nn.Linear)): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0)