diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py
index e29c2231a0..8261aa009c 100644
--- a/timm/models/vision_transformer.py
+++ b/timm/models/vision_transformer.py
@@ -2152,10 +2152,20 @@ def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
 
     'vit_base_patch16_reg4_gap_256.untrained': _cfg(
         input_size=(3, 256, 256)),
-    'vit_so150m_patch16_reg4_gap_256.untrained': _cfg(
-        input_size=(3, 256, 256)),
+    'vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k_ft_in1k': _cfg(
+        hf_hub_id='timm/',
+        input_size=(3, 256, 256), crop_pct=0.95),
+    'vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k': _cfg(
+        hf_hub_id='timm/',
+        num_classes=11821,
+        input_size=(3, 256, 256), crop_pct=0.95),
+    'vit_so150m_patch16_reg4_gap_384.sbb_e250_in12k_ft_in1k': _cfg(
+        hf_hub_id='timm/',
+        input_size=(3, 384, 384), crop_pct=1.0),
     'vit_so150m_patch16_reg4_map_256.untrained': _cfg(
         input_size=(3, 256, 256)),
+    'vit_so150m2_patch16_reg1_gap_256.untrained': _cfg(
+        input_size=(3, 256, 256), crop_pct=0.95),
 
     'vit_intern300m_patch14_448.ogvl_dist': _cfg(
         hf_hub_id='timm/',
@@ -3462,6 +3472,7 @@ def vit_base_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionT
 
 @register_model
 def vit_so150m_patch16_reg4_map_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
+    """ SO150M (shape optimized, but diff than paper def, optimized for GPU) """
     model_args = dict(
         patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572,
         class_token=False, reg_tokens=4, global_pool='map',
@@ -3473,6 +3484,7 @@ def vit_so150m_patch16_reg4_map_256(pretrained: bool = False, **kwargs) -> Visio
 
 @register_model
 def vit_so150m_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
+    """ SO150M (shape optimized, but diff than paper def, optimized for GPU) """
     model_args = dict(
         patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572,
         class_token=False, reg_tokens=4, global_pool='avg', fc_norm=False,
@@ -3482,6 +3494,30 @@ def vit_so150m_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> Visio
     return model
 
 
+@register_model
+def vit_so150m_patch16_reg4_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
+    """ SO150M (shape optimized, but diff than paper def, optimized for GPU) """
+    model_args = dict(
+        patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572,
+        class_token=False, reg_tokens=4, global_pool='avg', fc_norm=False,
+    )
+    model = _create_vision_transformer(
+        'vit_so150m_patch16_reg4_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
+    return model
+
+
+@register_model
+def vit_so150m2_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
+    """ SO150M v2 (shape optimized, but diff than paper def, optimized for GPU) """
+    model_args = dict(
+        patch_size=16, embed_dim=896, depth=20, num_heads=14, mlp_ratio=2.429, init_values=1e-5,
+        qkv_bias=False, class_token=False, reg_tokens=1, global_pool='avg',
+    )
+    model = _create_vision_transformer(
+        'vit_so150m2_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
+    return model
+
+
 @register_model
 def vit_intern300m_patch14_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
     model_args = dict(
diff --git a/timm/models/vovnet.py b/timm/models/vovnet.py
index 86851666a2..08e6d0b6c3 100644
--- a/timm/models/vovnet.py
+++ b/timm/models/vovnet.py
@@ -419,7 +419,12 @@ def _cfg(url='', **kwargs):
     'ese_vovnet39b.ra_in1k': _cfg(
         hf_hub_id='timm/',
         test_input_size=(3, 288, 288), test_crop_pct=0.95),
-    'ese_vovnet57b.untrained': _cfg(url=''),
+    'ese_vovnet57b.ra4_e3600_r256_in1k': _cfg(
+        hf_hub_id='timm/',
+        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
+        crop_pct=0.95, input_size=(3, 256, 256), pool_size=(8, 8),
+        test_input_size=(3, 320, 320), test_crop_pct=1.0
+    ),
     'ese_vovnet99b.untrained': _cfg(url=''),
     'eca_vovnet39b.untrained': _cfg(url=''),
     'ese_vovnet39b_evos.untrained': _cfg(url=''),
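Usage note (not part of the diff): the new `register_model` entries and pretrained-tag cfgs plug into timm's standard `create_model` factory. Below is a minimal sketch of exercising them; it assumes timm >= 0.9 'model.tag' name resolution, that the referenced `timm/` Hugging Face Hub weights are published, and the shapes in comments are illustrative.

```python
import torch
import timm

# The new architectures are creatable by the names registered above;
# pretrained=False gives a randomly initialized model (the '.untrained'
# cfg entry carries input metadata only, no weights).
model = timm.create_model('vit_so150m2_patch16_reg1_gap_256', pretrained=False)
model.eval()

with torch.no_grad():
    logits = model(torch.randn(1, 3, 256, 256))  # 256x256 native res per cfg
print(logits.shape)  # torch.Size([1, 1000])

# A 'model.tag' name selects a specific pretrained cfg; pretrained=True
# would pull weights via the 'timm/' hf_hub_id (assuming published).
ft = timm.create_model(
    'vit_so150m_patch16_reg4_gap_384.sbb_e250_in12k_ft_in1k',
    pretrained=False,  # set True to actually download the weights
)

# Eval-time hints from the new vovnet cfg are readable off the resolved
# pretrained_cfg without downloading anything.
vov = timm.create_model('ese_vovnet57b.ra4_e3600_r256_in1k', pretrained=False)
print(vov.pretrained_cfg['test_input_size'])  # (3, 320, 320)
print(vov.pretrained_cfg['test_crop_pct'])    # 1.0
```

Note the train/test resolution split in the vovnet cfg (256 train, 320 eval) mirrors the ViT entries' crop_pct handling: the cfg records how the weights were trained and how they are best evaluated, and downstream code can opt into either.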