def ddf_mul_resnet50(pretrained=False, **kwargs):
    """ResNet-50 with DDF multiplicative-combination bottleneck blocks."""
    model_args = dict(block=DDFMulBottleneck, layers=[3, 4, 6, 3], **kwargs)
    return build_model_with_cfg(
        ResNet, 'ddf_mul_resnet50', default_cfg=default_cfgs['ddf_mul_resnet50'],
        pretrained=pretrained, **model_args)
def _create_regnet(variant, pretrained, **kwargs):
    return build_model_with_cfg(
        model_cls=RegNet, variant=variant, pretrained=pretrained,
        default_cfg=default_cfgs[variant], model_cfg=model_cfgs[variant], **kwargs)
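# Usage sketch for the shared creator above: per-variant wrappers pass only the
# variant key, and everything else is looked up in default_cfgs / model_cfgs.
# 'regnetx_002' is an assumed example key; substitute one that actually exists
# in those dicts.
def regnetx_002(pretrained=False, **kwargs):
    return _create_regnet('regnetx_002', pretrained, **kwargs)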
def ddf_add_resnet152(pretrained=False, **kwargs):
    """ResNet-152 with DDF additive-combination bottleneck blocks."""
    model_args = dict(block=DDFAddBottleneck, layers=[3, 8, 36, 3], **kwargs)
    return build_model_with_cfg(
        ResNet, 'ddf_add_resnet152', default_cfg=default_cfgs['ddf_add_resnet152'],
        pretrained=pretrained, **model_args)
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    # NOTE: 'distilled' is accepted for API compatibility but currently unused here.
    default_cfg = deepcopy(default_cfgs[variant])
    overlay_external_default_cfg(default_cfg, kwargs)
    default_num_classes = default_cfg['num_classes']
    default_img_size = default_cfg['input_size'][1:]

    num_classes = kwargs.pop('num_classes', default_num_classes)
    img_size = kwargs.pop('img_size', default_img_size)
    repr_size = kwargs.pop('representation_size', None)
    if repr_size is not None and num_classes != default_num_classes:
        # Remove the representation layer if fine-tuning. This may not always be the
        # desired action, but it seems better than silently doing nothing by default.
        # Perhaps a better interface?
        repr_size = None

    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    model = build_model_with_cfg(
        VisionTransformer, variant, pretrained,
        default_cfg=default_cfg,
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        pretrained_filter_fn=checkpoint_filter_fn,
        **kwargs)
    return model
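# Usage sketch for the helper above, assuming a 'vit_base_patch16_224' entry in
# default_cfgs; the architecture kwargs shown are illustrative, not this file's
# definitive values. Callers may override num_classes / img_size, which the
# helper pops from kwargs and forwards explicitly.
def vit_base_patch16_224(pretrained=False, **kwargs):
    model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs)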
def _create_rexnet(variant, pretrained, **kwargs):
    feature_cfg = dict(flatten_sequential=True)
    if kwargs.get('feature_location', '') == 'expansion':
        # Expansion-point features require forward hooks rather than module flattening.
        feature_cfg['feature_cls'] = 'hook'
    return build_model_with_cfg(
        ReXNetV1, variant, pretrained, default_cfg=default_cfgs[variant],
        feature_cfg=feature_cfg, **kwargs)
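# Usage sketch: build_model_with_cfg consumes features_only / feature_location
# from kwargs, so expansion-point feature taps can be requested like this
# ('rexnet_100' is an assumed variant key):
#
#   model = _create_rexnet('rexnet_100', pretrained=False,
#                          features_only=True, feature_location='expansion')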
def _create_tnt(variant, pretrained=False, **kwargs):
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for TNT (Transformer in Transformer) models.')
    model = build_model_with_cfg(
        TNT, variant, pretrained, default_cfg=default_cfgs[variant],
        pretrained_filter_fn=checkpoint_filter_fn, **kwargs)
    return model
def _create_normfreenet(variant, pretrained=False, **kwargs):
    model_cfg = model_cfgs[variant]
    feature_cfg = dict(flatten_sequential=True)
    return build_model_with_cfg(
        NormFreeNet, variant, pretrained,
        default_cfg=default_cfgs[variant], model_cfg=model_cfg,
        feature_cfg=feature_cfg, **kwargs)
def scearesnet18(pretrained=False, **kwargs):
    """ResNet-18 with SCEAModule attention in each BasicBlock."""
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer=SCEAModule), **kwargs)
    default_cfg = timm_resnet_cfg()
    return build_model_with_cfg(ResNet, 'scearesnet18', default_cfg=default_cfg, pretrained=pretrained, **model_args)
def scseresnet18(pretrained=False, **kwargs):
    """ResNet-18 with SCSEModule attention in each BasicBlock."""
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer=SCSEModule), **kwargs)
    default_cfg = timm_resnet_cfg(url='', interpolation='bicubic')
    return build_model_with_cfg(ResNet, 'scseresnet18', default_cfg=default_cfg, pretrained=pretrained, **model_args)
def _create_mnv3(model_kwargs, variant, pretrained=False):
    if model_kwargs.pop('features_only', False):
        # Feature-extraction mode: strip classifier-specific kwargs and allow
        # non-strict checkpoint loading, since the head weights are absent.
        load_strict = False
        model_kwargs.pop('num_classes', 0)
        model_kwargs.pop('num_features', 0)
        model_kwargs.pop('head_conv', None)
        model_kwargs.pop('head_bias', None)
        model_cls = MobileNetV3Features
    else:
        load_strict = True
        model_cls = MobileNetV3
    return build_model_with_cfg(
        model_cls, variant, pretrained, default_cfg=default_cfgs[variant],
        pretrained_strict=load_strict, **model_kwargs)
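# Usage sketch: with features_only=True the factory returns a MobileNetV3Features
# model that yields intermediate feature maps rather than classification logits
# ('mobilenetv3_large_100' is an assumed variant key):
#
#   model = _create_mnv3(dict(features_only=True), 'mobilenetv3_large_100', pretrained=False)
#   feature_maps = model(torch.randn(1, 3, 224, 224))  # list of tensors, one per feature stage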
def _create_vision_transformer(variant, pretrained=False, **kwargs):
    default_cfg = deepcopy(default_cfgs[variant])
    overlay_external_default_cfg(default_cfg, kwargs)
    default_num_classes = default_cfg['num_classes']
    num_classes = kwargs.pop('num_classes', default_num_classes)
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Hierarchical Visual Transformer models.')
    model = build_model_with_cfg(
        VisionTransformer, variant, pretrained,
        default_cfg=default_cfg, num_classes=num_classes, **kwargs)
    return model
def _create_volo(variant, pretrained=False, **kwargs):
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for VOLO models.')
    return build_model_with_cfg(VOLO, variant, pretrained, **kwargs)
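# Variant wrapper sketch, assuming a matching 'volo_d1_224' entry exists in this
# file's default_cfgs; the layer/width/head values below follow the published
# VOLO-D1 configuration but should be checked against the actual configs here.
def volo_d1_224(pretrained=False, **kwargs):
    model_args = dict(
        layers=[4, 4, 8, 2], embed_dims=[192, 384, 384, 384],
        num_heads=[6, 12, 12, 12], **kwargs)
    return _create_volo('volo_d1_224', pretrained=pretrained, **model_args)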