def get_squeezenet(version, pretrained=False, root=os.path.join(os.path.expanduser('~'), '.torch/models'), **kwargs):
    r"""Build a SqueezeNet model.

    SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer
    parameters and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
    SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.

    Parameters
    ----------
    version : str
        Version of squeezenet. Options are '1.0', '1.1'.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    model = SqueezeNet(version, **kwargs)
    if not pretrained:
        return model
    # Fetch (or locate) the cached checkpoint and restore the weights.
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    weights_path = get_model_file('squeezenet%s' % version, root=root)
    model.load_state_dict(torch.load(weights_path))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def inception_v3(pretrained=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
    r"""Build an Inception v3 model.

    Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
    root : str, default $TORCH_HOME/models
        Location for keeping the model parameters.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm`)
        Can be :class:`nn.BatchNorm` or :class:`other normalization`.
    norm_kwargs : dict
        Additional `norm_layer` arguments.
    """
    model = Inception3(**kwargs)
    if not pretrained:
        return model
    # Restore the pretrained checkpoint from the local model store.
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(get_model_file('inceptionv3', root=root)))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def resnet152_v1b(pretrained=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
    """Construct a ResNetV1b-152 model.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride 8 model.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm`)
        Can be :class:`nn.BatchNorm` or :class:`other normalization`.
    last_gamma : bool, default False
        Whether to initialize the gamma of the last BatchNorm layer in each
        bottleneck to zero.
    use_global_stats : bool, default False
        Whether forcing BatchNorm to use global statistics instead of minibatch
        statistics; optionally set to True if finetuning using ImageNet
        classification pretrained models.
    """
    # 152-layer configuration: [3, 8, 36, 3] bottleneck stages.
    net = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return net
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    checkpoint = get_model_file('resnet%d_v%db' % (152, 1), root=root)
    net.load_state_dict(torch.load(checkpoint))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    net.synset = attrs.synset
    net.classes = attrs.classes
    net.classes_long = attrs.classes_long
    return net
def resnet101_v1s(pretrained=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
    """Construct a ResNetV1s-101 model.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride 8 model.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm`).
        Can be :class:`nn.BatchNorm` or :class:`other normalization`.
    """
    # 101-layer configuration with a deep 64-wide stem (the "v1s" variant).
    net = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, stem_width=64, **kwargs)
    if not pretrained:
        return net
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    checkpoint = get_model_file('resnet%d_v%ds' % (101, 1), root=root)
    net.load_state_dict(torch.load(checkpoint))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    net.synset = attrs.synset
    net.classes = attrs.classes
    net.classes_long = attrs.classes_long
    return net
def resnet50_v1d_86(pretrained=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
    """Constructs a ResNetV1d-50_1.8x model.

    Uses resnet50_v1d construction from resnetv1b.py

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights
        for model. String value represents the hashtag for a certain version of
        pretrained weights.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True, **kwargs)
    # The pruning recipe (per-block channel counts) lives in a JSON file next
    # to this module; order matters, hence OrderedDict.
    dirname = os.path.dirname(__file__)
    json_filename = os.path.join(dirname, 'resnet%d_v%dd_%.1fx' % (50, 1, 1.8) + ".json")
    with open(json_filename, "r") as jsonFile:
        params_items = json.load(jsonFile, object_pairs_hook=OrderedDict)
    prune_torch_block.idx = 0
    if pretrained:
        # FIX: `torch` was used below without being imported in this scope;
        # sibling factory functions import it locally, so do the same here.
        import torch
        from model.model_store import get_model_file
        params_file = get_model_file('resnet%d_v%dd_%.2f' % (50, 1, 0.86), root=root)
        prune_torch_block(model, list(params_items.keys()), list(params_items.values()),
                          params=torch.load(params_file), pretrained=True)
        # Attach the ImageNet label metadata for convenience.
        from data.imagenet import ImageNetAttr
        attrib = ImageNetAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    else:
        prune_torch_block(model, list(params_items.keys()), list(params_items.values()),
                          params=None, pretrained=False)
    return model
def get_mobilenet_v2(multiplier, pretrained=None, **kwargs):
    r"""Build a MobileNetV2 model.

    MobileNetV2 model from the `"Inverted Residuals and Linear Bottlenecks:
    Mobile Networks for Classification, Detection and Segmentation"
    <https://arxiv.org/abs/1801.04381>`_ paper.

    Parameters
    ----------
    multiplier : float
        The width multiplier for controlling the model size. Only multipliers
        that are no less than 0.25 are supported. The actual number of channels
        is equal to the original channel size multiplied by this multiplier.
    pretrained : str
        the default pretrained weights for model.
    """
    model = MobileNetV2(multiplier, **kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def get_squeezenet(version, pretrained=None, **kwargs):
    r"""Build a SqueezeNet model.

    SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer
    parameters and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
    SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.

    Parameters
    ----------
    version : str
        Version of squeezenet. Options are '1.0', '1.1'.
    pretrained : str
        the default pretrained weights for model.
    """
    model = SqueezeNet(version, **kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def get_resnet(version, num_layers, pretrained=None, **kwargs):
    r"""Build a ResNet model.

    ResNet V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    version : int
        Version of ResNet. Options are 1, 2.
    num_layers : int
        Numbers of layers. Options are 18, 34, 50, 101, 152.
    pretrained : str
        default pretrained weights for model.
    """
    assert num_layers in resnet_spec, \
        "Invalid number of layers: %d. Options are %s" % (
            num_layers, str(resnet_spec.keys()))
    assert 1 <= version <= 2, \
        "Invalid resnet version: %d. Options are 1 and 2." % version
    block_type, layers, channels = resnet_spec[num_layers]
    # Pick the network / block classes for the requested ResNet version.
    resnet_class = resnet_net_versions[version - 1]
    block_class = resnet_block_versions[version - 1][block_type]
    model = resnet_class(block_class, layers, channels, **kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def resnet18_v1b_89(pretrained=None, **kwargs):
    """Constructs a ResNetV1b-18_2.6x model.

    Uses resnet18_v1b construction from resnetv1b.py

    Parameters
    ----------
    pretrained : str
        load the default pretrained weights for model.
    """
    model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], **kwargs)
    # The pruning recipe (per-block channel counts) lives in a JSON file next
    # to this module; order matters, hence OrderedDict.
    dirname = os.path.dirname(__file__)
    json_filename = os.path.join(
        dirname, 'pruned_resnet/resnet%d_v%db_%.1fx' % (18, 1, 2.6) + ".json")
    with open(json_filename, "r") as jsonFile:
        params_items = json.load(jsonFile, object_pairs_hook=OrderedDict)
    prune_torch_block.idx = 0
    if pretrained:
        # FIX: `torch` was used below without being imported in this scope;
        # sibling factory functions import it locally, so do the same here.
        import torch
        params = torch.load(pretrained)
        prune_torch_block(model, list(params_items.keys()), list(params_items.values()),
                          params=params, pretrained=True)
        # Attach the ImageNet label metadata for convenience.
        from data.imagenet import ImageNetAttr
        attrib = ImageNetAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    else:
        prune_torch_block(model, list(params_items.keys()), list(params_items.values()),
                          params=None, pretrained=False)
    return model
def get_vgg(num_layers, pretrained=False, root=os.path.join(os.path.expanduser('~'), '.torch/models'), **kwargs):
    r"""Build a VGG model.

    VGG model from the `"Very Deep Convolutional Networks for Large-Scale
    Image Recognition" <https://arxiv.org/abs/1409.1556>`_ paper.

    Parameters
    ----------
    num_layers : int
        Number of layers for the variant of densenet. Options are 11, 13, 16, 19.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights
        for model. String value represents the hashtag for a certain version of
        pretrained weights.
    root : str, default ~/.torch/models
        Location for keeping the model parameters.
    """
    layers, filters = vgg_spec[num_layers]
    model = VGG(layers, filters, img_size=224, **kwargs)
    if not pretrained:
        return model
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    # Batch-norm variants are stored under a distinct "_bn" name.
    batch_norm_suffix = '_bn' if kwargs.get('batch_norm') else ''
    checkpoint = get_model_file('vgg%d%s' % (num_layers, batch_norm_suffix), root=root)
    model.load_state_dict(torch.load(checkpoint))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def get_resnext(num_layers, cardinality=32, bottleneck_width=4, use_se=False,
                pretrained=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
    r"""ResNext model from `"Aggregated Residual Transformations for Deep Neural Network"
    <http://arxiv.org/abs/1611.05431>`_ paper.

    Parameters
    ----------
    num_layers : int
        Numbers of layers. Options are 50, 101.
    cardinality: int
        Number of groups
    bottleneck_width: int
        Width of bottleneck block
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm`)
        Can be :class:`nn.BatchNorm` or :class:`other normalization`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
    """
    assert num_layers in resnext_spec, \
        "Invalid number of layers: %d. Options are %s" % (
            num_layers, str(resnext_spec.keys()))
    layers = resnext_spec[num_layers]
    net = ResNext(layers, cardinality, bottleneck_width, use_se=use_se, **kwargs)
    if pretrained:
        # FIX: `torch.load` was called below without `torch` being imported in
        # this scope; every sibling factory imports it locally before use.
        import torch
        from model.model_store import get_model_file
        # SE variants are stored under a distinct "se_" prefixed name.
        if not use_se:
            net.load_state_dict(
                torch.load(
                    get_model_file('resnext%d_%dx%dd' % (num_layers, cardinality,
                                                         bottleneck_width), root=root)))
        else:
            net.load_state_dict(
                torch.load(
                    get_model_file('se_resnext%d_%dx%dd' % (num_layers, cardinality,
                                                            bottleneck_width), root=root)))
        # Attach the ImageNet label metadata for convenience.
        from data.imagenet import ImageNetAttr
        attrib = ImageNetAttr()
        net.synset = attrib.synset
        net.classes = attrib.classes
        net.classes_long = attrib.classes_long
    return net
def get_darknet(darknet_version, num_layers, pretrained=False, root=os.path.join(os.path.expanduser('~'), '.torch/models'), **kwargs):
    """Get darknet by `version` and `num_layers` info.

    Parameters
    ----------
    darknet_version : str
        Darknet version, choices are ['v3'].
    num_layers : int
        Number of layers.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm`)
        Can be :class:`nn.BatchNorm` or :class:`other normalization`.
    norm_kwargs : dict
        Additional `norm_layer` arguments.

    Returns
    -------
    nn.Module
        Darknet network.

    Examples
    --------
    >>> model = get_darknet('v3', 53, pretrained=True)
    >>> print(model)
    """
    assert darknet_version in darknet_versions and darknet_version in darknet_spec, (
        "Invalid darknet version: {}. Options are {}".format(
            darknet_version, str(darknet_versions.keys())))
    specs = darknet_spec[darknet_version]
    assert num_layers in specs, (
        "Invalid number of layers: {}. Options are {}".format(num_layers, str(specs.keys())))
    layers, channels = specs[num_layers]
    darknet_class = darknet_versions[darknet_version]
    model = darknet_class(layers, channels, **kwargs)
    if not pretrained:
        return model
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    checkpoint = get_model_file('darknet%d' % num_layers, root=root)
    model.load_state_dict(torch.load(checkpoint))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def get_resnet(version, num_layers, pretrained=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
    r"""Build a ResNet model.

    ResNet V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    version : int
        Version of ResNet. Options are 1, 2.
    num_layers : int
        Numbers of layers. Options are 18, 34, 50, 101, 152.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
    root : str, default $~/.torch/models
        Location for keeping the model parameters.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm`)
        Can be :class:`nn.BatchNorm` or :class:`other normalization`.
    norm_kwargs : dict
        Additional `norm_layer` arguments
    """
    assert num_layers in resnet_spec, \
        "Invalid number of layers: %d. Options are %s" % (
            num_layers, str(resnet_spec.keys()))
    assert 1 <= version <= 2, \
        "Invalid resnet version: %d. Options are 1 and 2." % version
    block_type, layers, channels = resnet_spec[num_layers]
    # Pick the network / block classes for the requested ResNet version.
    resnet_class = resnet_net_versions[version - 1]
    block_class = resnet_block_versions[version - 1][block_type]
    model = resnet_class(block_class, layers, channels, **kwargs)
    if not pretrained:
        return model
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    checkpoint = get_model_file('resnet%d_v%d' % (num_layers, version), root=root)
    # map_location keeps loading CPU-safe regardless of where it was saved.
    model.load_state_dict(torch.load(checkpoint, map_location=lambda storage, loc: storage))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def alexnet(pretrained=None, **kwargs):
    r"""Build an AlexNet model.

    AlexNet model from the `"One weird trick..."
    <https://arxiv.org/abs/1404.5997>`_ paper.

    Parameters
    ----------
    pretrained : str
        the default pretrained weights for model.
    """
    model = AlexNet(**kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def resnet18_v1b(pretrained=None, **kwargs):
    """Constructs a ResNetV1b-18 model.

    Parameters
    ----------
    pretrained : str
        the default pretrained weights for model.
    dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride 8 model.
    """
    model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # FIX: `torch` was used below without being imported in this scope;
        # sibling factory functions import it locally, so do the same here.
        import torch
        model.load_state_dict(torch.load(pretrained))
        # Attach the ImageNet label metadata for convenience.
        from data.imagenet import ImageNetAttr
        attrib = ImageNetAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
def resnet50_v1s(pretrained=None, **kwargs):
    """Constructs a ResNetV1s-50 model.

    Parameters
    ----------
    pretrained : str
        the default pretrained weights for model.
    dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride 8 model.
    """
    # 50-layer configuration with a deep 64-wide stem (the "v1s" variant).
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, stem_width=64, **kwargs)
    if pretrained:
        # FIX: `torch` was used below without being imported in this scope;
        # sibling factory functions import it locally, so do the same here.
        import torch
        model.load_state_dict(torch.load(pretrained))
        # Attach the ImageNet label metadata for convenience.
        from data.imagenet import ImageNetAttr
        attrib = ImageNetAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
def get_darknet(darknet_version, num_layers, pretrained=None, **kwargs):
    """Get darknet by `version` and `num_layers` info.

    Parameters
    ----------
    darknet_version : str
        Darknet version, choices are ['v3'].
    num_layers : int
        Number of layers.
    pretrained : str
        the default pretrained weights for model.

    Returns
    -------
    nn.Module
        Darknet network.

    Examples
    --------
    >>> model = get_darknet('v3', 53, pretrained=True)
    >>> print(model)
    """
    assert darknet_version in darknet_versions and darknet_version in darknet_spec, (
        "Invalid darknet version: {}. Options are {}".format(
            darknet_version, str(darknet_versions.keys())))
    specs = darknet_spec[darknet_version]
    assert num_layers in specs, (
        "Invalid number of layers: {}. Options are {}".format(
            num_layers, str(specs.keys())))
    layers, channels = specs[num_layers]
    darknet_class = darknet_versions[darknet_version]
    model = darknet_class(layers, channels, **kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def inception_v3(pretrained=None, **kwargs):
    r"""Build an Inception v3 model.

    Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.

    Parameters
    ----------
    pretrained : str
        the default pretrained weights for model.
    """
    model = Inception3(**kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def get_vgg(num_layers, pretrained=None, **kwargs):
    r"""Build a VGG model.

    VGG model from the `"Very Deep Convolutional Networks for Large-Scale
    Image Recognition" <https://arxiv.org/abs/1409.1556>`_ paper.

    Parameters
    ----------
    num_layers : int
        Number of layers for the variant of densenet. Options are 11, 13, 16, 19.
    pretrained : str
        the default pretrained weights for model.
    """
    layers, filters = vgg_spec[num_layers]
    model = VGG(layers, filters, **kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def get_mobilenet_v2(multiplier, pretrained=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
    r"""Build a MobileNetV2 model.

    MobileNetV2 model from the `"Inverted Residuals and Linear Bottlenecks:
    Mobile Networks for Classification, Detection and Segmentation"
    <https://arxiv.org/abs/1801.04381>`_ paper.

    Parameters
    ----------
    multiplier : float
        The width multiplier for controlling the model size. Only multipliers
        that are no less than 0.25 are supported. The actual number of channels
        is equal to the original channel size multiplied by this multiplier.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights
        for model. String value represents the hashtag for a certain version of
        pretrained weights.
    root : str, default ~/.torch/models
        Location for keeping the model parameters.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm`)
        Can be :class:`nn.BatchNorm` or :class:`other normalization`.
    norm_kwargs : dict
        Additional `norm_layer` arguments.
    """
    model = MobileNetV2(multiplier, **kwargs)
    if not pretrained:
        return model
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    # Checkpoint names use the multiplier with trailing-zero trimming for the
    # 1.0 / 0.5 variants (stored as "1.0" and "0.5", not "1.00" / "0.50").
    version_suffix = '{0:.2f}'.format(multiplier)
    if version_suffix in ('1.00', '0.50'):
        version_suffix = version_suffix[:-1]
    checkpoint = get_model_file('mobilenetv2_%s' % version_suffix, root=root)
    model.load_state_dict(torch.load(checkpoint))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def get_densenet(num_layers, pretrained=None, **kwargs):
    r"""Build a Densenet-BC model.

    Densenet-BC model from the `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.

    Parameters
    ----------
    num_layers : int
        Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
    pretrained : str
        the default pretrained weights for model.
    """
    num_init_features, growth_rate, block_config = densenet_spec[num_layers]
    model = DenseNet(num_init_features, growth_rate, block_config, **kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def alexnet(pretrained=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
    r"""Build an AlexNet model.

    AlexNet model from the `"One weird trick..."
    <https://arxiv.org/abs/1404.5997>`_ paper.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    model = AlexNet(**kwargs)
    if not pretrained:
        return model
    import torch
    from model.model_store import get_model_file
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(get_model_file('alexnet', root=root)))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model
def resnet101_v1b_gn(pretrained=None, **kwargs):
    """Constructs a ResNetV1b-101 GroupNorm model.

    Parameters
    ----------
    pretrained : str
        the default pretrained weights for model.
    dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride 8 model.
    """
    from model.module.basic import GroupNorm
    from model.module.convert import convert_norm_layer
    model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], **kwargs)
    # Swap every BatchNorm in the constructed network for 32-group GroupNorm.
    norm_kwargs = {'num_groups': 32}
    model = convert_norm_layer(model, norm_layer=GroupNorm, norm_kwargs=norm_kwargs)
    if pretrained:
        # FIX: `torch` was used below without being imported in this scope;
        # sibling factory functions import it locally, so do the same here.
        import torch
        model.load_state_dict(torch.load(pretrained))
        # Attach the ImageNet label metadata for convenience.
        from data.imagenet import ImageNetAttr
        attrib = ImageNetAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
def get_resnext(num_layers, cardinality=32, bottleneck_width=4, use_se=False,
                pretrained=None, **kwargs):
    r"""ResNext model from `"Aggregated Residual Transformations for Deep Neural Network"
    <http://arxiv.org/abs/1611.05431>`_ paper.

    Parameters
    ----------
    num_layers : int
        Numbers of layers. Options are 50, 101.
    cardinality: int
        Number of groups
    bottleneck_width: int
        Width of bottleneck block
    pretrained : str
        the default pretrained weights for model.
    """
    assert num_layers in resnext_spec, \
        "Invalid number of layers: %d. Options are %s" % (
            num_layers, str(resnext_spec.keys()))
    layers = resnext_spec[num_layers]
    net = ResNext(layers, cardinality, bottleneck_width, use_se=use_se, **kwargs)
    if pretrained:
        # FIX: `torch` was used below without being imported in this scope;
        # sibling factory functions import it locally, so do the same here.
        import torch
        net.load_state_dict(torch.load(pretrained))
        # Attach the ImageNet label metadata for convenience.
        from data.imagenet import ImageNetAttr
        attrib = ImageNetAttr()
        net.synset = attrib.synset
        net.classes = attrib.classes
        net.classes_long = attrib.classes_long
    return net
def get_mobilenet(multiplier, pretrained=None, **kwargs):
    r"""Build a MobileNet model.

    MobileNet model from the `"MobileNets: Efficient Convolutional Neural
    Networks for Mobile Vision Applications"
    <https://arxiv.org/abs/1704.04861>`_ paper.

    Parameters
    ----------
    multiplier : float
        The width multiplier for controlling the model size. Only multipliers
        that are no less than 0.25 are supported. The actual number of channels
        is equal to the original channel size multiplied by this multiplier.
    pretrained : str
        the default pretrained weights for model.
    """
    model = MobileNet(multiplier, **kwargs)
    if not pretrained:
        return model
    # `pretrained` is a filesystem path to a saved state dict.
    import torch
    from data.imagenet import ImageNetAttr
    model.load_state_dict(torch.load(pretrained))
    # Attach the ImageNet label metadata for convenience.
    attrs = ImageNetAttr()
    model.synset = attrs.synset
    model.classes = attrs.classes
    model.classes_long = attrs.classes_long
    return model