Example 1
    def __init__(self,
                 histogram_layer,
                 parallel=True,
                 model_name='resnet18',
                 add_bn=True,
                 scale=5,
                 pretrained=True):
        """Build a backbone CNN with its classification head detached so a
        histogram layer can be attached in its place.

        :param histogram_layer: histogram pooling module stored on the instance
        :param parallel: stored on the instance; consumed by the forward pass
        :param model_name: one of 'resnet18', 'resnet50', 'resnet50_wide',
            'resnet50_next', 'densenet121', 'efficientnet', 'regnet'
        :param add_bn: when True, add a BatchNorm2d over the backbone features
        :param scale: stored on the instance; consumed elsewhere
        :param pretrained: forwarded to the torchvision constructors
        :raises RuntimeError: if ``model_name`` is not recognized
        """
        # inherit nn.Module
        super(HistRes, self).__init__()
        self.parallel = parallel
        self.add_bn = add_bn
        self.scale = scale

        # Default to resnet18, otherwise use the requested backbone.
        # Every branch must set ``self.backbone`` — the head surgery at the
        # bottom of this method depends on it.  (The densenet/efficientnet/
        # regnet branches previously assigned a local ``model_ft`` instead,
        # leaving ``self.backbone`` undefined for those models.)
        if model_name == 'resnet18':
            self.backbone = models.resnet18(pretrained=pretrained)
            num_ftrs = self.backbone.fc.in_features

        elif model_name == 'resnet50':
            self.backbone = models.resnet50(pretrained=pretrained)
            num_ftrs = self.backbone.fc.in_features

        elif model_name == "resnet50_wide":
            self.backbone = models.wide_resnet50_2(pretrained=pretrained)
            num_ftrs = self.backbone.fc.in_features

        elif model_name == "resnet50_next":
            self.backbone = models.resnext50_32x4d(pretrained=pretrained)
            num_ftrs = self.backbone.fc.in_features

        elif model_name == "densenet121":
            self.backbone = models.densenet121(pretrained=pretrained,
                                               memory_efficient=True)
            num_ftrs = self.backbone.classifier.in_features

        elif model_name == "efficientnet":
            self.backbone = models.efficientnet_b0(pretrained=pretrained)
            # EfficientNet's ``classifier`` is Sequential(Dropout, Linear);
            # the feature width lives on the final Linear, not the Sequential.
            num_ftrs = self.backbone.classifier[-1].in_features

        elif model_name == "regnet":
            self.backbone = models.regnet_x_400mf(pretrained=pretrained)
            # torchvision RegNet exposes ``fc`` rather than ``classifier``.
            num_ftrs = self.backbone.fc.in_features

        else:
            # Fail fast: printing and continuing would crash later with an
            # undefined ``num_ftrs``/``self.backbone``.
            raise RuntimeError('Model {} not defined'.format(model_name))

        if self.add_bn:
            self.bn_norm = nn.BatchNorm2d(num_ftrs)

        # Define histogram layer and fc
        self.histogram_layer = histogram_layer

        # Detach the classification head: ResNet-style backbones expose
        # ``fc``; the remaining torchvision backbones expose ``classifier``.
        try:
            self.fc = self.backbone.fc
            self.backbone.fc = torch.nn.Sequential()
        except AttributeError:
            self.fc = self.backbone.classifier
            self.backbone.classifier = torch.nn.Sequential()
Example 2
def two_channel_efficientnetb0():
    """Return an EfficientNet-B0 whose stem accepts 2-channel input.

    The stem convolution (``features[0][0]``) is swapped for a 2-in-channel
    Conv2d; every other hyper-parameter mirrors the stock 3-channel stem.
    """
    net = efficientnet_b0()
    stem = nn.Conv2d(2, 32,
                     kernel_size=(3, 3),
                     stride=(2, 2),
                     padding=(1, 1),
                     bias=False)
    net.features[0][0] = stem
    return net
Example 3
    def _get_model_and_layer(self, model_name, layer):
        """Internal method for getting a pretrained torchvision model and the
        layer to hook for feature extraction.

        :param model_name: model name such as 'resnet-18', 'alexnet', 'vgg',
            'densenet', 'efficientnet_b0'..'efficientnet_b7', or any
            torchvision ``resnet*`` constructor name (e.g. 'resnet50')
        :param layer: layer as a string for resnet models, or an int offset
            from the end of ``classifier`` for alexnet/vgg; 'default' picks a
            standard feature layer and records its output size on ``self``
        :returns: pytorch model, selected layer
        :raises KeyError: for unknown models or unsupported layer choices
        """
        # Generic torchvision resnets ('resnet18', 'resnet50', ...), excluding
        # the legacy 'resnet-18' spelling handled by the next branch.
        if model_name.startswith(
                'resnet') and not model_name.startswith('resnet-'):
            model = getattr(models, model_name)(pretrained=True)
            if layer == 'default':
                layer = model._modules.get('avgpool')
                self.layer_output_size = self.RESNET_OUTPUT_SIZES[model_name]
            else:
                layer = model._modules.get(layer)
            return model, layer

        elif model_name == 'resnet-18':
            model = models.resnet18(pretrained=True)
            if layer == 'default':
                layer = model._modules.get('avgpool')
                self.layer_output_size = 512
            else:
                layer = model._modules.get(layer)

            return model, layer

        elif model_name == 'alexnet':
            model = models.alexnet(pretrained=True)
            if layer == 'default':
                # Second-to-last classifier layer: the 4096-wide Linear.
                layer = model.classifier[-2]
                self.layer_output_size = 4096
            else:
                layer = model.classifier[-layer]

            return model, layer

        elif model_name == 'vgg':
            # VGG-11
            model = models.vgg11_bn(pretrained=True)
            if layer == 'default':
                layer = model.classifier[-2]
                self.layer_output_size = model.classifier[
                    -1].in_features  # should be 4096
            else:
                layer = model.classifier[-layer]

            return model, layer

        elif model_name == 'densenet':
            # Densenet-121; only the 'default' layer is supported.
            model = models.densenet121(pretrained=True)
            if layer == 'default':
                layer = model.features[-1]
                self.layer_output_size = model.classifier.in_features  # should be 1024
            else:
                raise KeyError('Un support %s for layer parameters' %
                               model_name)

            return model, layer

        elif "efficientnet" in model_name:
            # efficientnet_b0 ~ efficientnet_b7: look the constructor up by
            # name instead of enumerating eight identical branches.
            if model_name not in {'efficientnet_b%d' % i for i in range(8)}:
                raise KeyError('Un support %s.' % model_name)
            model = getattr(models, model_name)(pretrained=True)

            if layer == 'default':
                layer = model.features
                self.layer_output_size = self.EFFICIENTNET_OUTPUT_SIZES[
                    model_name]
            else:
                raise KeyError('Un support %s for layer parameters' %
                               model_name)

            return model, layer

        else:
            raise KeyError('Model %s was not found' % model_name)
Example 4
def initialize_model(model_name,
                     num_classes,
                     in_channels,
                     out_channels,
                     feature_extract=False,
                     histogram=True,
                     histogram_layer=None,
                     parallel=True,
                     use_pretrained=True,
                     add_bn=True,
                     scale=5,
                     feat_map_size=4,
                     embed_dim=2):
    """Build either a histogram-layer model (HistRes) or a baseline
    torchvision backbone, then wrap it in an Embedding_model.

    :param model_name: backbone name, e.g. 'resnet18', 'densenet121', ...
    :param num_classes: output width of the final fully-connected layer
    :param in_channels: channels entering the histogram layer
    :param out_channels: backbone feature channels, used to size the 1x1 reduce
    :param feature_extract: freeze backbone weights when True
    :param histogram: use HistRes when True, plain torchvision model otherwise
    :param histogram_layer: histogram module passed to HistRes
    :param parallel: doubles the fc input width in the histogram path
    :param use_pretrained: load pretrained torchvision weights
    :param add_bn, scale: forwarded to HistRes
    :param feat_map_size: spatial size factor used to pick the reduced dim
    :param embed_dim: embedding dimension of the wrapping Embedding_model
    :returns: (model, input_size)
    :raises RuntimeError: for an unimplemented ``model_name``
    """
    # Initialize these variables which will be set in this if statement. Each
    # of these variables is model specific.
    model_ft = None
    input_size = 0
    if histogram:
        model_ft = HistRes(histogram_layer,
                           parallel=parallel,
                           model_name=model_name,
                           add_bn=add_bn,
                           scale=scale,
                           pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft.backbone, feature_extract)

        # Reduce number of conv channels from input channels to
        # input channels / (number of bins * feat_map size).
        reduced_dim = int(
            (out_channels / feat_map_size) / (histogram_layer.numBins))
        if in_channels == reduced_dim:
            # Input channels already match — skip the 1x1 convolution.
            model_ft.histogram_layer = histogram_layer
        else:
            conv_reduce = nn.Conv2d(in_channels, reduced_dim, (1, 1))
            model_ft.histogram_layer = nn.Sequential(conv_reduce,
                                                     histogram_layer)
        # Parallel mode concatenates backbone and histogram features,
        # doubling the fc input width.
        if parallel:
            num_ftrs = model_ft.fc.in_features * 2
        else:
            num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    # Baseline model (pdb.set_trace() debug leftover removed).
    else:
        # The resnet family shares one head-replacement recipe; dispatch by
        # constructor instead of repeating four identical branches.
        resnet_ctors = {
            'resnet18': models.resnet18,
            'resnet50': models.resnet50,
            'resnet50_wide': models.wide_resnet50_2,
            'resnet50_next': models.resnext50_32x4d,
        }
        if model_name in resnet_ctors:
            model_ft = resnet_ctors[model_name](pretrained=use_pretrained)
            set_parameter_requires_grad(model_ft, feature_extract)
            num_ftrs = model_ft.fc.in_features
            model_ft.fc = nn.Linear(num_ftrs, num_classes)
            input_size = 224

        elif model_name == "densenet121":
            model_ft = models.densenet121(pretrained=use_pretrained,
                                          memory_efficient=True)
            set_parameter_requires_grad(model_ft, feature_extract)
            num_ftrs = model_ft.classifier.in_features
            model_ft.classifier = nn.Sequential()
            model_ft.fc = nn.Linear(num_ftrs, num_classes)
            input_size = 224

        elif model_name == "efficientnet":
            model_ft = models.efficientnet_b0(pretrained=use_pretrained)
            set_parameter_requires_grad(model_ft, feature_extract)
            # ``classifier`` is Sequential(Dropout, Linear); the width lives
            # on the final Linear, not the Sequential itself.
            num_ftrs = model_ft.classifier[-1].in_features
            model_ft.classifier = nn.Sequential()
            model_ft.fc = nn.Linear(num_ftrs, num_classes)
            input_size = 224

        elif model_name == "regnet":
            model_ft = models.regnet_x_400mf(pretrained=use_pretrained)
            set_parameter_requires_grad(model_ft, feature_extract)
            # torchvision RegNet has ``fc``, not ``classifier``; replace the
            # head in place.
            num_ftrs = model_ft.fc.in_features
            model_ft.fc = nn.Linear(num_ftrs, num_classes)
            input_size = 224

        else:
            raise RuntimeError('{} not implemented'.format(model_name))

    # Take model and return embedding model
    model_ft = Embedding_model(model_ft,
                               input_feat_size=num_ftrs,
                               embed_dim=embed_dim)

    return model_ft, input_size
Example 5
# ==============================================================================

# Traced-module smoke tests: each section instantiates a pretrained
# torchvision model, wraps it in VisionModule, and registers a test case that
# runs one forward pass on a 1x3x224x224 input.
# NOTE(review): ``input`` shadows the builtin and is rebound before each test;
# the test bodies close over the name, so each sees whichever tensor was bound
# last at call time — all bindings here look identical, but verify.

wide_resnet_model = models.wide_resnet50_2(pretrained=True)

input = torch.randn(1, 3, 224, 224)


@register_test_case(module_factory=lambda: getTracedRecursiveScriptModule(
    VisionModule(wide_resnet_model)))
def Wide_ResnetVisionModel_basic(module, tu: TestUtils):
    module.forward(input)


# ==============================================================================

efficientnet_model = models.efficientnet_b0(pretrained=True)

input = torch.randn(1, 3, 224, 224)


@register_test_case(module_factory=lambda: getTracedRecursiveScriptModule(
    VisionModule(efficientnet_model)))
def EfficientnetVisionModel_basic(module, tu: TestUtils):
    module.forward(input)


# ==============================================================================

mobilenet_v2_model = models.mobilenet_v2(pretrained=True)

input = torch.randn(1, 3, 224, 224)
 def __init__(self, num_dims: int):
     # Wrap a pretrained EfficientNet-B0, replacing its classification head
     # with a `num_dims`-wide Linear layer.
     super().__init__()
     self.model = models.efficientnet_b0(pretrained=True)
     # classifier is Sequential(Dropout, Linear); [1] is the Linear head.
     num_ftrs = self.model.classifier[1].in_features
     self.model.classifier[1] = nn.Linear(num_ftrs, num_dims)
     # Side effect only: prints the layer/shape table for a 32x3x100x100 batch.
     print(summary(self.model, input_size=(32, 3, 100, 100)))