Example #1
def get_cnn(num_classes, args):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("args.pretrained: " + str(args.pretrained))
    if (args.arch.startswith("resnet") or args.arch.startswith("inception")):
        if (args.arch == "resnet18"):
            model_ft = models.resnet18(
                pretrained=args.pretrained
            )  # Load the pretrained model from pytorch
        elif (args.arch == "resnet50"):
            model_ft = models.resnet50(pretrained=args.pretrained)
        elif (args.arch == "resnetusm"):
            model_ft = resnet18_usm(pretrained=args.pretrained, cuda=args.cuda)

        elif (args.arch == "inception_v3"):
            print("Using Inception v3")
            model_ft = models.inception_v3(pretrained=args.pretrained)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    elif (args.arch == "vgg19_bn"):
        model_ft = models.vgg19_bn(pretrained=args.pretrained
                                   )  # Load the pretrained model from pytorch
        num_features = model_ft.classifier[
            6].in_features  # Newly created modules have require_grad=True by default
        features = list(
            model_ft.classifier.children())[:-1]  # Remove last layer
        features.extend([nn.Linear(num_features, num_classes)
                         ])  # Add our layer with 4 outputs
        model_ft.classifier = nn.Sequential(
            *features)  # Replace the model classifier

    model_ft = model_ft.to(device)
    return model_ft
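Note: the examples on this page use the legacy pretrained= flag. In torchvision 0.13+ that flag is deprecated in favor of an explicit weights= argument; a minimal equivalent sketch (assuming torchvision >= 0.13 is installed):

from torchvision.models import vgg19_bn, VGG19_BN_Weights

model = vgg19_bn(weights=VGG19_BN_Weights.IMAGENET1K_V1)  # pretrained ImageNet weights
model_random = vgg19_bn(weights=None)                     # random initialization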
Example #2
def choose_vgg(name):

    f = None

    if name == 'vgg11':
        f = models.vgg11(pretrained=True)
    elif name == 'vgg11_bn':
        f = models.vgg11_bn(pretrained=True)
    elif name == 'vgg13':
        f = models.vgg13(pretrained=True)
    elif name == 'vgg13_bn':
        f = models.vgg13_bn(pretrained=True)
    elif name == 'vgg16':
        f = models.vgg16(pretrained=True)
    elif name == 'vgg16_bn':
        f = models.vgg16_bn(pretrained=True)
    elif name == 'vgg19':
        f = models.vgg19(pretrained=True)
    elif name == 'vgg19_bn':
        f = models.vgg19_bn(pretrained=True)

    if f is None:
        raise ValueError('unsupported VGG variant: {}'.format(name))

    for params in f.parameters():
        params.requires_grad = False

    return f
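A hypothetical usage sketch for the helper above (the batch size and the 224x224 input are assumed, not part of the original snippet):

import torch

backbone = choose_vgg('vgg19_bn')  # frozen, ImageNet-pretrained VGG-19 with batch norm
with torch.no_grad():
    logits = backbone(torch.randn(1, 3, 224, 224))  # ImageNet head -> shape [1, 1000]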
Example #3
    def __init__(self, network='resnet-101'):
        # in original paper, authors used vgg.
        # however, there exist much better convolutional networks than vgg, and we may experiment with them
        # possible models may be vgg, resnet, etc
        super().__init__()
        assert network in ['vgg', 'resnet-101']

        if network == 'vgg':
            vgg = tvmodels.vgg19_bn(pretrained=True)
            self.feature_extractor = vgg.features[:37]
            # vgg.features[36] is conv4_4, which is what authors used
            # when input has shape [3, 512, 512], output of feature extractor is [512, 64, 64]

        elif network == 'resnet-101':
            # TODO
            resnet = tvmodels.resnet101(pretrained=True)
            layers = [
                resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool,
                resnet.layer1, resnet.layer2
            ]
            self.feature_extractor = nn.Sequential(*layers)
            # when input has shape [3, 512, 512], output of feature extractor is [512, 64, 64]
            # same output shape as vgg version.

        # FeatureExtractor should not be trained
        for child in self.feature_extractor.children():
            for param in child.parameters():
                param.requires_grad = False
Example #4
def vgg19_bn(pretrained: bool,
             progress: bool = True,
             requires_grad: bool = True):
    model = models.vgg19_bn(pretrained=pretrained, progress=progress)
    for params in model.parameters():
        params.requires_grad = requires_grad
    return model
Example #5
    def __init__(self, num_classes=1, num_input_channels=4, pretrained=False):
        super(SegNet, self).__init__()
        #######change to work with 4 channels

        vgg = models.vgg19_bn(pretrained=pretrained)
        features = list(vgg.features.children())
        self.enc1 = nn.Sequential(
            nn.Conv2d(num_input_channels, 64, 3, padding=1),
            *features[1:7])

        #######change to work with 4 channels

        self.enc2 = nn.Sequential(*features[7:14])
        self.enc3 = nn.Sequential(*features[14:27])
        self.enc4 = nn.Sequential(*features[27:40])
        self.enc5 = nn.Sequential(*features[40:])

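        # note: the list multiplication below repeats the *same* module objects,
        # so the four Conv/BatchNorm/ReLU stages in dec5 share one set of parameters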
        self.dec5 = nn.Sequential(
            *([nn.ConvTranspose2d(512, 512, kernel_size=2, stride=2)] + [
                nn.Conv2d(512, 512, kernel_size=3, padding=1),
                nn.BatchNorm2d(512),
                nn.ReLU(inplace=True)
            ] * 4))
        self.dec4 = _DecoderBlock2(1024, 256, 4)
        self.dec3 = _DecoderBlock2(512, 128, 4)
        self.dec2 = _DecoderBlock2(256, 64, 2)
        self.dec1 = _DecoderBlock2(128, num_classes, 2)
Example #6
def vgg19_bn_kw(config):
    model = models.vgg19_bn()
    # remove all fc layers and replace them with a single fc layer (roughly 143M -> 20M parameters)
    model.classifier = nn.Linear(7 * 7 * 512, config["num_classes"])

    new_features = []
    for layer in model.features:
        # replace max pooling with average pooling (keeps the same downsampling)
        if isinstance(layer, nn.MaxPool2d):
            new_features.append(nn.AvgPool2d(kernel_size=2, stride=2))
        # store the number of out channels from conv layers
        elif isinstance(layer, nn.Conv2d):
            new_features.append(layer)
            last_conv_out_channels = layer.out_channels
        # switch ReLU to kWinners2d
        elif isinstance(layer, nn.ReLU):
            new_features.append(
                KWinners2d(
                    channels=last_conv_out_channels,
                    percent_on=config["percent_on"],
                    boost_strength=config["boost_strength"],
                    boost_strength_factor=config["boost_strength_factor"],
                ))
        # otherwise add it as normal
        else:
            new_features.append(layer)
    model.features = nn.Sequential(*new_features)

    return model
Example #7
def grad_cam(image):
    model = models.vgg19_bn(pretrained=True)
    model.to(device)
    model.eval()
    image = cv2.resize(image, (224, 224))
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(image).unsqueeze(0)
    image = image.to(device)

    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image)

    output = None
    for i in range(0, 5):
        gcam.backward(idx=idx[i])
        temp = gcam.generate(target_layer='features.52')
        if output is not None:
            output += temp
        else:
            output = temp

    output /= 5
    return output
Example #8
 def __init__(self):
     super(VGG, self).__init__()
     self.model = vgg19_bn(True).features
     self.mean = torch.Tensor([123.68, 116.779,
                               103.939]).cuda().view(1, 3, 1, 1)
     for param in self.model.parameters():
         param.requires_grad = False
Example #9
def vggcam(pretrained, vggconfig='vggcam16', method='cam', **kwargs):
    use_bn = ('bn' in vggconfig)

    if pretrained:
        model = VGGCAM(make_layers(cfg[vggconfig[:8]], batch_norm=use_bn),
                       method=method,
                       **kwargs)
        model_dict = model.state_dict()
        if vggconfig == 'vggcam16':
            vgg = vmodels.vgg16(pretrained=True)
        elif vggconfig == 'vggcam19':
            vgg = vmodels.vgg19(pretrained=True)
        elif vggconfig == 'vggcam16bn':
            vgg = vmodels.vgg16_bn(pretrained=True)
        elif vggconfig == 'vggcam19bn':
            vgg = vmodels.vgg19_bn(pretrained=True)
        else:
            print("NOT IMPLEMENTED FOR ", vggconfig)
            exit(-3)

        pretrained_dict = vgg.state_dict()
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items()
            if k in model_dict and 'features' in k
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    else:
        model = VGGCAM(make_layers(cfg[vggconfig[:8]], batch_norm=use_bn),
                       method=method, **kwargs)
    return model
Example #10
    def __init__(self, requires_grad=False, layers=['conv1_1'], BN=False):
        super(myVGG, self).__init__()

        if BN:
            self.original_model = models.vgg19_bn(pretrained=False)
            self.original_model.load_state_dict(torch.load('../weights/vgg19_bn-c79401a0.pth'))
            self.checkpoints = [3, 7, 10, 14, 17, 20, 23, 27, 30, 33, 36, 40, 43, 46, 49, 53]
        else:
            self.original_model = models.vgg19(pretrained=False)
            self.original_model.load_state_dict(torch.load('../weights/vgg19-dcbb9e9d.pth'))
            self.checkpoints = [2, 5, 7, 10, 12, 14, 16, 19, 21, 23, 25, 28, 30, 32, 34, 37]

        #if vgg_bn: 17, 17-30; vgg: 12, 12-21
        self.layers = layers
        self.convs = [
                       'conv1_1','conv1_2',
                       'conv2_1','conv2_2',
                       'conv3_1','conv3_2','conv3_3','conv3_4',
                       'conv4_1','conv4_2','conv4_3','conv4_4',
                       'conv5_1','conv5_2','conv5_3','conv5_4',
                       ]
        self.features = []
        self.CreateCheckPoint()

        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False
Example #11
    def __init__(self, embed_size, ver=19, attention_mechanism=False):
        '''
        Load the pretrained VGG and replace fc layer.

        Args:
            embed_size (int): dimension of word embedding vectors
            ver (int): version of the pretrained network
            attention_mechanism (bool): use attention layers in decoder network or not
        '''
        super(VGG, self).__init__()
        if ver == 19:
            vgg = models.vgg19_bn(pretrained=True)
        else:
            raise NotImplementedError('only VGG-19 (ver=19) is supported')

        self.features = vgg.features
        self.classifier = vgg.classifier
        # change the number of output features (4096 -> 2048)
        self.classifier[3] = nn.Linear(vgg.classifier[3].in_features, 2048)
        self.classifier[6] = nn.Linear(2048, embed_size)
        self.linear = self.classifier[6]
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)

        if attention_mechanism:
            # Initialize the weights
            self.classifier[3].weight.data.normal_(0.0, 0.02)
            self.classifier[3].bias.data.fill_(0)
            self.classifier[6].weight.data.normal_(0.0, 0.02)
            self.classifier[6].bias.data.fill_(0)

        self.attention_mechanism = attention_mechanism
Example #12
	def __init__(self):
		super(VGG19custom, self).__init__()  # call the base-class constructor

		# fetch the pretrained model architecture
		from torchvision.models import vgg19_bn
		net = vgg19_bn(False)  # if True, the pretrained weights are downloaded

		# load the weights from an existing model file
		import modelio
		#modelio.SaveModelWeights(net, "vgg19_bn.pt")
		modelio.LoadModelWeights("vgg19_bn.pt", net, False)

		# reuse the convolutional part
		self.features = net.features  # define the model (convolutional part)

		# freeze the parameters
		for p in self.features.parameters():
			p.requires_grad = False

		# determine the output size of the convolutional part
		size = (3, 224, 224)  # tensor shape of the input image
		test_input = torch.ones(1, size[0], size[1], size[2])  # dummy input data
		temp = self.features(test_input)  # run the convolutional part
		temp = temp.view(temp.size()[0], -1)  # flatten
		conv_output_size = temp.size()[-1]  # output tensor size

		# define the classifier
		self.classifier = torch.nn.Sequential(
			torch.nn.Linear(conv_output_size, 512),
			torch.nn.ReLU(),
			#torch.nn.BatchNorm1d(512),
			torch.nn.Dropout(0.25),
			torch.nn.Linear(512, 101)
		)
Example #13
    def __init__(self, num_classes, pretrained=True, phase='train'):
        super(FCN8VGG, self).__init__()
        vgg = models.vgg19_bn()
        if pretrained:
            vgg.load_state_dict(torch.load(vgg19_bn_path))
        features = list(vgg.features.children())

        self.features3_4 = nn.Sequential(
            *features[0:26])  # for FCN 256,128,128
        # self.features3 = nn.Sequential(*features[26:27]) # for FCN 256,64,64
        self.features4_4 = nn.Sequential(*features[26:39])  # for FCN 512, 64, 64
        self.features4 = nn.Sequential(*features[39:40])  # for FCN 512, 32, 32
        self.features5_4 = nn.Sequential(
            *features[40:52])  # for FCN 512, 32, 32
        self.features5 = nn.Sequential(*features[52:])  # for FCN 512, 16, 16

        self.fconv3 = nn.Conv2d(256, num_classes, kernel_size=1)
        self.fconv4 = nn.Conv2d(512, num_classes, kernel_size=1)
        self.fconv5 = nn.Sequential(
            nn.Conv2d(1536, 2048, kernel_size=7), nn.ReLU(inplace=True),
            nn.Dropout(), nn.Conv2d(2048, 2048, kernel_size=1),
            nn.ReLU(inplace=True), nn.Dropout(),
            nn.Conv2d(2048, num_classes, kernel_size=1))
        initialize_weights(self.fconv3, self.fconv4, self.fconv5)

        self.ssd_conv5_1 = nn.Conv2d(512,
                                     512,
                                     kernel_size=3,
                                     padding=1,
                                     dilation=1)
Example #14
def Classifier(arch = 'vgg16', dropout = 0.5, hidden_layer = 1024):
    if arch == 'vgg16':
        model = models.vgg16(pretrained = True)
        input_layer = 25088
    elif arch == 'vgg19_bn':
        model = models.vgg19_bn(pretrained = True)
        input_layer = 25088
    elif arch == 'densenet121':
        model = models.densenet121(pretrained = True)
        input_layer = 1024
    else:
        raise ValueError('Unsupported architecture: {}'.format(arch))
        
    # Freeze parameters
    for param in model.parameters():
        param.requires_grad = False
        
    My_Classifier = nn.Sequential(nn.Linear(input_layer, hidden_layer),
                           nn.ReLU(),
                           nn.Dropout(dropout),
                           nn.Linear(hidden_layer, 512),
                           nn.ReLU(),
                           nn.Dropout(dropout),
                           nn.Linear(512, 102),
                           nn.LogSoftmax(dim = 1)
                           )
    model.classifier = My_Classifier
    return model
Example #15
def vgg19():
    """Constructs a VGG-19 model for ILSVRC12 dataset"""
    model = models.vgg19_bn()
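    # indices 2 and 5 of torchvision's VGG classifier are the Dropout layers;
    # the two lines below swap them for BatchNorm1d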
    model.classifier[2] = nn.BatchNorm1d(4096)
    model.classifier[5] = nn.BatchNorm1d(4096)
    model.apply(weights_init)
    return model
Example #16
def load_checkpoint(filepath):
    '''
    Rebuilds a trained torchvision model
    filepath: string, contains filepath where the trained model parameters are saved in a dictionary
    returns: a pytorch model, the optimizer, and the number of epochs
    '''
    # Create a dictionary of torchvision models to choose from
    print('Downloading models...')
    arch_dict = {
        'vgg19_bn': models.vgg19_bn(pretrained=True),
        'alexnet': models.alexnet(pretrained=True)
    }
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_dict = torch.load(filepath, map_location=device)
    model = arch_dict[model_dict['arch']]
    for param in model.parameters():
        param.requires_grad = False
    # Build the model layers
    model.classifier = DeepNetworkClassifier(model_dict['input_size'],
                                             model_dict['output_size'],
                                             model_dict['hidden_layers'])
    model.load_state_dict(model_dict['state_dict'])
    model.class_to_idx = model_dict['class_to_idx']
    optimizer = optim.Adam(model.classifier.parameters())
    optimizer.load_state_dict(model_dict['optimizer_state_dict'])
    epochs = model_dict['epochs']

    return model, optimizer, epochs
Example #17
 def getVGG19(self, batch_norm=False):
     if batch_norm:
         model = models.vgg19_bn()
     else:
         model = models.vgg19()
     model.train()
     return model
Example #18
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    # Initialize these variables which will be set in this if statement. Each of these
    #   variables is model specific.
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet50
        """
        model_ft = models.resnet101(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "vgg":
        """ vgg19bn
        """
        model_ft = models.vgg19_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[-1].in_features
        model_ft.classifier[-1] = nn.Linear(num_ftrs, num_classes)
        input_size = 224 
    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size
Example #19
 def __init__(self):
     """Select conv1_1 ~ conv5_1 activation maps."""
     super(VGGNet, self).__init__()
     self.vgg = models.vgg19_bn(pretrained=True)
     self.vgg_features = self.vgg.features
     self.fc_features = nn.Sequential(
         *list(self.vgg.classifier.children())[:-2])
Example #20
 def __init__(self, device):
     super(Vgg, self).__init__()
     self.model_vgg = vgg19_bn(True).features
     self.mean = torch.Tensor([123.68, 116.779,
                               103.939]).to(device).view(1, 3, 1, 1)
     for param in self.model_vgg.parameters():
         param.requires_grad = False
Example #21
 def __init__(self, num_classes=1000, *models):
     """
     models: string names of models (lookup for the global namespace)
     """
     super(Model, self).__init__()
     imgDim = 3
     if len(models) == 0:
         print('switching to the default combination')
         models = [
             # 'IceResNet'
             # , 'LeNet'
             'densenet161'
         ]
     self.models_str = models
     for model in models:
         if model == 'IceResNet':
             setattr(self, model,
                     IceResNet(IceSEBasicBlock, 1, num_classes, 3, 32))
         elif model == 'LeNet':
             setattr(self, model, LeNet(num_classes, imgDim))
         # elif model == 'MiniDenseNet':
         #     setattr(self, model,
         #             MiniDenseNet(growthRate=48, depth=20, reduction=0.5, bottleneck=True, nClasses=num_classes,
         #                          n_dim=imgDim))
         elif model == 'densenet161':
             setattr(self, model, built_in_models.densenet161())
         elif model == 'vgg19':
             setattr(self, model, built_in_models.vgg19_bn())
         else:
             raise ValueError('unrecognized model: %s' % str(model))
     self.fc = nn.Linear(len(models) * num_classes, num_classes)
     self.softmax = nn.Softmax(dim=1)
     self.load_weights('weights.pth')
Example #22
def select_model(model_param, in_arg):
    """
    Selects the NN model based on command line argument "arch"
    Parameters:
     model_param - dictionary of our model parameters
     in_arg - command line arguments

    Returns:
     Selected NN model (object)
     model_param - dictionary of our model parameters
    """

    # We will manage 3 choices: alexnet, resnet18, vgg19_bn
    alexnet = models.alexnet(pretrained=True)
    resnet18 = models.resnet18(pretrained=True)
    vgg19_bn = models.vgg19_bn(pretrained=True)

    models_dict = {
        'alexnet': alexnet,
        'resnet18': resnet18,
        'vgg19_bn': vgg19_bn
    }

    if in_arg.arch not in models_dict.keys():
        print(
            "CNN model '{}' is not managed. It can only be chosen among alexnet, resnet18 and vgg19_bn. Try again!"
            .format(in_arg.arch))
        sys.exit(0)

    model_param['arch'] = in_arg.arch

    return models_dict[in_arg.arch], model_param
Example #23
def load_pretrained_model(arch):
    '''
    Load pretrained model
    '''
    if arch == 'densenet121':
        model = models.densenet121(pretrained = True)
        inputsize = model.classifier.in_features
    elif arch == 'densenet161':
        model = models.densenet161(pretrained = True)
        inputsize = model.classifier.in_features
    elif arch == 'densenet201':
        model = models.densenet201(pretrained = True)
        inputsize = model.classifier.in_features
    elif arch == 'resnet18':
        model = models.resnet18(pretrained = True)
        inputsize = model.fc.in_features
    elif arch == 'resnet34':
        model = models.resnet34(pretrained = True)
        inputsize = model.fc.in_features
    elif arch == 'resnet50':
        model = models.resnet50(pretrained = True)
        inputsize = model.fc.in_features
    elif arch == 'vgg13_bn':
        model = models.vgg13_bn(pretrained = True)
        inputsize = model.classifier[0].in_features
    elif arch == 'vgg16_bn':
        model = models.vgg16_bn(pretrained = True)
        inputsize = model.classifier[0].in_features
    elif arch == 'vgg19_bn':
        model = models.vgg19_bn(pretrained = True)
        inputsize = model.classifier[0].in_features
    else:
        raise ValueError('Unsupported architecture: {}'.format(arch))

    return model, inputsize
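A hypothetical usage sketch: the returned inputsize can size a replacement classifier head (the hidden width and the 10-class output below are assumed):

from torch import nn

model, inputsize = load_pretrained_model('vgg19_bn')
model.classifier = nn.Sequential(
    nn.Linear(inputsize, 512),
    nn.ReLU(),
    nn.Linear(512, 10))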
Example #24
def PoseModel(num_point, num_stages=6, batch_norm=False, pretrained=False):
    net_dict = make_net_dict()
    model = CPM(net_dict, batch_norm)

    if pretrained:
        parameter_num = 10
        if batch_norm:
            vgg19 = models.vgg19_bn(pretrained=True)
            parameter_num *= 6
        else:
            vgg19 = models.vgg19(pretrained=True)
            parameter_num *= 2

        vgg19_state_dict = vgg19.state_dict()
        vgg19_keys = list(vgg19_state_dict.keys())

        model_dict = model.state_dict()
        model_keys = list(model_dict.keys())
        from collections import OrderedDict
        weights_load = OrderedDict()

        # copy the first parameter_num VGG tensors onto the matching CPM parameters
        for i in range(parameter_num):
            weights_load[model_keys[i]] = vgg19_state_dict[vgg19_keys[i]]
        model_dict.update(weights_load)
        model.load_state_dict(model_dict)

    return model
Example #25
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    if model_name == "resnet":
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        s = len(model_ft.classifier) - 1
        num_ftrs = model_ft.classifier[s].in_features
        model_ft.classifier[s] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        model_ft = models.vgg19_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        s = len(model_ft.classifier) - 1
        num_ftrs = model_ft.classifier[s].in_features
        model_ft.classifier[s] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    else:
        print("model not implemented")
        return None, None

    return model_ft, input_size
Example #26
    def __init__(self, dir, arch, file):
        """Init the variable

        Parameters:
        -----------
        dir: string
            Image path
        arch: string
            CNN model architecture
        file: json file
            Text file with species names
        """
        # set the default models
        pretrain_model = {
            "vgg19_bn": models.vgg19_bn(pretrained=True), 
            "vgg19": models.vgg19(pretrained=True),
            "resnet50": models.resnet50(pretrained=True)
        }

        self.dir = dir
        self.train = os.path.join(dir, "train")
        self.test = os.path.join(dir, "test")
        self.validate = os.path.join(dir, "valid")
        
        self.file = file
        try:
            self.model = pretrain_model[arch]
        except KeyError:
            arch = input("Invalidate model, choose vgg19_nb, vgg19, resnet50: ")
            self.model = pretrain_model[arch]

        # freeze the model grad
        for param in self.model.parameters():
            param.requires_grad = False
Example #27
    def get_model(self):

        if self.args.architecture in ['resnet34' , 'resnet50' , 'resnet101']:
            if self.args.architecture == 'resnet34':
                model = torchmodels.resnet34(pretrained=True)
            if self.args.architecture == 'resnet50':
                model = torchmodels.resnet50(pretrained=True)
            if self.args.architecture == 'resnet101':
                model = torchmodels.resnet101(pretrained=True)

            num_classes = self.args.num_classes
            num_ftrs = model.fc.in_features
            model.fc = nn.Linear(num_ftrs, num_classes)

        if self.args.architecture in ['densenet121', 'densenet169', 'densenet201', 'densenet161']:

            if self.args.architecture == 'densenet121':
                model = torchmodels.densenet121(pretrained=True)
            if self.args.architecture == 'densenet169':
                model = torchmodels.densenet169(pretrained=True)
            if self.args.architecture == 'densenet201':
                model = torchmodels.densenet201(pretrained=True)
            if self.args.architecture == 'densenet161':
                model = torchmodels.densenet161(pretrained=True)

            num_classes = self.args.num_classes
            num_ftrs = model.classifier.in_features
            model.classifier = nn.Linear(num_ftrs, num_classes)

        if self.args.architecture in [ 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
        'vgg19_bn', 'vgg19']:
            if self.args.architecture == 'vgg11':
                model = torchmodels.vgg11(pretrained=True)
            if self.args.architecture == 'vgg11_bn':
                model = torchmodels.vgg11_bn(pretrained=True)
            if self.args.architecture == 'vgg13':
                model = torchmodels.vgg13(pretrained=True)
            if self.args.architecture == 'vgg13_bn':
                model = torchmodels.vgg13_bn(pretrained=True)
            if self.args.architecture == 'vgg16':
                model = torchmodels.vgg16(pretrained=True)
            if self.args.architecture == 'vgg16_bn':
                model = torchmodels.vgg16_bn(pretrained=True)
            if self.args.architecture == 'vgg19_bn':
                model = torchmodels.vgg19_bn(pretrained=True)
            if self.args.architecture == 'vgg19':
                model = torchmodels.vgg19(pretrained=True)

            num_classes = self.args.num_classes
            in_features = model.classifier[6].in_features
            n_module = nn.Linear(in_features, num_classes)
            n_classifier = list(model.classifier.children())[:-1]
            n_classifier.append(n_module)
            model.classifier = nn.Sequential(*n_classifier)

        if self.args.cuda:
            model.cuda()

        return model
Example #28
def set_model (model_name, num_class, neurons_reducer_block=0, comb_method=None, comb_config=None, pretrained=True,
         freeze_conv=False, p_dropout=0.5):

    if pretrained:
        pre_ptm = 'imagenet'
        pre_torch = True
    else:
        pre_torch = False
        pre_ptm = None

    if model_name not in _MODELS:
        raise Exception("The model {} is not available!".format(model_name))

    model = None
    if model_name == 'resnet-50':
        model = MyResnet(models.resnet50(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'resnet-101':
        model = MyResnet(models.resnet101(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'densenet-121':
        model = MyDensenet(models.densenet121(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'vgg-13':
        model = MyVGGNet(models.vgg13_bn(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'vgg-16':
        model = MyVGGNet(models.vgg16_bn(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'vgg-19':
        model = MyVGGNet(models.vgg19_bn(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'mobilenet':
        model = MyMobilenet(models.mobilenet_v2(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'efficientnet-b4':
        if pretrained:
            model = MyEffnet(EfficientNet.from_pretrained(model_name), num_class, neurons_reducer_block, freeze_conv,
                             comb_method=comb_method, comb_config=comb_config)
        else:
            model = MyEffnet(EfficientNet.from_name(model_name), num_class, neurons_reducer_block, freeze_conv,
                             comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'inceptionv4':
        model = MyInceptionV4(ptm.inceptionv4(num_classes=1000, pretrained=pre_ptm), num_class, neurons_reducer_block,
                              freeze_conv, comb_method=comb_method, comb_config=comb_config)

    elif model_name == 'senet':
        model = MySenet(ptm.senet154(num_classes=1000, pretrained=pre_ptm), num_class, neurons_reducer_block,
                        freeze_conv, comb_method=comb_method, comb_config=comb_config)

    return model
Example #29
    def __init__(self, base_model='vgg'):
        """
        Construct the EncoderCNN class.

        Args:
            base_model: Base CNN model to use
        Returns;
            A PyTorch network model

        """
        super(EncoderCNN, self).__init__()

        try:
            assert base_model in self.base_model_options
        except AssertionError:
            print('Invalid base model: {}'.format(base_model))
            print(' -- Valid types: ', self.base_model_options)
            return

        # Load selected base model with pre-trained weights
        if base_model == 'resnet18':
            self.bm = models.resnet18(pretrained=True)
        elif base_model == 'resnet50':
            self.bm = models.resnet50(pretrained=True)
        elif base_model == 'resnet152':
            self.bm = models.resnet152(pretrained=True)
        elif base_model == 'vgg11':
            self.bm = models.vgg11(pretrained=True)
        elif base_model == 'vgg11_bn':
            self.bm = models.vgg11_bn(pretrained=True)
        elif base_model == 'vgg16':
            self.bm = models.vgg16(pretrained=True)
        elif base_model == 'vgg16_bn':
            self.bm = models.vgg16_bn(pretrained=True)
        elif base_model == 'vgg19':
            self.bm = models.vgg19(pretrained=True)
        elif base_model == 'vgg19_bn':
            self.bm = models.vgg19_bn(pretrained=True)
        elif base_model == 'squeezenet0':
            self.bm = models.squeezenet1_0(pretrained=True)
        elif base_model == 'squeezenet1':
            self.bm = models.squeezenet1_1(pretrained=True)
        elif base_model == 'densenet121':
            self.bm = models.densenet121(pretrained=True)
        elif base_model == 'densenet201':
            self.bm = models.densenet201(pretrained=True)
        elif base_model == 'inception':
            self.bm = models.inception_v3(pretrained=True)

        # Freeze layers
        for param in self.bm.parameters():
            param.requires_grad = False

        modules = list(self.bm.children())[:-1]
        # num_features = self.bm.fc.in_features

        self.bm = nn.Sequential(*modules)

        return
Example #30
    def __init__(self):
        super().__init__()
        self.enc1_1 = VGGBlock(3, 64, 64, True)
        self.enc1_2 = VGGBlock(64, 128, 128, True)
        self.enc1_3 = VGGBlock(128, 256, 256, True)
        self.enc1_4 = VGGBlock(256, 512, 512, True)
        self.enc1_5 = VGGBlock(512, 512, 512, True)

        # apply pretrained vgg19 weights on 1st unet
        vgg19 = models.vgg19_bn()
        vgg19.load_state_dict(
            torch.load(r"E:\dataset\pretrain/vgg19_bn-c79401a0.pth"))

        self.enc1_1.conv1.weight = vgg19.features[0].weight
        self.enc1_1.bn1.weight = vgg19.features[1].weight
        self.enc1_1.conv2.weight = vgg19.features[3].weight
        self.enc1_1.bn2.weight = vgg19.features[4].weight
        self.enc1_2.conv1.weight = vgg19.features[7].weight
        self.enc1_2.bn1.weight = vgg19.features[8].weight
        self.enc1_2.conv2.weight = vgg19.features[10].weight
        self.enc1_2.bn2.weight = vgg19.features[11].weight
        self.enc1_3.conv1.weight = vgg19.features[14].weight
        self.enc1_3.bn1.weight = vgg19.features[15].weight
        self.enc1_3.conv2.weight = vgg19.features[17].weight
        self.enc1_3.bn2.weight = vgg19.features[18].weight
        self.enc1_4.conv1.weight = vgg19.features[27].weight
        self.enc1_4.bn1.weight = vgg19.features[28].weight
        self.enc1_4.conv2.weight = vgg19.features[30].weight
        self.enc1_4.bn2.weight = vgg19.features[31].weight
        self.enc1_5.conv1.weight = vgg19.features[33].weight
        self.enc1_5.bn1.weight = vgg19.features[34].weight
        self.enc1_5.conv2.weight = vgg19.features[36].weight
        self.enc1_5.bn2.weight = vgg19.features[37].weight
        del vgg19

        self.aspp1 = ASPP(512, 512)

        self.up = nn.Upsample(scale_factor=2, mode='bilinear')
        self.dec1_4 = VGGBlock(1024, 256, 256, False)
        self.dec1_3 = VGGBlock(512, 128, 128, False)
        self.dec1_2 = VGGBlock(256, 64, 64, False)
        self.dec1_1 = VGGBlock(128, 32, 32, False)

        self.output1 = output_block()

        self.enc2_1 = VGGBlock(3, 64, 64, True, True)
        self.enc2_2 = VGGBlock(64, 128, 128, True, True)
        self.enc2_3 = VGGBlock(128, 256, 256, True, True)
        self.enc2_4 = VGGBlock(256, 512, 512, True, True)
        self.enc2_5 = VGGBlock(512, 512, 512, True, True)

        self.aspp2 = ASPP(512, 512)

        self.dec2_4 = VGGBlock(1536, 256, 256, False, True)
        self.dec2_3 = VGGBlock(768, 128, 128, False, True)
        self.dec2_2 = VGGBlock(384, 64, 64, False, True)
        self.dec2_1 = VGGBlock(192, 32, 32, False, True)

        self.output2 = output_block()
Example #31
def vgg19_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration 'E') with batch normalization
    """
    model = models.vgg19_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg19_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
Example #32
def select_model(name='vgg19'):
    if name == 'vgg19':
        model, feature_count = models.vgg19_bn(pretrained=True), 25088
    elif name == 'densenet161':
        model, feature_count = models.densenet161(pretrained=True), 2208
    elif name == 'densenet121':
        model, feature_count = models.densenet121(pretrained=True), 1024
    else:
        model, feature_count = models.alexnet(pretrained=True), 9216

    return model, feature_count
Example #33
    def __init__(self, num_classes, pretrained=True):
        super(SegNet, self).__init__()
        vgg = models.vgg19_bn()
        if pretrained:
            vgg.load_state_dict(torch.load(vgg19_bn_path))
        features = list(vgg.features.children())
        self.enc1 = nn.Sequential(*features[0:7])
        self.enc2 = nn.Sequential(*features[7:14])
        self.enc3 = nn.Sequential(*features[14:27])
        self.enc4 = nn.Sequential(*features[27:40])
        self.enc5 = nn.Sequential(*features[40:])

        self.dec5 = nn.Sequential(
            *([nn.ConvTranspose2d(512, 512, kernel_size=2, stride=2)] +
              [nn.Conv2d(512, 512, kernel_size=3, padding=1),
               nn.BatchNorm2d(512),
               nn.ReLU(inplace=True)] * 4)
        )
        self.dec4 = _DecoderBlock(1024, 256, 4)
        self.dec3 = _DecoderBlock(512, 128, 4)
        self.dec2 = _DecoderBlock(256, 64, 2)
        self.dec1 = _DecoderBlock(128, num_classes, 2)
        initialize_weights(self.dec5, self.dec4, self.dec3, self.dec2, self.dec1)
Example #34
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)

        for j in range(inputs.size()[0]):
            images_so_far += 1
            ax = plt.subplot(num_images // 2, 2, images_so_far)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(class_names[preds[j]]))
            imshow(inputs.cpu().data[j])

            if images_so_far == num_images:
                return


model_ft = models.vgg19_bn(True)
mod = list(model_ft.classifier.children())
mod.pop()
mod.append(torch.nn.Linear(4096, 80))
new_classifier = torch.nn.Sequential(*mod)
model_ft.classifier = new_classifier


if use_gpu:
    model_ft = nn.DataParallel(model_ft, [0]).cuda()


model_ft = torch.load('model_pretrained_vgg19.pkl')
criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized