def __init__(self):
        super(Dunet, self).__init__()
        
        vgg13 = models.vgg13(pretrained=True)

        self.conv1 = vgg13.features[0]
        self.conv2 = vgg13.features[2]
        self.conv3 = vgg13.features[5]
        self.conv4 = vgg13.features[7]
        self.conv5 = vgg13.features[10]
        self.conv6 = vgg13.features[12]
        
        self.dilate_center = Dblock(512)

        self.up3 = self.conv_stage(512, 256)
        self.up2 = self.conv_stage(256, 128)
        self.up1 = self.conv_stage(128, 64)
        
        self.trans3 = self.upsample(512, 256)
        self.trans2 = self.upsample(256, 128)
        self.trans1 = self.upsample(128, 64)
        
        self.conv_last = nn.Sequential(
            nn.Conv2d(64, 1, 3, 1, 1),
            nn.Sigmoid()
        )
        
        self.max_pool = nn.MaxPool2d(2)
        
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    m.bias.data.zero_()
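The constructor above calls two helper methods, self.conv_stage and self.upsample, that are not part of this snippet. A minimal sketch of what they might look like for a VGG13-based encoder-decoder (an assumption, not the original implementation):

def conv_stage(self, in_channels, out_channels):
    # two 3x3 convolutions with ReLU, mirroring the VGG encoder blocks (assumed)
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 3, 1, 1),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, 3, 1, 1),
        nn.ReLU(inplace=True),
    )

def upsample(self, in_channels, out_channels):
    # 2x spatial upsampling via a transposed convolution (assumed)
    return nn.Sequential(
        nn.ConvTranspose2d(in_channels, out_channels, 2, stride=2),
        nn.ReLU(inplace=True),
    )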
def vgg13(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B")
    """
    model = models.vgg13(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg13'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
Example n. 3
def getNetwork(args):
    if (args.net_type == 'alexnet'):
        net = models.alexnet(pretrained=args.finetune)
        file_name = 'alexnet'
    elif (args.net_type == 'vggnet'):
        if (args.depth == 11):
            net = models.vgg11(pretrained=args.finetune)
        elif (args.depth == 13):
            net = models.vgg13(pretrained=args.finetune)
        elif (args.depth == 16):
            net = models.vgg16(pretrained=args.finetune)
        elif (args.depth == 19):
            net = models.vgg19(pretrained=args.finetune)
        else:
            print(
                'Error : VGGnet depth should be one of [11, 13, 16, 19]')
            sys.exit(1)
        file_name = 'vgg-%s' % (args.depth)
    elif (args.net_type == 'densenet'):
        if (args.depth == 121):
            net = models.densenet121(pretrained=args.finetune)
        elif (args.depth == 161):
            net = models.densenet161(pretrained=args.finetune)
        elif (args.depth == 169):
            net = models.densenet169(pretrained=args.finetune)
        file_name = 'densenet-%s' % (args.depth)
    elif (args.net_type == 'resnet'):
        net = networks.resnet(args.finetune, args.depth)
        file_name = 'resnet-%s' % (args.depth)
    elif (args.net_type == 'xception'):
        net = pretrainedmodels.xception(num_classes=1000,
                                        pretrained='imagenet')
        file_name = 'xception'
    elif (args.net_type == 'inception'):
        net = models.inception_v3(num_classes=1000, pretrained=args.finetune)
        file_name = 'inception'
    else:
        print(
            'Error : Network should be one of [alexnet / vggnet / densenet / resnet / xception / inception]'
        )
        sys.exit(1)

    return net, file_name
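A hedged usage sketch for getNetwork: it expects an argparse-style namespace with net_type, depth, and finetune attributes (attribute names taken from the code above; the values here are illustrative).

import argparse

args = argparse.Namespace(net_type='vggnet', depth=13, finetune=True)
net, file_name = getNetwork(args)  # -> pretrained VGG13, file_name 'vgg-13'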
Example n. 4
def build_network(fc_model, trained_model, dropout=0.5, device='cuda'):

    model = None

    if (trained_model == 'vgg16'):
        model = models.vgg16(pretrained=True)
    elif (trained_model == 'resnet18'):
        model = models.resnet18(pretrained=True)
    elif (trained_model == 'vgg13'):
        model = models.vgg13(pretrained=True)
    else:
        raise ValueError("Unsupported model: {}".format(trained_model))

    for param in model.parameters():
        param.requires_grad = False

    classifier = nn.Sequential(fc_model)
    model.classifier = classifier
    model.to(device)
    print("Done - {} Network with Classifier... ".format(trained_model))
    return model
def create_base_model(arch, hidden_units, class_to_idx):
    print(arch)
    if arch =='vgg13':
        model = models.vgg13(pretrained=True)
    elif arch == 'vgg16':
        model = models.vgg16(pretrained=True) 
    elif arch == 'vgg19':
        model = models.vgg19(pretrained=True) 
    else:
        raise AssertionError('Architecture not recognized: choose among vgg13, vgg16 and vgg19')
        
    model.classifier = nn.Sequential(OrderedDict([
                              ('fc1', nn.Linear(25088, hidden_units)),
                              ('relu', nn.ReLU()),
                              ('fc2', nn.Linear(hidden_units, 102)),
                              ('output', nn.LogSoftmax(dim=1))
                              ]))
    model.class_to_idx = class_to_idx
    return model
def checkpoint_loading(filename):
    
    check = torch.load(filename)
    model = models.vgg13(pretrained=True)
    
    for param in model.parameters():
        param.requires_grad = False
    model.class_to_idx = check['class_to_idx']
    classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(25088, 2960,  bias=True)),
                                                ('Relu1', nn.ReLU()),
                                                ('Dropout1', nn.Dropout(p = 0.5)),
                                                ('fc2', nn.Linear(2960, 102,  bias=True)),
                                                ('output', nn.LogSoftmax(dim=1))
                                                 ]))
    
    model.classifier = classifier
    model.load_state_dict(check['state_dict'])
    
    return model
Example n. 7
def load_and_rebuild_model(nn_checkpoint):

    checkpoint = torch.load(nn_checkpoint)
    arch = checkpoint['arch']

    if arch == 'vgg13':
        model = models.vgg13(pretrained=True)
    elif arch == 'alexnet':
        model = models.alexnet(pretrained=True)
    else:
        raise ValueError(
            "{} is not a supported architecture. Please choose either vgg13 or alexnet."
            .format(arch))

    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']

    return model
Example n. 8
def load_checkpoint(filepath):
    checkpoint = torch.load(filepath)

    # Rebuild the architecture stored in the checkpoint
    if checkpoint['arch'] == 'vgg13':
        model = models.vgg13(pretrained=True)
    else:
        model = models.vgg16(pretrained=True)

    # Freeze feature gradients
    for param in model.parameters():
        param.requires_grad = False

    #from checkpoint
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']

    return model
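The loaders in these examples assume a checkpoint dictionary saved with at least the keys 'arch', 'classifier', 'state_dict', and 'class_to_idx'. A minimal companion save might look like this (a sketch under that assumption; model is the trained network from any of the builders above and train_dataset is a hypothetical ImageFolder):

checkpoint = {
    'arch': 'vgg13',
    'classifier': model.classifier,
    'state_dict': model.state_dict(),
    'class_to_idx': train_dataset.class_to_idx,
}
torch.save(checkpoint, 'checkpoint.pth')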
Example n. 9
def load_checkpoint(filepath):
    checkpoint = torch.load(filepath)
    arch = checkpoint['arch']
    
    # Instantiate only the architecture named in the checkpoint, so we don't
    # download pretrained weights for both models
    aval_models = {'vgg13': models.vgg13, 'densenet121': models.densenet121}
    model = aval_models[arch](pretrained=True)
    
    for param in model.parameters():
        param.requires_grad=False
    
    model.classifier = checkpoint['classifier']  # NOTE: the classifier must be saved in the checkpoint as well, otherwise a classifier mismatch error will occur
    
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    
    return model
Example n. 10
def model_arch(arch, hidden_units):
    if arch == 'vgg16':
        model = models.vgg16(pretrained=True)
    else:
        model = models.vgg13(pretrained=True)

    for parameters in model.parameters():
        parameters.requires_grad = False

    classifier = nn.Sequential(nn.Linear(25088, hidden_units),
                               nn.ReLU(inplace=True), nn.Dropout(p=0.2),
                               nn.Linear(hidden_units, hidden_units),
                               nn.ReLU(inplace=True), nn.Dropout(p=0.2),
                               nn.Linear(hidden_units, 102),
                               nn.LogSoftmax(dim=1))

    model.classifier = classifier

    return model
Example n. 11
def InitializeModel(architecture):
    # Map names to constructors so only the requested model is downloaded
    switcher = {
        'vgg13': models.vgg13,
        'vgg16': models.vgg16,
        'vgg19': models.vgg19,
        'resnet18': models.resnet18,
        'resnet34': models.resnet34,
        'resnet50': models.resnet50,
        'resnet101': models.resnet101,
        'resnet152': models.resnet152,
        'squeezenet1.0': models.squeezenet1_0,
        'squeezenet1.1': models.squeezenet1_1,
        'densenet121': models.densenet121,
        'densenet169': models.densenet169,
        'densenet161': models.densenet161,
        'densenet201': models.densenet201
    }
    if architecture not in switcher:
        raise ValueError("Invalid argument for architecture: {}".format(architecture))
    model = switcher[architecture](pretrained=True)
    return model
Example n. 12
def load_model(architecture, device, hidden_units):
    if architecture == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif architecture == 'vgg13':
        model = models.vgg13(pretrained=True)
    else:
        raise ValueError("Unsupported architecture: {}".format(architecture))

    for param in model.parameters():
        param.requires_grad = False

    classifier = nn.Sequential(
        OrderedDict([('fc1', nn.Linear(25088, 12544)), ('relu1', nn.ReLU()),
                     ('fc2', nn.Linear(12544, hidden_units)),
                     ('relu2', nn.ReLU()),
                     ('fc3', nn.Linear(hidden_units, 102)),
                     ('output', nn.LogSoftmax(dim=1))]))

    model.classifier = classifier
    model.to(device)
    return model
Example n. 13
def nn_network(structure='vgg13',dropout=0.5, hidden_layer1 = 120,lr = 0.001,power='gpu'):
    '''
    Arguments: the architecture for the network (alexnet, densenet121, vgg13, vgg16), the hyperparameters for the network (hidden layer 1 nodes, dropout and learning rate), and whether to use the GPU or not
    Returns: the set-up model, along with the criterion and the optimizer for training

    '''

    if structure == 'vgg13':
        model = models.vgg13(pretrained=True)
    elif structure == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif structure == 'densenet121':
        model = models.densenet121(pretrained=True)
    elif structure == 'alexnet':
        model = models.alexnet(pretrained = True)
    else:
        raise ValueError("{} is not a valid model. Did you mean vgg13, vgg16, densenet121, or alexnet?".format(structure))

    for param in model.parameters():
        param.requires_grad = False

    from collections import OrderedDict
    # 'arch' is an external dict mapping each architecture name to its classifier input size
    classifier = nn.Sequential(OrderedDict([
        ('dropout', nn.Dropout(dropout)),
        ('inputs', nn.Linear(arch[structure], hidden_layer1)),
        ('relu1', nn.ReLU()),
        ('hidden_layer1', nn.Linear(hidden_layer1, 90)),
        ('relu2', nn.ReLU()),
        ('hidden_layer2', nn.Linear(90, 80)),
        ('relu3', nn.ReLU()),
        ('hidden_layer3', nn.Linear(80, 102)),
        ('output', nn.LogSoftmax(dim=1))
    ]))

    model.classifier = classifier
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr)

    if torch.cuda.is_available() and power == 'gpu':
        model.cuda()

    return model, criterion, optimizer
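nn_network indexes an external dict named arch that maps each architecture to its classifier input size; it is not shown in this snippet. A plausible definition (an assumption) and a usage example:

arch = {'vgg13': 25088, 'vgg16': 25088, 'densenet121': 1024, 'alexnet': 9216}

model, criterion, optimizer = nn_network(structure='vgg13', dropout=0.5,
                                         hidden_layer1=120, lr=0.001, power='gpu')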
Example n. 14
def get_model(model_name):
    if model_name == 'alexnet':
        model = models.alexnet(pretrained=True)
    elif model_name == 'vgg19_bn':
        model = models.vgg19_bn(pretrained=True)
    elif model_name == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif model_name == 'vgg13':
        model = models.vgg13(pretrained=True)
    elif model_name == 'vgg11':
        model = models.vgg11(pretrained=True)
    else:
        print("\nError: selecting default vgg16\n")
        model = models.vgg16(pretrained=True)

    for param in model.parameters():
        param.requires_grad = False

    return model
Example n. 15
def create_model(hidden_units, input_size, output_size, dropout_rate,
                 selected_model):

    print('Creating Model...')
    #Load in VGG Model by default
    if (selected_model == 'vgg13'):
        model = models.vgg13(pretrained=True)  #input 25088
        input_size = 25088

    if (selected_model == 'alexnet'):
        model = models.alexnet(pretrained=True)  #input 9216
        input_size = 9216

    #Freeze parameters
    for param in model.parameters():
        param.requires_grad = False

    #creates the OrderedDict to be used in the classifier creation based on initial parameters
    classifier_struct = OrderedDict([('fc0',
                                      nn.Linear(input_size, hidden_units[0])),
                                     ('dropout0', nn.Dropout(dropout_rate)),
                                     ('relu0', nn.ReLU())])

    for i in range(len(hidden_units)):
        if i == (len(hidden_units) - 1):
            classifier_struct[('fc' + str(i + 1))] = nn.Linear(
                int(hidden_units[i]), output_size)
            classifier_struct['output'] = nn.LogSoftmax(dim=1)
        else:
            classifier_struct[('fc' + str(i + 1))] = nn.Linear(
                int(hidden_units[i]), hidden_units[i + 1])
            classifier_struct[('dropout' +
                               str(i + 1))] = nn.Dropout(dropout_rate)
            classifier_struct[('relu' + str(i + 1))] = nn.ReLU()

    #Create replacement classifier
    classifier = nn.Sequential(classifier_struct)

    #Create the model & load in the new classifer
    model.classifier = classifier

    print('Creating Model...Finished')
    return model
def creat_model(pre_model_arch, hidden_layers_size, input_size, output_size):

    # Create the network, adjust classifier define the criterion and optimizer
    if pre_model_arch == "vgg13":
        model_flower_imgs = models.vgg13(pretrained=True)
    elif pre_model_arch == "vgg16":
        model_flower_imgs = models.vgg16(pretrained=True)
    else:
        assert 0, "Architecture isn't supported"

    # Freeze parameters so we don't backprop through them
    for param in model_flower_imgs.parameters():
        param.requires_grad = False

    # Update the classifier by desired conf.
    model_flower_imgs.classifier = creat_classifier(input_size, output_size,
                                                    hidden_layers_size)

    return model_flower_imgs
Example n. 17
def build_model(arch, inputs, hidden_units, output, rate):
    """
    This function builds the model to be used in training or predition using
    the parameters passed
    Params:
        arch - Model architecture/type to be used
        inputs - Number of input features for the classifier (defaults to 25088)
        hidden_units - Number of hidden units to be used in classifier
        output - Number of output classes (defaults to 102)
        rate - Dropout rate (defaults to 0.2)
    Returns:
        model - Model with replaced classifier and frozen gradients
    """
    fc1_input = inputs if inputs else 25088
    fc2_output = output if output else 102
    dp_rate = rate if rate else 0.2

    #Check if arch is valid and load the correct model
    model_options = ["vgg11", "vgg13", "vgg19"]
    if arch in model_options:
        print("Building model ---> arch: {}".format(arch))
        if arch == "vgg19":
            model = models.vgg19(pretrained=True)
        elif arch == "vgg13":
            model = models.vgg13(pretrained=True)
        else:
            model = models.vgg11(pretrained=True)
    else:
        print("Invalid model: {} --> Using arch: vgg11".format(arch))
        model = models.vgg11(pretrained=True)

    #Freezes gradients
    for param in model.parameters():
        param.requires_grad = False

    #Build classifier and replace it in model
    classifier = nn.Sequential(
        OrderedDict([("fc1", nn.Linear(fc1_input, hidden_units)),
                     ("relu", nn.ReLU()), ("drop", nn.Dropout(p=dp_rate)),
                     ("fc2", nn.Linear(hidden_units, fc2_output)),
                     ("output", nn.LogSoftmax(dim=1))]))

    model.classifier = classifier

    return model
Example n. 18
def build_network(hidden_layers, drop_p, model='vgg19'):
    if model == 'vgg11':
        model = models.vgg11(pretrained=True)
    elif model == 'vgg13':
        model = models.vgg13(pretrained=True)
    elif model == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif model == 'vgg19':
        model = models.vgg19(pretrained=True)
    else:
        print("Please load a VGG network")
        return False

    # Freeze parameters so we don't backprop through them
    for param in model.parameters():
        param.requires_grad = False

    # Add the classifier
    # Started from the lesson, adapted to an arbitrary number of hidden layers
    classifier_data = OrderedDict()
    for i in range(len(hidden_layers)):
        classifier_data['dropout' + str(i + 1)] = nn.Dropout(drop_p)

        if i == 0:
            classifier_data['fc' + str(i + 1)] = nn.Linear(
                25088, hidden_layers[i])
        else:
            classifier_data['fc' + str(i + 1)] = nn.Linear(
                hidden_layers[i - 1], hidden_layers[i])

        classifier_data['relu' + str(i + 1)] = nn.ReLU()

    # final layer maps the last hidden size to the 102 output classes
    classifier_data['fc' + str(len(hidden_layers) + 1)] = nn.Linear(
        hidden_layers[-1], 102)
    # add softmax layer
    classifier_data['output'] = nn.LogSoftmax(dim=1)
    classifier = nn.Sequential(classifier_data)

    model.classifier = classifier
    return model
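A usage sketch, assuming two hidden layers of 4096 and 1024 units with 20% dropout:

model = build_network(hidden_layers=[4096, 1024], drop_p=0.2, model='vgg13')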
Example n. 19
def create_model(arch, hidden_features):

    if arch == "vgg11":
        model = models.vgg11(pretrained=True)
    elif arch == "vgg11_bn":
        model = models.vgg11_bn(pretrained=True)
    elif arch == "vgg13":
        model = models.vgg13(pretrained=True)
    elif arch == "vgg13_bn":
        model = models.vgg13_bn(pretrained=True)
    elif arch == "vgg16":
        model = models.vgg16(pretrained=True)
    elif arch == "vgg16_bn":
        model = models.vgg16_bn(pretrained=True)
    elif arch == "densenet121":
        model = models.densenet121(pretrained=True)
    elif arch == "densenet161":
        model = models.densenet161(pretrained=True)
    else:
        print("architecture not supported.")
        exit()

    # turn off gradient back propagation
    for param in model.parameters():
        param.requires_grad = False

    if arch in ["vgg11", "vgg11_bn", "vgg13", "vgg13_bn", "vgg16", "vgg16_bn"]:
        in_features = model.classifier[0].in_features
    elif arch in ["densenet121", "densenet161"]:
        in_features = model.classifier.in_features

    if hidden_features < 102 or hidden_features > in_features:
        print("please choose a number between {in_features} and 102")
        exit()

    classifier = nn.Sequential(nn.Linear(in_features, hidden_features),
                               nn.ReLU(), nn.Dropout(p=0.2),
                               nn.Linear(hidden_features, 102),
                               nn.LogSoftmax(dim=1))

    model.classifier = classifier
    return (model)
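A usage sketch: hidden_features must lie between 102 and the classifier's input width for the chosen architecture (e.g. 1024 for densenet121):

model = create_model("densenet121", 512)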
Example n. 20
def load_model_from_checkpoint(checkpoint_filepath):
    checkpoint = torch.load(checkpoint_filepath)
    if checkpoint['model_arch'] == 'vgg19':
        model = models.vgg19(pretrained=True)
        arch = 'vgg19'
    elif checkpoint['model_arch'] == 'vgg13':
        model = models.vgg13(pretrained=True)
        arch = 'vgg13'
    elif checkpoint['model_arch'] == 'vgg16':
        model = models.vgg16(pretrained=True)
        arch = 'vgg16'
    else:
        raise ValueError("The model architecture '{}' is not recognized".format(checkpoint['model_arch']))
    for param in model.parameters():
        param.requires_grad = False
    model.class_to_idx = checkpoint['class_to_idx']
    model.classifier = Classifier(checkpoint['hidden_units'])
    model.load_state_dict(checkpoint['state_dict'])
    #model.to(checkpoint['model_device'])
    return model
Example n. 21
def Network(hidden_layers):
    # Check architecture
    if args.arch == 'vgg16':
        model = models.vgg16(pretrained=True)
    else:
        model = models.vgg13(pretrained=True)
    # Freeze parameters
    for param in model.parameters():
        param.requires_grad = False
    # Defining classifier
    classifier = nn.Sequential(
        OrderedDict([('fc1', nn.Linear(25088, hidden_layers[0])),
                     ('relu', nn.ReLU()), ('drop', nn.Dropout(p=0.5)),
                     ('fc2', nn.Linear(hidden_layers[0], hidden_layers[1])),
                     ('relu2', nn.ReLU()), ('drop2', nn.Dropout(p=0.5)),
                     ('fc3', nn.Linear(hidden_layers[1], 102)),
                     ('output', nn.LogSoftmax(dim=1))]))
    # Replacing classifier
    model.classifier = classifier
    return model
Example n. 22
def load_checkpoint(checkpoint_path):
   
    checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc:storage)
    model = models.vgg13(pretrained=True)
    for param in model.parameters():
        param.requires_grad=False
    for param in model.classifier.parameters():
        param.requires_grad= True
    model.class_to_idx = checkpoint['class_to_idx']
    model.classifier = nn.Sequential(
                            nn.Linear(25088, 4096), 
                            nn.ReLU(), 
                            nn.Dropout(0.4),
                            nn.Linear(4096, 102),                   
                            nn.LogSoftmax(dim=1))
    
    model.load_state_dict(checkpoint['model_state_dict'], strict=False)
    model.eval()
    
    return model
Example n. 23
    def build_network(self):
        # Map names to constructors so only the requested model is downloaded
        model_select = {
            "vgg13": models.vgg13,
            "vgg11": models.vgg11
        }
        model = model_select[self.arch](pretrained=True)
        print(model)

        for param in model.parameters():
            param.requires_grad = False

        classifier = nn.Sequential(
            nn.Linear(model.classifier[0].in_features, self.hidden_layer),
            nn.ReLU(), nn.Dropout(0.05),
            nn.Linear(self.hidden_layer, self.output_size),
            nn.LogSoftmax(dim=1))
        model.classifier = classifier
        filename = 'model_base.pkl'
        pickle.dump(model, open(filename, 'wb'))
        return model, classifier
Example n. 24
def generate_model(arch, units):
    if (arch == "vgg13"):
        model = models.vgg13(pretrained=True)
        input_size = model.classifier[0].in_features
    elif (arch == "vgg16"):
        model = models.vgg16(pretrained=True)
        input_size = model.classifier[0].in_features
    elif (arch == "densenet121"):
        model = models.densenet121(pretrained=True)
        input_size = model.classifier.in_features
    else:
        print("Error: unsupported architecture '{}'".format(arch))
        exit()
    classifier_dict = OrderedDict([("fc1",nn.Linear(input_size, units)),
                                    ("relu1", nn.ReLU()),
                                    ("dropout", nn.Dropout(0.2)),
                                    ("fc2", nn.Linear(units,102)),
                                    ("p_out", nn.LogSoftmax(dim=1))])
    model.classifier = nn.Sequential(classifier_dict)
    return model, classifier_dict
def load_checkpoint():

    checkpoint = torch.load("checkpoint.pth")
    structure = checkpoint['architecture']

    if structure == 'vgg13':
        model = models.vgg13(pretrained=True)
        model.name = "vgg13"
    else:
        # look the constructor up on torchvision.models instead of using exec
        model = getattr(models, structure)(pretrained=True)
        model.name = structure

    model.load_state_dict(checkpoint['state_dict'])
    model.classifier = checkpoint['classifier']
    model.class_to_idx = checkpoint['class_to_idx']

    return model
Example n. 26
def load_checkpoint(path):

    checkpoint = torch.load(path)

    if checkpoint['arch'] == 'vgg13':
        model = models.vgg13(pretrained=True)
    else:  #vgg11
        model = models.vgg11(pretrained=True)

    for param in model.parameters():
        param.requires_grad = False

    model.classifier = checkpoint['classifier']
    model.class_to_idx = checkpoint['class_to_idx']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer = checkpoint['optimizer']
    optimizer.load_state_dict(checkpoint['optimizer_dict'])
    model.idx_to_class = {v: k for k, v in model.class_to_idx.items()}

    return model
def load_saved_model(filename):
    checkpoint_dict = torch.load(filename)
    if checkpoint_dict["model_used"] == "vgg11":
        model_check = models.vgg11(pretrained=True)
    elif checkpoint_dict["model_used"] == "vgg13":
        model_check = models.vgg13(pretrained=True)
    elif checkpoint_dict["model_used"] == "vgg16":
        model_check = models.vgg16(pretrained=True)
    elif checkpoint_dict["model_used"] == "vgg19":
        model_check = models.vgg19(pretrained=True)

    for param in model_check.parameters():
        param.requires_grad = False

    optimizer = checkpoint_dict["optimizer"]
    model_check.classifier = checkpoint_dict["classifier"]
    model_check.load_state_dict(checkpoint_dict["state_dict"])
    optimizer.load_state_dict(checkpoint_dict["optimizer_dict"])
    model_check.class_to_idx = checkpoint_dict["class_to_idx"]
    return model_check
Example n. 28
def getModel(output_units,
             hidden_units,
             arch='vgg16',
             class_to_idx=None,
             learning_rate=1e-3,
             criterion=NLLLoss,
             optimizer=Adam):
    #Choosing between the models
    if arch == 'vgg13':
        model = vgg13(pretrained=True)
    else:
        model = vgg16(pretrained=True)

    #Turning off all the pretrained parameters, since we don't want to retrain them
    for param in model.parameters():
        param.requires_grad = False

    #We define our new classifier, with input that match the default vgg16 classifier,
    #and output equals to the number of classes
    classifier = Sequential(
        OrderedDict([
            ('fc1', Linear(25088, hidden_units)),
            ('act1', ReLU()),
            ('Dropout1', Dropout(p=0.5)),
            ('fc2', Linear(hidden_units, hidden_units)),
            ('act2', ReLU()),
            ('Dropout2', Dropout(p=0.5)),
            ('fc3', Linear(hidden_units, output_units)),
            ('output', LogSoftmax(dim=1)),
        ]))
    #we detach the default vgg16 classifier and plug in the new one
    model.classifier = classifier
    model.class_to_idx = class_to_idx

    criterion = criterion()
    optimizer = optimizer(model.classifier.parameters(), lr=learning_rate)
    model.criterion = criterion
    model.optimizer = optimizer
    model.arch = arch

    return model
def load_checkpoint(file_path, use_this_device):

    print('- Loading checkpoint from file:',file_path)
    print('- Using device:', use_this_device)

    checkpoint = torch.load(file_path, map_location = use_this_device)

    model_arch = checkpoint['model_arch']

    if model_arch == 'vgg19':
        model = models.vgg19(pretrained=True)
    elif model_arch == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif model_arch == 'vgg13':
        model = models.vgg13(pretrained = True)
    else:
        print('- Model architecture {} unsupported: using vgg19 instead.'.format(model_arch))
        model = models.vgg19(pretrained = True)

    # Freeze parameters!
    for param in model.parameters():
        param.requires_grad = False

    # Recreate the classifier
    from collections import OrderedDict
    classifier = nn.Sequential(OrderedDict([
                          ('fc1', nn.Linear(25088, checkpoint['hidden_units'])), # model has 25088 in_features
                          ('relu', nn.ReLU()),
                          ('dropout', nn.Dropout(p = 0.5)), # this is defined in train.py
                          ('fc2', nn.Linear(checkpoint['hidden_units'], 102)), # model has 102 out_features
                          ('output', nn.LogSoftmax(dim=1))
                          ]))
    model.classifier = classifier

    # Load model state
    model.load_state_dict(checkpoint['model_state_dict'])
    
    # Load class to idx
    model.class_to_idx = checkpoint['class_to_idx']

    return model
def load_model(name,
               num_classes,
               input_height=64,
               input_width=64,
               num_of_channels=3):
    if name == 'COAPModNet':
        net = COAPModNet(num_classes=num_classes)
    elif name == 'COAPNet':
        net = COAPNet(num_classes=num_classes)
    elif name == 'SimpleNet':
        net = SimpleNet(num_classes=num_classes)
    elif name == 'AlexNet':
        net = models.AlexNet(num_classes=num_classes)
    elif name == 'PlanktonNet':
        net = PlanktonNet(num_classes=num_classes)
    elif name == 'ResNet18':
        # torchvision ResNets expect 3-channel RGB input; only num_classes is configurable here
        net = models.resnet18(num_classes=num_classes)
    elif name == 'ResNet34':
        net = models.resnet34(num_classes=num_classes)
    elif name == 'ResNet50':
        net = models.resnet50(num_classes=num_classes)
    elif name == 'ResNet101':
        net = models.resnet101(num_classes=num_classes)
    elif name == 'ResNet152':
        net = models.resnet152(num_classes=num_classes)
    elif name == 'VGGNet11':
        net = models.vgg11(num_classes=num_classes)
    elif name == 'VGGNet13':
        net = models.vgg13(num_classes=num_classes)
    elif name == 'VGGNet16':
        net = models.vgg16(num_classes=num_classes)
    elif name == 'VGGNet19':
        net = models.vgg19(num_classes=num_classes)
    elif name == 'ResNext50':
        net = models.resnext50_32x4d(num_classes=num_classes)
    elif name == 'ResNext101':
        net = models.resnext101_32x8d(num_classes=num_classes)
    elif name == 'GoogLeNet':
        net = models.GoogLeNet(num_classes=num_classes)
    else:
        raise ValueError('Unknown model name: {}'.format(name))

    return net
Example n. 31
def create_model(model_name, hidden_units, learning_rate, device):
    if model_name == 'vgg13':
        model = models.vgg13(pretrained=True)
    elif model_name == 'vgg16':
        model = models.vgg16(pretrained=True)
    else:
        model = models.vgg19(pretrained=True)

    for param in model.parameters():
        param.requires_grad = False

    model.classifier = nn.Sequential(nn.Linear(25088, hidden_units), nn.ReLU(),
                                     nn.Dropout(0.25),
                                     nn.Linear(hidden_units, 102),
                                     nn.LogSoftmax(dim=1))

    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)

    model.to(device)
    return model, criterion, optimizer
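A minimal training-step sketch using the returned model, criterion, and optimizer; trainloader is a hypothetical DataLoader and the hyperparameters are illustrative:

model, criterion, optimizer = create_model('vgg16', hidden_units=512,
                                           learning_rate=0.001, device='cuda')
for images, labels in trainloader:
    images, labels = images.to('cuda'), labels.to('cuda')
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()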
def load_checkpoint(path):
    checkpoint = torch.load(path, map_location="cpu")
    
    if checkpoint['arch'] == 'vgg11':
        model = models.vgg11(pretrained=True)
    elif checkpoint['arch'] == 'vgg13':
        model = models.vgg13(pretrained=True)
    elif checkpoint['arch'] == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif checkpoint['arch'] == 'vgg19':
        model = models.vgg19(pretrained=True)
    else:
        raise ValueError("Unsupported architecture: {}".format(checkpoint['arch']))
    
    for param in model.parameters():
        param.requires_grad = False
        
    model.features = checkpoint['features']
    model.classifier = checkpoint['classifier']
    model.class_to_idx = checkpoint['class_to_idx']
    model.load_state_dict(state_dict=checkpoint['state_dict'])
        
    return model