def get_params(example_index):
    """
        Gets used variables for almost all visualizations, like the image, model etc.

    Args:
        example_index (int): Image id to use from examples

    returns:
        original_image (numpy arr): Original image read from the file
        prep_img (numpy_arr): Processed image
        target_class (int): Target class for the image
        file_name_to_export (string): File name to export the visualizations
        pretrained_model(Pytorch model): Model to use for the operations
    """
    # Pick one of the examples
    example_list = [['../input_images/snake.jpg', 56],
                    ['../input_images/cat_dog.png', 243],
                    ['../input_images/spider.png', 72]]
    selected_example = example_index
    img_path = example_list[selected_example][0]
    target_class = example_list[selected_example][1]
    file_name_to_export = img_path[img_path.rfind('/')+1:img_path.rfind('.')]
    # Read image
    original_image = cv2.imread(img_path, 1)
    # Process image
    prep_img = preprocess_image(original_image)
    # Define model
    pretrained_model = models.alexnet(pretrained=True)
    return (original_image,
            prep_img,
            target_class,
            file_name_to_export,
            pretrained_model)
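
# Hedged usage sketch for get_params: assumes cv2, torchvision.models (as `models`)
# and the repo's preprocess_image helper are already in scope, and that the example
# images listed above exist on disk.
original_image, prep_img, target_class, file_name_to_export, pretrained_model = get_params(0)
output = pretrained_model(prep_img)                 # forward pass on the preprocessed image
print(file_name_to_export, int(output.argmax()), target_class)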
Example #2
 def __init__(self):
   super(AlexNetFc, self).__init__()
   model_alexnet = models.alexnet(pretrained=True)
   self.features = model_alexnet.features
   self.classifier = nn.Sequential()
   for i in range(6):
     self.classifier.add_module("classifier"+str(i), model_alexnet.classifier[i])
   self.__in_features = model_alexnet.classifier[6].in_features
def alexnet(num_classes=1000, pretrained='imagenet'):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
    """
    model = models.alexnet(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['alexnet'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
Example #4
def __init__(self, pretrained_models=models.alexnet(pretrained=True),
             input_shape=(3, 224, 224), num_class=200,
             freeze_layers=range(5), replace_clf=True):
    super(pretrainedNetwork, self).__init__()
    self.features, self.classifier = pretrained_models.features, pretrained_models.classifier
    self.flat_fts = self.get_flat_fts(input_shape, self.features)
    if replace_clf:
        self.classifier = nn.Sequential(nn.Linear(self.flat_fts, 100),
                                        nn.Dropout(p=0.2),
                                        nn.ReLU(),
                                        nn.Linear(100, num_class))
    if freeze_layers is not None:
        for idx, layer in enumerate(chain(self.features.children(), self.classifier.children())):
            if idx in freeze_layers:
                for p in layer.parameters():
                    p.requires_grad = False
    self.input_shape = input_shape
def alexnet(num_classes=1000, pretrained='imagenet'):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
    """
    # https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    model = models.alexnet(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['alexnet'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_alexnet(model)
    return model
Example #6
def select_model(name='vgg19'):
    if name == 'vgg19':
        model, feature_count = models.vgg19_bn(pretrained=True), 25088
    elif name == 'densenet161':
        model, feature_count = models.densenet161(pretrained=True), 2208
    elif name == 'densenet121':
        model, feature_count = models.densenet121(pretrained=True), 1024
    else:
        model, feature_count = models.alexnet(pretrained=True), 9216

    return model, feature_count
def test_fast_initialization_without_orthonormal(image_data):
    alexnet = models.alexnet(pretrained=False)
    pre_init_var = run_with_capture(alexnet, image_data)
    assert pre_init_var[0] >= 1000  # the first few pre-init variances are huge,
    assert pre_init_var[1] >= 100   # even larger than these conservative tests.

    tol = 0.1
    alexnet = apply_lsuv_init(alexnet, image_data, std_tol=tol, do_orthonorm=False, cuda=False)
    *post_init_var, final_var = run_with_capture(alexnet, image_data)
    for var in post_init_var:
        assert 2 <= var <= 4
    assert final_var == pytest.approx(1, tol**2)
Example #8
    def __init__(self, model=None):
        super(ModifiedVGG16Model, self).__init__()

        # model = models.vgg16(pretrained=True)
        model = models.alexnet(pretrained=True)
        self.features = model.features

        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),  # AlexNet features flatten to 9216 (not VGG16's 25088)
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, 2))
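
    # The excerpt above stops before a forward method; a plausible forward
    # (an assumption, not part of the original snippet) flattens the AlexNet
    # feature map before handing it to the classifier:
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)   # (N, 256*6*6) = (N, 9216) for 224x224 inputs
        return self.classifier(x)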
def load_model(arch='vgg19', num_labels=102, hidden_units=4096):
    # Load a pre-trained model
    if arch=='vgg19':
        # Load a pre-trained model
        model = models.vgg19(pretrained=True)
    elif arch=='alexnet':
        model = models.alexnet(pretrained=True)
    else:
        raise ValueError('Unexpected network architecture', arch)
        
    # Freeze its parameters
    for param in model.parameters():
        param.requires_grad = False
    
    # Features, removing the last layer
    features = list(model.classifier.children())[:-1]
  
    # Number of filters in the bottleneck layer
    num_filters = model.classifier[len(features)].in_features

    # Extend the existing architecture with new layers
    features.extend([
        nn.Dropout(),
        nn.Linear(num_filters, hidden_units),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(hidden_units, hidden_units),
        nn.ReLU(True),
        nn.Linear(hidden_units, num_labels),
        ##nn.Softmax(dim=1) 
        # Note that a Softmax layer has not been added, per this PyTorch answer:
        # https://github.com/pytorch/vision/issues/432#issuecomment-368330817
        # It is not included in the transfer learning tutorial either:
        # https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
    ])
    
    model.classifier = nn.Sequential(*features)

    return model
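
# The comment above omits Softmax on purpose: a minimal sketch (standard PyTorch
# usage, not code from the original repository) of training such a head on raw
# logits with nn.CrossEntropyLoss, which applies log-softmax internally.
import torch
import torch.nn as nn

logits = torch.randn(4, 102)                       # fake batch of raw classifier outputs
targets = torch.randint(0, 102, (4,))
loss = nn.CrossEntropyLoss()(logits, targets)
# Equivalent pairing if the model did end in nn.LogSoftmax(dim=1):
loss_alt = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)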
 def __init__(self, requires_grad=False, pretrained=True):
     super(alexnet, self).__init__()
     alexnet_pretrained_features = models.alexnet(pretrained=pretrained).features
     self.slice1 = torch.nn.Sequential()
     self.slice2 = torch.nn.Sequential()
     self.slice3 = torch.nn.Sequential()
     self.slice4 = torch.nn.Sequential()
     self.slice5 = torch.nn.Sequential()
     self.N_slices = 5
     for x in range(2):
         self.slice1.add_module(str(x), alexnet_pretrained_features[x])
     for x in range(2, 5):
         self.slice2.add_module(str(x), alexnet_pretrained_features[x])
     for x in range(5, 8):
         self.slice3.add_module(str(x), alexnet_pretrained_features[x])
     for x in range(8, 10):
         self.slice4.add_module(str(x), alexnet_pretrained_features[x])
     for x in range(10, 12):
         self.slice5.add_module(str(x), alexnet_pretrained_features[x])
     if not requires_grad:
         for param in self.parameters():
             param.requires_grad = False
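
 # The excerpt ends before a forward method; a plausible forward (an assumption,
 # following the usual perceptual-loss slicing pattern) returns the activation
 # produced by each slice:
 def forward(self, x):
     h1 = self.slice1(x)
     h2 = self.slice2(h1)
     h3 = self.slice3(h2)
     h4 = self.slice4(h3)
     h5 = self.slice5(h4)
     return h1, h2, h3, h4, h5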
def load_arch(arch):
    """
    Load a pretrained network
    """
    if arch == 'vgg16':
        model = models.vgg16(pretrained=True)
        input_size = 25088
    elif arch == 'alexnet':
        model = models.alexnet(pretrained=True)
        input_size = 9216
    elif arch == 'resnet18':
        model = models.resnet18(pretrained=True)
        input_size = 512
    elif arch == 'densenet121':
        model = models.densenet121(pretrained=True)
        input_size = 1024
    else:
        raise ValueError('Please choose one of \'vgg16\', \'alexnet\', \'resnet18\' or \'densenet121\' for parameter arch.')
        
    for param in model.parameters():
        param.requires_grad = False
    
    return model, input_size
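
# Hedged usage sketch: the input_size returned by load_arch is the flattened
# feature size feeding the first fully connected layer, so a new head (the layer
# sizes below are illustrative, not from the original code) can be attached like this.
import torch.nn as nn

model, input_size = load_arch('alexnet')            # input_size == 9216
model.classifier = nn.Sequential(
    nn.Linear(input_size, 512),
    nn.ReLU(),
    nn.Linear(512, 102),                            # e.g. 102 flower classes
)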
Example #12
    def __init__(self,
                 image_size,
                 image_features_size,
                 word_embedding,
                 words2ids,
                 ids2words,
                 lstm_hidden_size=256,
                 word_embedding_size=300,
                 cnn=models.alexnet(pretrained=True).features,
                 start_symbol=DEF_START,
                 end_symbol=DEF_SEND):
        """Init NN
            image_size - size of input image.
            lstm_hidden_size - size of cnn features output
            image_features_size - size of image features vector
            word_embedding - pretrained word embedding model
            words2ids - dictionary word -> id
            ids2words - dictionary id -> word
            cnn - pretrained cnn net (alexnet, vgg and other)
            start_symbol - symbol starting sequence
            end_symbol - symbol ending sequence
        """

        super(LSTM_W2V_Net_Cnn_Preload, self).__init__()
        self.image_size = image_size
        self.image_features_size = image_features_size
        #self.cnn = cnn
        #   self.cnn_comp_features = cnn_comp_features

        self.vocab_size = len(words2ids)
        print(self.vocab_size)

        self.word_embedding_size = word_embedding_size
        self.word_embedding = word_embedding

        self.words2ids = words2ids
        self.ids2words = ids2words

        self.start_symbol = start_symbol
        self.start_symbol_embed = torch.from_numpy(
            self.word_embedding[self.start_symbol])

        self.end_symbol = end_symbol
        self.end_symbol_embed = torch.from_numpy(
            self.word_embedding[self.end_symbol])

        #         self.sentence_end_symbol = sentence_end_symbol
        #         self.sentence_end_symbol_id = self.words2ids[self.sentence_end_symbol]

        #         if sentence_end_embed is not None:
        #             self.sentence_end_embed = sentence_end_embed
        #         else:
        #             self.sentence_end_embed = word_embeding['.']

        #self.max_sentence_len = max_sentence_len

        self.lstm_hidden_size = lstm_hidden_size

        # self.fc1 = nn.Sequential( nn.BatchNorm1d(self.image_features_size),
        #                           nn.Linear(self.image_features_size, int(self.image_features_size/2)),
        #                           nn.Dropout(0.001),
        #                           nn.ReLU(),
        #                           nn.Linear(int(self.image_features_size/2), int(self.image_features_size/4) ),
        #                           nn.Dropout(0.001),
        #                           nn.ReLU(),
        #                           nn.Linear(int(self.image_features_size/4), self.lstm_hidden_size),
        #                           nn.BatchNorm1d(self.lstm_hidden_size)
        #                         ).cuda()

        self.fc1 = nn.Sequential(
            nn.Linear(self.image_features_size, self.lstm_hidden_size)).cuda()

        self.lstm_cell = nn.LSTMCell(
            self.lstm_hidden_size + self.word_embedding_size,
            self.lstm_hidden_size).cuda()

        self.fc2 = nn.Sequential(
            nn.Linear(self.lstm_hidden_size, self.vocab_size),
            nn.LogSoftmax()).cuda()
from torchvision.models import alexnet

net = alexnet()
print(net)
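
# A quick programmatic complement to the printed architecture (standard PyTorch,
# added as a small aside): count the model's parameters.
n_params = sum(p.numel() for p in net.parameters())
print(f"AlexNet has {n_params:,} parameters")   # roughly 61 million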
def get_model(architecture_name, freeze_parameters):
    print("Initiating the Neural Network Model...\n")

    trained_model = None
    cleared_for_training = False

    #Load a Pretrained Network - Default vgg16
    if architecture_name == 'vgg16':
        trained_model = models.vgg16(pretrained=True)
        trained_model.name = 'vgg16'
        cleared_for_training = True
    elif architecture_name == 'resnet18':
        trained_model = models.resnet18(pretrained=True)
        trained_model.name = 'resnet18'
        cleared_for_training = True
    elif architecture_name == 'alexnet':
        trained_model = models.alexnet(pretrained=True)
        trained_model.name = 'alexnet'
        cleared_for_training = True
    elif architecture_name == 'squeezenet1':
        trained_model = models.squeezenet1_0(pretrained=True)
        trained_model.name = 'squeezenet1'
        cleared_for_training = False
    elif architecture_name == 'densenet161':
        trained_model = models.densenet161(pretrained=True)
        trained_model.name = 'densenet161'
        cleared_for_training = False
    elif architecture_name == 'inception':
        trained_model = models.inception_v3(pretrained=True)
        trained_model.name = 'inception_v3'
        cleared_for_training = False
    elif architecture_name == 'googlenet':
        trained_model = models.googlenet(pretrained=True)
        trained_model.name = 'googlenet'
        cleared_for_training = False
    elif architecture_name == 'shufflenet':
        trained_model = models.shufflenet_v2_x1_0(pretrained=True)
        trained_model.name = 'shufflenet_v2_x1_0'
        cleared_for_training = False
    elif architecture_name == 'mobilenet':
        trained_model = models.mobilenet_v2(pretrained=True)
        trained_model.name = 'mobilenet_v2'
        cleared_for_training = False
    elif architecture_name == 'resnext50':
        trained_model = models.resnext50_32x4d(pretrained=True)
        trained_model.name = 'resnext50_32x4d'
        cleared_for_training = False
    elif architecture_name == 'wide_resnet50':
        trained_model = models.wide_resnet50_2(pretrained=True)
        trained_model.name = 'wide_resnet50_2'
        cleared_for_training = False
    elif architecture_name == 'mnasnet1':
        trained_model = models.mnasnet1_0(pretrained=True)
        trained_model.name = 'mnasnet1_0'
        cleared_for_training = False
    else:
        print(
            "train.py - Function: get_model\nERROR:  Unknown model passed at the command line..."
        )

    if cleared_for_training and freeze_parameters:
        #Freeze parameters to prevent training
        for param in trained_model.parameters():
            param.requires_grad = False

    return trained_model, cleared_for_training
Example #15
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    model_ft = None
    input_size = 0

    if model_name == 'resnet':
        model_ft = models.resnet152(pretrained=use_pretrained)  #50 or 152
        '''
        If feature_extract is True, set_parameter_requires_grad sets requires_grad
        to False on every existing parameter; the new output layer added below is
        created afterwards, so its parameters keep requires_grad=True.
        '''

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.fc.in_features

        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'alexnet':
        model_ft = models.alexnet(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier[6].in_features

        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'vgg':
        model_ft = models.vgg11_bn(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier[6].in_features

        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'squeezenet':
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == 'densenet':

        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)

        input_size = 224

    elif model_name == 'inception':
        #inception v3
        model_ft = models.inception_v3(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        #Aux net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)

        #Primary Net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

        input_size = 299

    else:
        print('invalid model name')
        exit()

    return model_ft, input_size
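
# Hedged usage sketch (mirroring the PyTorch finetuning tutorial this function
# follows): with feature_extract=True only the newly created layer keeps
# requires_grad=True, so the optimizer is built over just those parameters.
import torch.optim as optim

model_ft, input_size = initialize_model('alexnet', num_classes=10, feature_extract=True)
params_to_update = [p for p in model_ft.parameters() if p.requires_grad]
optimizer = optim.SGD(params_to_update, lr=0.001, momentum=0.9)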
        batch_size = batch_size,
        device = device,
        LE = LE,
        checkpoint_path = None,
        verbose = True
    )
    training.fit(epochs = 500, train_data = data_loaders['train'], val_data = data_loaders['val'], \
                 callbacks = [
                     MetricTracker(metrics = [
                         ('log_loss', metrics.log_loss),
                         ('accuracy_score', metrics.accuracy_score),
                         ('f1_score', partial(metrics.fbeta_score, beta = 2)),
                         # ('sk_accuracy_score', metrics.sk_accuracy_score),
                         # ('sk_f1_weighted', partial(metrics.sk_f1_score, average = 'weighted')),
                         # ('sk_f1_macro', partial(metrics.sk_f1_score, average='macro')),
                     ], save_folder_path = r'/data/PyTorch-Pipeline/logs/{}/MetricTracker/'.format(experiment_name)),
                     ProgressBar(show_batch_metrics = ['log_loss']),
                     # ModelCheckpoint(save_folder_path = r'/data/PyTorch-Pipeline/models/{}/'.format(experiment_name), metric = 'log_loss', best_metric_highest = False, best_only = False, write_frequency = 2, verbose = True),
                     TensorBoard(log_dir = r'/data/PyTorch-Pipeline/logs/{}/TensorBoard/'.format(experiment_name), update_frequency = 1),
                     EarlyStopping(save_folder_path=r'/data/PyTorch-Pipeline/models/{}/'.format(experiment_name), metric='accuracy_score', best_metric_highest=True, best_only=True, write_frequency=2, patience=10, min_delta =0, verbose=True),

                 ])

    return


if __name__ == '__main__':

    model = pretrainedNetwork(pretrained_models = models.alexnet(pretrained=True), input_shape = (3, 224, 224), freeze_layers=range(15), replace_clf=True, num_class=2)
    model_evaluation(path = '/data/DreamPhant/datasets', model = model, experiment_name = r'test7')
    print('done')
Example #17
import torch
from torchvision import models
from torchvision import transforms
from PIL import Image

# Set pretrained=True to download the ImageNet weights, or False to start from random weights
alexnet = models.alexnet(pretrained=True)

# Transform for image to fit into alexnet
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

img = Image.open("WelshCorgi.jpeg")

img_t = transform(img)
batch_t = torch.unsqueeze(img_t, 0)

# Uncomment to show the neuralnet details
# print(alexnet)

# Code to run Alexnet in eval
alexnet.eval()
output = alexnet(batch_t)

# Load the ImageNet class labels from file
with open('classes.txt') as f:
    labels = [line.strip() for line in f.readlines()]
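
# A likely next step, not shown in the snippet above: map the logits to a label
# and a softmax confidence.
idx = int(torch.argmax(output))
confidence = torch.nn.functional.softmax(output, dim=1)[0, idx].item() * 100
print(labels[idx], f"{confidence:.2f}%")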
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    # Initialize these variables which will be set in this if statement. Each of these variables is model specific.
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        if feature_extract == True:
            set_parameter_requires_grad(model_ft)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        if feature_extract == True:
            set_parameter_requires_grad(model_ft)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        if feature_extract == True:
            set_parameter_requires_grad(model_ft)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        if feature_extract == True:
            set_parameter_requires_grad(model_ft)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        if feature_extract == True:
            set_parameter_requires_grad(model_ft)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        if feature_extract == True:
            set_parameter_requires_grad(model_ft)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size
Example #19
import time

import torch
from torchvision import models
from torch import nn
import torchvision.transforms as transforms
from torch.autograd import Variable

import PIL
from PIL import Image

original_model = models.alexnet(pretrained=True)


class AlexnetConv4(nn.Module):
    def __init__(self):
        super(AlexnetConv4, self).__init__()
        self.features = nn.Sequential(
            *list(original_model.features.children())[:-3])

    def forward(self, x):
        x = self.features(x)
        return (x)


model = AlexnetConv4()
print(model)

image_transforms = transforms.Compose([
    transforms.Resize((256, 256), PIL.Image.NEAREST),  # transforms.Scale was removed; Resize is the current equivalent
    transforms.CenterCrop(224),
Example #20
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    model_ft = None
    input_size = 0
    # All other networks expect 224x224 inputs; Inception expects 299x299
    if model_name == "resnet":
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size
import ast
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
import torchvision.models as models
from torch import __version__

resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
vgg16 = models.vgg16(pretrained=True)

models = {'resnet': resnet18, 'alexnet': alexnet, 'vgg': vgg16}

# obtain ImageNet labels
with open('imagenet1000_clsid_to_human.txt') as imagenet_classes_file:
    imagenet_classes_dict = ast.literal_eval(imagenet_classes_file.read())

def classifier(img_path, model_name):
    # load the image
    img_pil = Image.open(img_path)

    # define transforms
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    
    # preprocess the image
    img_tensor = preprocess(img_pil)
Example #22
def display_net():
    net_from_web = alexnet()
    net_from_torchvision = models.alexnet()

    print(net_from_web)
    print(net_from_torchvision)
 def pretrained_state_dict(cls):
     weights = alexnet(pretrained=True).state_dict().values()
     return dict(zip(cls().state_dict().keys(), weights))
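
# Hedged sketch of the re-keying trick above, using two torchvision AlexNets so it
# stands alone; the real cls would be a custom reimplementation whose parameters
# appear in the same order as torchvision's.
src_weights = alexnet(pretrained=True).state_dict().values()
dst = alexnet(pretrained=False)
dst.load_state_dict(dict(zip(dst.state_dict().keys(), src_weights)))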
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    # Initialize these variables which will be set in this if statement. Each of these
    #   variables is model specific.
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes) 
        input_size = 224

    elif model_name == "inception":
        """ Inception v3 
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()
    
    return model_ft, input_size
Example #25
import torch
import torchvision.models as models

models = {
    "alexnet": {
        "model": models.alexnet(pretrained=True),
        "path": "both"
    },
    "vgg16": {
        "model": models.vgg16(pretrained=True),
        "path": "both"
    },
    "squeezenet": {
        "model": models.squeezenet1_0(pretrained=True),
        "path": "both"
    },
    "densenet": {
        "model": models.densenet161(pretrained=True),
        "path": "both"
    },
    "inception_v3": {
        "model": models.inception_v3(pretrained=True),
        "path": "both"
    },
    #"googlenet": models.googlenet(pretrained=True),
    "shufflenet": {
        "model": models.shufflenet_v2_x1_0(pretrained=True),
        "path": "both"
    },
    "mobilenet_v2": {
        "model": models.mobilenet_v2(pretrained=True),
Example #26
from pytorch_utils import *  

# Test
import torchvision.models as models
model = models.alexnet()
print(model)
print('\n\n#### WITH torch_summarize: ####')
print(torch_summarize(model))

# # Output
# AlexNet (
#   (features): Sequential (
#     (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2)), weights=((64, 3, 11, 11), (64,)), parameters=23296
#     (1): ReLU (inplace), weights=(), parameters=0
#     (2): MaxPool2d (size=(3, 3), stride=(2, 2), dilation=(1, 1)), weights=(), parameters=0
#     (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)), weights=((192, 64, 5, 5), (192,)), parameters=307392
#     (4): ReLU (inplace), weights=(), parameters=0
#     (5): MaxPool2d (size=(3, 3), stride=(2, 2), dilation=(1, 1)), weights=(), parameters=0
#     (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), weights=((384, 192, 3, 3), (384,)), parameters=663936
#     (7): ReLU (inplace), weights=(), parameters=0
#     (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), weights=((256, 384, 3, 3), (256,)), parameters=884992
#     (9): ReLU (inplace), weights=(), parameters=0
#     (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), weights=((256, 256, 3, 3), (256,)), parameters=590080
#     (11): ReLU (inplace), weights=(), parameters=0
#     (12): MaxPool2d (size=(3, 3), stride=(2, 2), dilation=(1, 1)), weights=(), parameters=0
#   ), weights=((64, 3, 11, 11), (64,), (192, 64, 5, 5), (192,), (384, 192, 3, 3), (384,), (256, 384, 3, 3), (256,), (256, 256, 3, 3), (256,)), parameters=2469696
#   (classifier): Sequential (
#     (0): Dropout (p = 0.5), weights=(), parameters=0
#     (1): Linear (9216 -> 4096), weights=((4096, 9216), (4096,)), parameters=37752832
#     (2): ReLU (inplace), weights=(), parameters=0
#     (3): Dropout (p = 0.5), weights=(), parameters=0
def fine_tune_l2transfer(dataset_path,
                         model_path,
                         exp_dir,
                         batch_size=100,
                         num_epochs=100,
                         lr=0.0004,
                         reg_lambda=100,
                         init_freeze=0,
                         weight_decay=0,
                         saving_freq=5):
    """
    IMM pipeline, only using L2-transfer technique and weight transfer.

    reg_params is dictionary which looks like:
    - param tensors
    - param weights/backup: tensor(){omega=[one-vectors], init_val=[weights prev task net]}
    - lambda = the regularization hyperparameter used

    :param reg_lambda:  reg hyperparam for the L2-transfer
    """
    print('lr is ' + str(lr))

    ########################################
    # DATASETS
    ########################################
    dsets = torch.load(dataset_path)
    dset_loaders = {
        x: torch.utils.data.DataLoader(dsets[x],
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=8,
                                       pin_memory=True)
        for x in ['train', 'val']
    }
    dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}
    dset_classes = dsets['train'].classes

    ########################################
    # LOAD INIT MODEL
    ########################################
    resume = os.path.join(exp_dir, 'epoch.pth.tar')

    if os.path.isfile(resume):
        checkpoint = torch.load(resume)
        model_ft = checkpoint['model']
        print("=> RESUMING FROM CHECKPOINTED MODEL: ", resume)
    else:
        if not os.path.isfile(model_path):
            model_ft = models.alexnet(pretrained=True)
            print("=> STARTING PRETRAINED ALEXNET")

        else:
            model_ft = torch.load(model_path)
            print("=> STARTING FROM OTHER MODEL: ", model_path)

    # Replace the last classifier layer to match the number of classes in the current dataset
    if not init_freeze:
        # Alexnet vs VGG
        last_layer_index = len(model_ft.classifier) - 1
        num_ftrs = model_ft.classifier[last_layer_index].in_features
        model_ft.classifier._modules[str(last_layer_index)] = nn.Linear(
            num_ftrs, len(dset_classes))

    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)

    # If not resuming from a preempted IMM model, clean up the model
    if not os.path.isfile(resume):

        # Last two parameter tensors of the previous-task model (its old output layer)
        parameters = list(model_ft.parameters())
        parameter1 = parameters[-1]
        parameter2 = parameters[-2]

        # Try to remove them from the reg_params
        try:
            model_ft.reg_params.pop(parameter1, None)
            model_ft.reg_params.pop(parameter2, None)
        except:
            print('nothing to remove')

        # The regularization params are (re)built from the parameters of the previous model
        reg_params = update_reg_params(model_ft)
        print('update')
        reg_params['lambda'] = reg_lambda  # The regularization hyperparam
        model_ft.reg_params = reg_params

    # Only transfer here to CUDA, preventing non-cuda network adaptations
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        model_ft = model_ft.cuda()

    ########################################
    # TRAIN
    ########################################
    # Define Optimizer for IMM: extra loss term in step
    optimizer_ft = train_L2transfer.Weight_Regularized_SGD(
        model_ft.parameters(), lr, momentum=0.9, weight_decay=weight_decay)
    criterion = nn.CrossEntropyLoss()  # Loss
    model_ft, acc = train_L2transfer.train_model(model_ft,
                                                 criterion,
                                                 optimizer_ft,
                                                 lr,
                                                 dset_loaders,
                                                 dset_sizes,
                                                 use_gpu,
                                                 num_epochs,
                                                 exp_dir,
                                                 resume,
                                                 saving_freq=saving_freq)
    return model_ft, acc
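
# A minimal sketch of the reg_params structure the docstring above describes (an
# interpretation, not code taken from the original repository): each shared
# parameter maps to an omega of ones and a backup of the previous task's weights,
# plus a global 'lambda' entry.
def build_reg_params_sketch(model, reg_lambda):
    reg_params = {}
    for p in model.parameters():
        reg_params[p] = {
            'omega': torch.ones_like(p.data),   # uniform importance weights
            'init_val': p.data.clone(),         # weights of the previous-task net
        }
    reg_params['lambda'] = reg_lambda
    return reg_params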