def inceptionv3(num_classes=1000, pretrained='imagenet'):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
    """
    model = models.inception_v3(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['inceptionv3'][pretrained]
        model = load_pretrained(model, num_classes, settings)

    # Modify attributes
    model.last_linear = model.fc
    del model.fc

    def features(self, input):
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(input) # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192
        x = self.Mixed_5b(x) # 35 x 35 x 256
        x = self.Mixed_5c(x) # 35 x 35 x 288
        x = self.Mixed_5d(x) # 35 x 35 x 288
        x = self.Mixed_6a(x) # 17 x 17 x 768
        x = self.Mixed_6b(x) # 17 x 17 x 768
        x = self.Mixed_6c(x) # 17 x 17 x 768
        x = self.Mixed_6d(x) # 17 x 17 x 768
        x = self.Mixed_6e(x) # 17 x 17 x 768
        if self.training and self.aux_logits:
            self._out_aux = self.AuxLogits(x) # 17 x 17 x 768
        x = self.Mixed_7a(x) # 8 x 8 x 1280
        x = self.Mixed_7b(x) # 8 x 8 x 2048
        x = self.Mixed_7c(x) # 8 x 8 x 2048
        return x

    def logits(self, features):
        x = F.avg_pool2d(features, kernel_size=8) # 1 x 1 x 2048
        x = F.dropout(x, training=self.training) # 1 x 1 x 2048
        x = x.view(x.size(0), -1) # 2048
        x = self.last_linear(x) # 1000 (num_classes)
        if self.training and self.aux_logits:
            aux = self._out_aux
            self._out_aux = None
            return x, aux
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
        
    # Modify methods
    setattr(model.__class__, 'features', features)
    setattr(model.__class__, 'logits', logits)
    setattr(model.__class__, 'forward', forward)  
    return model
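A minimal usage sketch of the patched model (assuming torch is imported and the inceptionv3 function above is in scope; pretrained=None skips the weight loading):

import torch

model = inceptionv3(num_classes=1000, pretrained=None).eval()
x = torch.randn(1, 3, 299, 299)
with torch.no_grad():
    feats = model.features(x)     # 1 x 2048 x 8 x 8 feature map
    scores = model.logits(feats)  # 1 x 1000 class scores from last_linear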
def inceptionv3(num_classes=1000, pretrained='imagenet'):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
    """
    model = models.inception_v3(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['inceptionv3'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
Example #3
 def __init__(self):
     super(INCEPTION_V3, self).__init__()
     self.model = models.inception_v3()
     url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'
     # print(next(model.parameters()).data)
     state_dict = \
         model_zoo.load_url(url, map_location=lambda storage, loc: storage)
     self.model.load_state_dict(state_dict)
     for param in self.model.parameters():
         param.requires_grad = False
     print('Load pretrained model from ', url)
Example #4
 def __init__(self, model):
     super(feature_net, self).__init__()
     if model == 'vgg':
         vgg = models.vgg19(pretrained=True)
         self.feature = nn.Sequential(*list(vgg.children())[:-1])
         self.feature.add_module('global average', nn.AvgPool2d(9))
     elif model == 'inceptionv3':
         inception = models.inception_v3(pretrained=True)
         self.feature = nn.Sequential(*list(inception.children())[:-1])
         self.feature._modules.pop('13')
         self.feature.add_module('global average', nn.AvgPool2d(35))
     elif model == 'resnet152':
         resnet = models.resnet152(pretrained=True)
         self.feature = nn.Sequential(*list(resnet.children())[:-1])
Example #5
    def get_FID_scores(self):
        fake_features_list = []
        real_features_list = []

        #self.gen.eval()

        # Check this against what we have
        # NOTE: double-check this
        n_samples = self.numberOfSamples  # The total number of samples
        batch_size = self.batchsize  # Samples per iteration

        inception_model = inception_v3(pretrained=False)
        # find the downloaded pretrained model
        if self.config.run_polyaxon:
            finalPath = self.config.data_path / 'models' / 'OutputModels' / 'inception_v3_google-1a9a5a14.pth'
        else:
            localdir = Path().absolute().parent
            modelOutputPath = Path.joinpath(localdir, 'OutputModels')
            finalPath = Path.joinpath(modelOutputPath,
                                      'inception_v3_google-1a9a5a14.pth')

        inception_model.load_state_dict(torch.load(finalPath))
        inception_model.to(self.device)
        inception_model = inception_model.eval()  # Evaluation mode
        inception_model.fc = torch.nn.Identity()

        cur_samples = 0
        Fake_dataloader_Iterations = iter(self.fakes)
        with torch.no_grad(
        ):  # You don't need to calculate gradients here, so you do this to save memory
            try:
                if self.TCI:
                    for real_example, target in tqdm(
                            self.reals,
                            total=n_samples // batch_size,
                            disable=self.config.run_polyaxon):  # Go by batch
                        real_samples = real_example
                        real_features = inception_model(
                            real_samples.to(self.device)).detach().to(
                                'cpu')  # Move features to CPU
                        real_features_list.append(real_features)

                        # Not sure whether this works
                        # https://stackoverflow.com/questions/53280967/pytorch-nextitertraining-loader-extremely-slow-simple-data-cant-num-worke
                        fake_samples = next(Fake_dataloader_Iterations)
                        # fake_samples = self.preprocess(self.gen(fake_samples))
                        fake_features = inception_model(fake_samples[0].to(
                            self.device)).detach().to('cpu')
                        fake_features_list.append(fake_features)
                        cur_samples += len(real_samples)
                        if cur_samples >= n_samples:
                            break
                else:
                    for real_example in tqdm(
                            self.reals,
                            total=n_samples // batch_size,
                            disable=self.config.run_polyaxon):  # Go by batch
                        real_samples = real_example
                        real_features = inception_model(
                            real_samples.to(self.device)).detach().to(
                                'cpu')  # Move features to CPU
                        real_features_list.append(real_features)

                        # Not sure whether this works
                        # https://stackoverflow.com/questions/53280967/pytorch-nextitertraining-loader-extremely-slow-simple-data-cant-num-worke
                        fake_samples = next(Fake_dataloader_Iterations)
                        # fake_samples = self.preprocess(self.gen(fake_samples))
                        fake_features = inception_model(
                            fake_samples.to(self.device)).detach().to('cpu')
                        fake_features_list.append(fake_features)
                        cur_samples += len(real_samples)
                        if cur_samples >= n_samples:
                            break
            except Exception as exc:
                print("Error in FID loop:", exc)

        # Combine all features from the loop
        fake_features_all = torch.cat(fake_features_list)
        real_features_all = torch.cat(real_features_list)
        # Get sigma and mu
        self.mu_fake = torch.mean(fake_features_all, 0)
        self.mu_real = torch.mean(real_features_all, 0)
        self.sigma_fake = self.get_covariance(fake_features_all)
        self.sigma_real = self.get_covariance(real_features_all)

        with torch.no_grad():
            FID_Distance = self.frechet_distance(self.mu_real, self.mu_fake,
                                                 self.sigma_real,
                                                 self.sigma_fake).item()
        #If you want to visualize uncomment this
        #self.visualizeResults()
        return FID_Distance
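The helpers get_covariance and frechet_distance are not shown in this example; a minimal sketch of what they typically compute (the standard FID formula), assuming numpy/scipy are available and the feature tensors are on the CPU:

import numpy as np
import torch
from scipy import linalg

def get_covariance(features):
    # features: (N, D) tensor of pooled Inception activations
    return torch.from_numpy(np.cov(features.numpy(), rowvar=False))

def frechet_distance(mu_x, mu_y, sigma_x, sigma_y):
    # FID = ||mu_x - mu_y||^2 + Tr(sigma_x + sigma_y - 2 * (sigma_x @ sigma_y)^(1/2))
    covmean, _ = linalg.sqrtm(sigma_x.numpy() @ sigma_y.numpy(), disp=False)
    covmean = np.real(covmean)  # drop tiny imaginary parts from numerical error
    diff = (mu_x - mu_y).numpy()
    fid = diff @ diff + np.trace(sigma_x.numpy() + sigma_y.numpy() - 2.0 * covmean)
    return torch.tensor(fid)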
Example #6
 def __init__(self, device, model='resnet34', workers=4, batch_size=64):
     '''
     model: inception_v3, vgg13, vgg16, vgg19, resnet18, resnet34,
            resnet50, resnet101, or resnet152
     '''
     self.model = model
     self.batch_size = batch_size
     self.workers = workers
     if self.model.find('vgg') >= 0:
         self.vgg = getattr(models, model)(pretrained=True).to(device).eval()
         self.trans = transforms.Compose([
             transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize((0.485, 0.456, 0.406),
                                  (0.229, 0.224, 0.225)),
         ])
     elif self.model.find('resnet') >= 0:
         resnet = getattr(models, model)(pretrained=True)
         resnet.to(device).eval()
         resnet_feature = nn.Sequential(resnet.conv1, resnet.bn1,
                                        resnet.relu,
                                        resnet.maxpool, resnet.layer1,
                                        resnet.layer2, resnet.layer3,
                                        resnet.layer4).to(device).eval()
         self.resnet = resnet
         self.resnet_feature = resnet_feature
         self.trans = transforms.Compose([
             transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize((0.485, 0.456, 0.406),
                                  (0.229, 0.224, 0.225)),
         ])
     elif self.model == 'inception' or self.model == 'inception_v3':
         inception = models.inception_v3(
             pretrained=True, transform_input=False).to(device).eval()
         inception_feature = nn.Sequential(inception.Conv2d_1a_3x3,
                                           inception.Conv2d_2a_3x3,
                                           inception.Conv2d_2b_3x3,
                                           nn.MaxPool2d(3, 2),
                                           inception.Conv2d_3b_1x1,
                                           inception.Conv2d_4a_3x3,
                                           nn.MaxPool2d(3, 2),
                                           inception.Mixed_5b,
                                           inception.Mixed_5c,
                                           inception.Mixed_5d,
                                           inception.Mixed_6a,
                                           inception.Mixed_6b,
                                           inception.Mixed_6c,
                                           inception.Mixed_6d,
                                           inception.Mixed_7a,
                                           inception.Mixed_7b,
                                           inception.Mixed_7c,
                                           ).to(device).eval()
         self.inception = inception
         self.inception_feature = inception_feature
         self.trans = transforms.Compose([
             transforms.Resize(299),
             transforms.ToTensor(),
             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         ])
     else:
         raise NotImplementedError
import torch
from torch import nn
from torchvision import models
import numpy as np
import cv2


class AbandonDetector(nn.Module):
    def __init__(self):
        super(AbandonDetector, self).__init__()
        self.inception = models.inception_v3(pretrained=True)


inception = models.inception_v3(pretrained=True)
inception = inception.cuda()
frames = np.load('data/tensor0.npy')

batch = []
for i in range(10):
    frame = cv2.resize(frames[i], (299, 299), interpolation=cv2.INTER_AREA)
    batch.append(frame)
batch = np.stack(batch)
batch = torch.FloatTensor(batch).permute(0, 3, 1, 2).cuda()  # NHWC -> NCHW

out = inception(batch)
print(out)
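Note that the snippet above feeds raw pixel values straight into the network. torchvision's pretrained Inception v3 expects inputs scaled to [0, 1] and normalized with the ImageNet statistics, so a preprocessing step along these lines is usually needed (a sketch, assuming the frames are uint8 RGB and reusing the batch/inception variables from above):

mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).cuda()
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).cuda()
batch = batch / 255.0          # uint8 pixel values -> [0, 1]
batch = (batch - mean) / std   # ImageNet normalization
out = inception(batch)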
Example #8
def get_dcnn(arch, num_classes, pretrained):
    if arch == 'resnet18':
        model = models.resnet18(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            model.apply(weights_init)
    elif arch == 'myresnet18':
        model = myresnet.myresnet18(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            model.apply(weights_init)
    elif arch == 'resnet34':
        model = models.resnet34(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            model.apply(weights_init)
    elif arch == 'myresnet34':
        model = myresnet.myresnet34(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            model.apply(weights_init)
    elif arch == 'resnet50':
        model = models.resnet50(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            print('initializing the resnet50 ...')
            model.apply(weights_init)
    elif arch == 'resnet101':
        model = models.resnet101(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            model.apply(weights_init)
    elif arch == 'myresnet101':
        model = myresnet.myresnet101(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            model.apply(weights_init)
    elif arch == 'resnet152':
        model = models.resnet152(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            model.apply(weights_init)
    elif arch == 'myresnet152':
        model = myresnet.myresnet152(pretrained=pretrained)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if not pretrained:
            model.apply(weights_init)
    elif arch == 'vgg16':
        model = models.vgg16(pretrained=pretrained)
        mod = list(model.classifier.children())[:-1] + [
            torch.nn.Linear(4096, num_classes)
        ]
        new_classifier = torch.nn.Sequential(*mod)
        model.classifier = new_classifier
    elif arch == 'vgg19':
        model = models.vgg19(pretrained=pretrained)
        mod = list(model.classifier.children())[:-1] + [
            torch.nn.Linear(4096, num_classes)
        ]
        new_classifier = torch.nn.Sequential(*mod)
        model.classifier = new_classifier
        # model = nets.Vgg19(num_classes, pretrained=bool(args.pretrained), bn_after_act=False)
    elif arch == 'inception_v3':
        model = models.inception_v3(pretrained=pretrained,
                                    transform_input=True)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)

        model.AuxLogits.fc = nn.Linear(model.AuxLogits.fc.in_features,
                                       num_classes)
    elif arch == 'inception_v3_dropout':
        model = models.inception_v3(pretrained=pretrained,
                                    transform_input=True)
        num_ftrs = model.fc.in_features  # 2048

        model.fc = nn.Sequential(
            nn.Linear(num_ftrs, num_ftrs),
            nn.BatchNorm1d(num_ftrs),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5, inplace=False),
            # True will cause: "one of the variables needed for gradient computation
            #   has been modified by an inplace operation"
            nn.Linear(num_ftrs, num_classes),
        )

        model.AuxLogits.fc = nn.Linear(model.AuxLogits.fc.in_features,
                                       num_classes)
    elif arch == 'inception_v3_alldrop':
        model = my_inception.my_inception_v3(pretrained=pretrained,
                                             transform_input=True)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)

        model.AuxLogits.fc = nn.Linear(model.AuxLogits.fc.in_features,
                                       num_classes)
    else:
        print('No {}!'.format(arch))
    return model
Example #9
    def __init__(self,
                 output_blocks=[3],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : list of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad
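The forward method of this class is omitted here; in the full pytorch-fid implementation it returns one tensor per requested output block. A usage sketch under that assumption (use_fid_inception=False avoids the external fid_inception_v3 helper):

import torch

model = InceptionV3(output_blocks=[3], use_fid_inception=False).eval()
images = torch.rand(4, 3, 299, 299)        # values in [0, 1]
with torch.no_grad():
    pool = model(images)[0]                # (4, 2048, 1, 1) final avgpool features
features = pool.squeeze(-1).squeeze(-1)    # (4, 2048), ready for mu/sigma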
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    # Initialize these variables which will be set in this if statement. Each of these
    #   variables is model specific.
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes) 
        input_size = 224

    elif model_name == "inception":
        """ Inception v3 
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()
    
    return model_ft, input_size
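Because the Inception branch keeps the auxiliary classifier, a training step usually combines both heads, as in the torchvision fine-tuning tutorial. A sketch with a dummy batch (the 0.4 auxiliary weight follows that tutorial; set_parameter_requires_grad is the helper used above):

import torch
import torch.nn as nn

model_ft, input_size = initialize_model("inception", num_classes=10,
                                        feature_extract=True, use_pretrained=False)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
    filter(lambda p: p.requires_grad, model_ft.parameters()), lr=0.001)

model_ft.train()
inputs = torch.randn(4, 3, input_size, input_size)   # dummy 299 x 299 batch
labels = torch.randint(0, 10, (4,))
optimizer.zero_grad()
outputs, aux_outputs = model_ft(inputs)               # train mode returns both heads
loss = criterion(outputs, labels) + 0.4 * criterion(aux_outputs, labels)
loss.backward()
optimizer.step()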
Example #11
import math
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.models import inception_v3

net = inception_v3(pretrained=True).cuda()


def inception_score(images, batch_size=5):
    scores = []
    for i in range(int(math.ceil(float(len(images)) / float(batch_size)))):
        batch = Variable(
            torch.cat(images[i * batch_size:(i + 1) * batch_size], 0))
        s, _ = net(batch)  # skipping aux logits
        scores.append(s)
    p_yx = F.softmax(torch.cat(scores, 0), 1)
    p_y = p_yx.mean(0).unsqueeze(0).expand(p_yx.size(0), -1)
    KL_d = p_yx * (torch.log(p_yx) - torch.log(p_y))
    final_score = KL_d.mean()
    return final_score
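For reference, the Inception Score as usually defined exponentiates the expected KL divergence between the conditional and marginal class distributions,

    IS = exp( E_{x ~ p_g} [ D_KL( p(y|x) || p(y) ) ] )

whereas the snippet above returns the raw mean of the elementwise KL terms without the exponential.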


if __name__ == '__main__':
    if softmax is None:
        _init_inception()

    def get_images(filename):
        return scipy.misc.imread(filename)

    for iterations in range(100, 5000, 100):
Example #12
def set_model(model_name, num_classes):
    if model_name == 'resnext50_32x4d':
        model = models.resnext50_32x4d(pretrained=True)
        model.fc = torch.nn.Sequential(torch.nn.Linear(in_features=2048, out_features=num_classes))
    elif model_name == 'resnet18':
        model = models.resnet18(pretrained=True)
        model.fc = torch.nn.Linear(in_features=512, out_features=num_classes)
    elif model_name == 'resnet34':
        model = models.resnet34(pretrained=True)
        model.fc = torch.nn.Linear(in_features=512, out_features=num_classes)
    elif model_name == 'resnet50':
        model = models.resnet50(pretrained=True)
        model.fc = torch.nn.Linear(in_features=2048, out_features=num_classes)
    elif model_name == 'vgg16':
        model = models.vgg16(pretrained=True)
        model.classifier[6] = torch.nn.Linear(in_features=4096, out_features=num_classes)
    elif model_name == 'densenet121':
        model = models.densenet121(pretrained=True)
        model.classifier = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'densenet161':
        model = models.densenet161(pretrained=True)
        model.classifier = torch.nn.Linear(in_features=2208, out_features=num_classes)
    elif model_name == 'inception':
        model = models.inception_v3(pretrained=True)
        model.fc = torch.nn.Linear(in_features=2048, out_features=num_classes)
    elif model_name == 'googlenet':
        model = models.googlenet(pretrained=True)
        model.fc = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'shufflenet_v2_x0_5':
        model = models.shufflenet_v2_x0_5(pretrained=True)
        model.fc = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'shufflenet_v2_x1_0':
        model = models.shufflenet_v2_x1_0(pretrained=True)
        model.fc = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'mobilenet_v2':
        model = models.mobilenet_v2(pretrained=True)
        model.classifier[1] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'mobilenet_v3_large':
        model = models.mobilenet_v3_large(pretrained=True)
        model.classifier[3] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'mobilenet_v3_small':
        model = models.mobilenet_v3_small(pretrained=True)
        model.classifier[3] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'wide_resnet50_2':
        model = models.wide_resnet50_2(pretrained=True)
        model.fc = torch.nn.Linear(in_features=2048, out_features=num_classes)
    elif model_name == 'mnasnet0_5':
        model = models.mnasnet0_5(pretrained=True)
        model.classifier[1] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'mnasnet1_0':
        model = models.mnasnet1_0(pretrained=True)
        model.classifier[1] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'alexnet':
        model = models.alexnet(pretrained=True)
        model.classifier[6] = torch.nn.Linear(in_features=4096, out_features=num_classes)
    elif model_name == 'vgg19_bn':
        model = models.vgg19_bn(pretrained=True)
        model.classifier[6] = torch.nn.Linear(in_features=4096, out_features=num_classes)    
    elif model_name == 'efficientnet-b0':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b1':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b2':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b3':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b4':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b5':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b6':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b7':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    else:
        raise NameError(f'!!!!! Model ERROR : {model_name} !!!!!')
    return model
Example #13
     "path": "both"
 },
 "vgg16": {
     "model": models.vgg16(pretrained=True),
     "path": "both"
 },
 "squeezenet": {
     "model": models.squeezenet1_0(pretrained=True),
     "path": "both"
 },
 "densenet": {
     "model": models.densenet161(pretrained=True),
     "path": "both"
 },
 "inception_v3": {
     "model": models.inception_v3(pretrained=True),
     "path": "both"
 },
 #"googlenet": models.googlenet(pretrained=True),
 "shufflenet": {
     "model": models.shufflenet_v2_x1_0(pretrained=True),
     "path": "both"
 },
 "mobilenet_v2": {
     "model": models.mobilenet_v2(pretrained=True),
     "path": "both"
 },
 "resnext50_32x4d": {
     "model": models.resnext50_32x4d(pretrained=True),
     "path": "both"
 },
Example #14
def main():

    data_root = '/home/jsk/s/prcv/dataset'
    data_dir = os.path.join(data_root, 'v2')
    dataset_dir = '/home/jsk/s/prcv/dataset/v4/dataset'
    gt_path = './txt/clean_dataset_v2_5010_50.txt'

    pathlist = []

    indexes = os.listdir(dataset_dir)
    indexes.sort()
    for index in indexes:
        dire = os.path.join(dataset_dir, index)
        pathlist.append(dire)
    print('total image is %d' % (len(pathlist)))
    #print(pathlist[394:405])

    if model_str[:9] == 'inception':
        size1 = 299
        size2 = 299
    else:
        size1 = 256
        size2 = 224
    data_transforms = transforms.Compose([
        transforms.Resize(size1),
        transforms.CenterCrop(size2),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    image_datasets = customData(img_path=dataset_dir,
                                txt_path=(gt_path),
                                data_transforms=data_transforms)
    testloader = torch.utils.data.DataLoader(image_datasets,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=32)

    dataset_sizes = len(image_datasets)
    #assert dataset_sizes==len(pathlist),'pytorch dataset is not equal to pathlist'

    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

    model = {  #'resnet18':models.resnet18(pretrained=False), 
        'resnet50': models.resnet50(pretrained=False),
        'resnet101': models.resnet101(pretrained=False),
        #'resnet152':models.resnet152(pretrained=False),
        #'densenet121':models.densenet121(pretrained=False),
        'densenet161': models.densenet161(pretrained=False),
        #'densenet169':models.densenet169(pretrained=False),
        'densenet201': models.densenet201(pretrained=False),
        'inception_v3': models.inception_v3(pretrained=False),
        'vgg16_bn': models.vgg16_bn(pretrained=False),
        'vgg19_bn': models.vgg19_bn(pretrained=False)
    }

    param = {
        'v3_8': 'inception_v3_epoch30_all_batch8_SGD_0.001.pkl',
        '16_bn': 'vgg16_bn_epoch30_all_batch8_SGD_0.001.pkl',
        '101_8': 'resnet101_epoch26_ft_batch8.pkl',
        '161_8': 'densenet161_epoch30_all_batch8_SGD_0.001.pkl',
        '201_8': 'densenet201_epoch30_all_batch8_SGD_0.002.pkl',
        '50_8_sgd': 'resnet50_epoch30_ft_batch8.pkl',
        '50_8_adam': 'resnet50_epoch30_all_batch8_Adam_6e-05.pkl',
        '101_4': 'resnet101_epoch30_all_batch4_SGD_0.0008.pkl',
        'v3_4': 'inception_v3_epoch30_all_batch4_SGD_0.001.pkl',
        '161_4': 'densenet161_epoch30_all_batch4_SGD_0.0008.pkl'
    }

    model_test = model[model_str]

    if model_str[:6] == 'resnet':
        num_ftrs = model_test.fc.in_features
        model_test.fc = nn.Linear(num_ftrs, num_classes)
    elif model_str[:8] == 'densenet':
        num_ftrs = model_test.classifier.in_features
        model_test.classifier = nn.Linear(num_ftrs, num_classes)
    elif model_str[:9] == 'inception':
        num_ftrs = model_test.fc.in_features
        model_test.fc = nn.Linear(num_ftrs, num_classes)
    elif model_str[:3] == 'vgg':
        num_ftrs = model_test.classifier[6].in_features
        model_test.classifier[6] = nn.Linear(num_ftrs, num_classes)
    else:
        raise ValueError('choose an available model')

    print(model_test)
    print('current mode: %s' % (TEST_OR_COPY))
    print('test param %s of model %s' % (param[param_str], model_str))

    param_dir = os.path.join(root, param[param_str])
    model_test.load_state_dict(torch.load(param_dir))
    model_test = model_test.to(device)
    model_test.eval()

    correct = 0
    correct_1 = 0
    correct_5 = 0
    top1 = 0
    top5 = 0
    total = 0
    batch = 0
    num1 = 0
    num2 = 0

    filename = param[param_str] + '.txt'
    file_txt = open(filename, 'w')
    with torch.no_grad():
        index = 0
        for inputs, labels in testloader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model_test(inputs)

            #_, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            #correct += (predicted == labels).sum().item()
            #print(type(inputs))
            assert inputs.size()[0] == 1
            img = pathlist[index]
            index += 1
            itop1, itop5 = stand_output(outputs, topk=(1, 5))
            #top1_list.append(itop1)
            #top5_list.append(itop5)
            print(
                img.split('/')[-1] + ' top1: ' + str(itop1) + ' top5: ' +
                " ".join(str(x) for x in itop5))
            str1 = img.split('/')[-1] + ' top1: ' + str(
                itop1) + ' top5: ' + " ".join(str(x) for x in itop5) + '\n'
            file_txt.writelines(str1)

            dcorrect_1, dcorrect_5 = accuracy(outputs, labels, topk=(1, 5))

            correct_1 += dcorrect_1
            correct_5 += dcorrect_5
            top1 = correct_1.float() / total
            top5 = correct_5.float() / total

            batch += 1
            print('batch %d  label %d  correct %d' %
                  (batch, labels.item(), dcorrect_1.item()))
            #print('batch %d accuracy: %.3f %%' % (batch,100.*correct/total))
            #print('batch %d top1 accuracy: %.3f %% top5 accuracy: %.3f %%' % (batch,100*top1,100*top5))
    print(
        'Accuracy of the %s on the %d test images: top1 %.3f %%  top5 %.3f %%'
        % (param[param_str], total, 100 * top1, 100 * top5))
    file_txt.close()
Example #15
            plt.subplot(num_samples,batch_size,j*batch_size+i+1)
            img_clip = np.clip(img[i], 0, 1)
            plt.imshow(img_clip.numpy().transpose((1, 2, 0)))
        img, label = next(iter(train_dl))
    #plt.show()

    ## Load pre-trained model
    cache_dir = os.path.expanduser(os.path.join('~', '.torch'))
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    models_dir = os.path.join(cache_dir, 'models')
    if not os.path.exists(models_dir):
        os.makedirs(models_dir)
    
    use_gpu = torch.cuda.is_available()
    model_conv = models.inception_v3(pretrained=True)
    print("Use GPU: ", use_gpu)
    
    ## freeze the first few layers. This is done in two stages:
    freeze_layers = True
    
    # Stage-1 Freezing all the layers
    if freeze_layers:
        for i, param in model_conv.named_parameters():
            param.requires_grad = False

        # Stage-2: freeze all the layers up to "Conv2d_4a_3x3"
        ct = []
        for name, child in model_conv.named_children():
            if "Conv2d_4a_3x3" in ct:
                for params in child.parameters():
Example #16
def Encode(use_cuda):
    enc = inception_v3(True)
    if use_cuda: enc = enc.cuda()
    return enc
                    epoch)
                torch.save(model, PATH)

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model


base_model = models.inception_v3(pretrained=True)
for param in base_model.parameters():
    param.requires_grad = False

#num_ftrs = base_model.AuxLogits.fc.in_features
#base_model.AuxLogits.fc = nn.Linear(num_ftrs, len(class_names))

num_ftrs = base_model.fc.in_features
#base_model.fc = nn.Linear(num_ftrs,len(class_names))

base_model.fc = nn.Sequential(nn.Linear(num_ftrs, 1024, bias=True), nn.ReLU(),
                              nn.Linear(1024, len(class_names), bias=True))

epochs = 10
model = base_model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
Example #18
    def __init__(self,
                 num_classes=100,
                 network='resnet18',
                 pretrained=True,
                 dropout_rate=0.2,
                 num_domain_classes=None):
        super(BasicResNet, self).__init__()

        if network == 'resnet152':
            resnet_model = models.resnet152(pretrained=pretrained)
        elif network == 'resnet101':
            resnet_model = models.resnet101(pretrained=pretrained)
        elif network == 'resnet50':
            resnet_model = models.resnet50(pretrained=pretrained)
        elif network == 'resnet34':
            resnet_model = models.resnet34(pretrained=pretrained)
        elif network == 'resnet18':
            resnet_model = models.resnet18(pretrained=pretrained)
        elif network == 'MobileNetV2':
            resnet_model = models.mobilenet_v2(pretrained=pretrained)
        elif network == 'inceptionV3':
            resnet_model = models.inception_v3(pretrained=pretrained)
        elif network == 'mnasnet':
            resnet_model = models.mnasnet1_0(pretrained=pretrained)
        elif network == 'VGG':
            resnet_model = models.vgg11(pretrained=pretrained)
        else:
            raise Exception("{} model type not supported".format(network))

        self.num_domain_classes = num_domain_classes
        self.resnet_model = resnet_model

        if 'resnet' in network:
            if num_domain_classes is None:
                self.resnet_model.fc = nn.Sequential(
                    nn.BatchNorm1d(resnet_model.fc.in_features),
                    nn.Dropout(dropout_rate),
                    nn.Linear(resnet_model.fc.in_features,
                              resnet_model.fc.in_features),
                    nn.ReLU(),
                    nn.BatchNorm1d(resnet_model.fc.in_features),
                    nn.Dropout(dropout_rate),
                    nn.Linear(resnet_model.fc.in_features,
                              resnet_model.fc.in_features),
                    nn.ReLU(),
                    nn.BatchNorm1d(resnet_model.fc.in_features),
                    nn.Dropout(dropout_rate),
                    nn.Linear(resnet_model.fc.in_features, num_classes),
                )
            else:
                self.resnet_model.fc = nn.Sequential(
                    nn.BatchNorm1d(resnet_model.fc.in_features),
                    nn.Dropout(dropout_rate),
                    nn.Linear(resnet_model.fc.in_features,
                              resnet_model.fc.in_features), nn.ReLU(),
                    nn.BatchNorm1d(resnet_model.fc.in_features),
                    nn.Dropout(dropout_rate),
                    nn.Linear(resnet_model.fc.in_features,
                              resnet_model.fc.in_features), nn.ReLU(),
                    nn.BatchNorm1d(resnet_model.fc.in_features),
                    nn.Dropout(dropout_rate),
                    nn.Linear(resnet_model.fc.in_features,
                              num_classes + num_domain_classes), nn.ReLU(),
                    nn.BatchNorm1d(num_classes + num_domain_classes),
                    nn.Dropout(dropout_rate))
                self.classes_output = nn.Linear(
                    num_classes + num_domain_classes, num_classes)
                self.domain_output = nn.Linear(
                    num_classes + num_domain_classes, num_domain_classes)
        elif 'VG' in network:
            self.resnet_model.classifier = nn.Sequential(
                nn.Linear(25088, 4096, bias=True), nn.ReLU(),
                nn.Dropout(p=0.5, inplace=False),
                nn.Linear(4096, 4096, bias=True), nn.ReLU(),
                nn.Dropout(p=0.5, inplace=False), nn.Linear(4096, num_classes))
Example #19
#----------------------------------------------------------------------------------------------
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------

import argparse
import os
from six import text_type as _text_type
from mmdnn.conversion.examples.imagenet_test import TestKit
import torch
import torchvision.models as models


NETWORKS_MAP = {
    'inception_v3'      : lambda : models.inception_v3(pretrained=True),
    'vgg16'             : lambda : models.vgg16(pretrained=True),
    'vgg19'             : lambda : models.vgg19(pretrained=True),
    'resnet152'         : lambda : models.resnet152(pretrained=True),
    'densenet'          : lambda : models.densenet201(pretrained=True),
    'squeezenet'        : lambda : models.squeezenet1_1(pretrained=True)
}


def _main():
    parser = argparse.ArgumentParser()

    parser.add_argument('-n', '--network',
                        type=_text_type, help='Model Type', required=True,
                        choices=NETWORKS_MAP.keys())

    parser.add_argument('-i', '--image', type=_text_type, help='Test Image Path')
Example #20
def initialize_model(model_name,
                     num_classes,
                     feature_extract=False,
                     use_pretrained=True):
    model_ft = None
    input_size = 0

    if model_name == 'resnet18':
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'resnet101':
        """ Resnet101
        """
        model_ft = models.resnet101(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'resnext':
        """ ResNext50_32x4d
        """
        model_ft = models.resnext50_32x4d(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'alexnet':
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'vgg':
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'squeezenet':
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == 'inception':
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        logger.warning("Invalid model name, exiting...")
        exit()

    return model_ft, input_size
Example #21
    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : list of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, normalizes the input to the statistics the pretrained
            Inception network expects
        requires_grad : bool
            If true, parameters of the model require gradient. Possibly useful
            for finetuning the network
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad
Example #22
def main():
    start_time = time()
    
    def get_input_args():
        """
        Retrieves and parses the command line arguments provided by the user when
        they run the program from a terminal window. This function uses Python's
        argparse module to create and define these command line arguments. If
        the user fails to provide some or all of the arguments, then the default 
        values are used for the missing arguments. 
  
        """
        # Create Parse using ArgumentParser

        parser = argparse.ArgumentParser()

        # Create command line arguments as mentioned above using add_argument() from ArguementParser method

        parser.add_argument('--image_path',type=str,default='flowers/test/10/image_07090.jpg',help='path for image to predict')
        parser.add_argument('--save_dir',type=str,default='fc_checkpoint.pth',help='path for checkpoint')
        parser.add_argument('--topk',type=int,default=5,help='input number of top classes for prediction')
        parser.add_argument('--arch', type = str, default = 'vgg16', help = 'architecture of the model')
        parser.add_argument('--gpu',default=True,help='use GPU to make predictions')
        parser.add_argument('--cat_to_name', default = 'cat_to_name.json',help='path to the category-to-name JSON mapping')

        in_arg = parser.parse_args()

        return in_arg
    
    in_arg = get_input_args()
    
    with open('cat_to_name.json', 'r') as f:
        cat_to_name = json.load(f)
        
    resnet18 = models.resnet18(pretrained=True)
    alexnet = models.alexnet(pretrained=True)
    squeezenet = models.squeezenet1_0(pretrained=True)
    vgg16 = models.vgg16(pretrained=True)
    densenet = models.densenet161(pretrained=True)
    inception = models.inception_v3(pretrained=True)

    models_dict = {'resnet':resnet18
                   ,'alexnet':alexnet
                   ,'squeezenet':squeezenet
                   ,'vgg16':vgg16
                   ,'densenet':densenet
                   ,'inception':inception
                  }
    
    def load_checkpoint(path):
        checkpoint = torch.load(path)
        if checkpoint['arch'] == in_arg.arch:
            model = models_dict[in_arg.arch]

            for param in model.parameters():
                param.requires_grad = False

            model.class_to_idx = checkpoint['class_to_idx']
            model.classifier = checkpoint['classifier']
            model.load_state_dict(checkpoint['model_state_dict'])
            #optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            epochs = checkpoint['epochs']

        return model
    
    trained_model = load_checkpoint(in_arg.save_dir)
    print('Checkpoint loaded')

    def predict(image_path, model, topk):
        
        ''' Predict the class of an image using a trained model.
        '''
        #Preprocess the image
        im = process_image(image_path)
        print('Image preprocessed')
        
        im = im.unsqueeze_(0)
        im = im.float()
        
        print('image type is: {}'.format(type(im)))
        print('Beginning Model Eval')

        if torch.cuda.is_available() and in_arg.gpu == True:
            model.cuda()
        #Pass through the model
        model.eval()
        with torch.no_grad():
            logps = model.forward(im.cuda())

        ps = torch.exp(logps)
        
        #Find top-k probabilities and indices 
        top_probability, indices = torch.topk(ps, dim=1, k=topk)
        
        #Find the class using the indices
        indices = np.array(indices) 
        index_to_class = {val: key for key, val in model.class_to_idx.items()} 
        top_classes = [index_to_class[i] for i in indices[0]]

        #Map the class name with collected top-k classes
        names = []
        for classes in top_classes:
                names.append(cat_to_name[str(classes)])
                
        print('prediction complete')
        return top_probability.cpu().numpy(), names
    
    top_probability, top_classes = predict(in_arg.image_path, trained_model, in_arg.topk)
    print(top_probability,top_classes)
    
    end_time = time()
    
    tot_time = end_time - start_time
    
    print("\n** Total Elapsed Runtime:",
          str(int((tot_time/3600)))+":"+str(int((tot_time%3600)/60))+":"
          +str(int((tot_time%3600)%60)) )
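A hypothetical invocation, assuming the script above is saved as predict.py (every flag falls back to the argparse defaults defined in get_input_args):

python predict.py --image_path flowers/test/10/image_07090.jpg --save_dir fc_checkpoint.pth --topk 5 --arch vgg16 --cat_to_name cat_to_name.json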
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_fltrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_fltrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_fltrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_fltrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_fltrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_fltrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_fltrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_fltrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_fltrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_fltrs, num_classes)
        # Handle the primary net
        num_fltrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_fltrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size
Example #24
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    """
    Source: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
    Initialize these variables which will be set in this if statement. Each of these
    variables is model specific.

    :param str model_name: model to be loaded
    :param int num_classes: number of classes
    :param bool feature_extract: deactivate gradients
    :param bool use_pretrained: load pretrained weights
    :return: pretrained model, input_size
    """
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ Inception v3, Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size
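
# Usage sketch (not part of the original example): build a feature-extraction ResNet-18 and
# hand only its trainable parameters to the optimizer. The class count and hyperparameters
# are illustrative assumptions; the dataloaders and training loop are assumed to live elsewhere.
import torch.optim as optim

model_ft, input_size = initialize_model("resnet", num_classes=2,
                                        feature_extract=True, use_pretrained=True)
params_to_update = [p for p in model_ft.parameters() if p.requires_grad]
optimizer = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
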
Ejemplo n.º 25
    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False):
        # Build a pretrained InceptionV3

        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad
Ejemplo n.º 26
    def __init__(self, in_size=(3, 256, 256)):
        self.inception_network = inception_v3(pretrained=True)

        self.in_size = in_size
Ejemplo n.º 27
    # return model


# In[23]:


model_dict = {
    "ResNet_152" : {"model" : models.resnet152(pretrained=True), "type" : "fc"},
    "AlexNet" : {"model" : models.alexnet(pretrained=True), "type" : "cl"},
    "VGG_19_bn" : {"model" : models.vgg19_bn(pretrained=True), "type" : "cl"},
#     "ShuffleNet_v2_x1-0" : {"model" : models.shufflenet_v2_x1_0(pretrained=True), "type" : "fc"},
    "MobileNet_v2" : {"model" : models.mobilenet_v2(pretrained=True), "type" : "cl"},
    "MnasNet_1-0" : {"model" : models.mnasnet1_0(pretrained=True), "type" : "cl"},
    "ResNeXt_101_32x8d" : {"model" : models.resnext101_32x8d(pretrained=True), "type" : "fc"},
    "Wide_ResNet_101-2" : {"model" : models.wide_resnet101_2(pretrained=True), "type" : "fc"},
    "Inception_v3" : {"model" : models.inception_v3(pretrained=True), "type" : "fc"}
}


# In[24]:


data_transforms, image_datasets, dataloaders = update_data(paths, resize_s, crop_s, batch_size)


# In[ ]:


for model_name in model_dict:
    model = model_dict[model_name]["model"]
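# The loop above is truncated in the source. A hypothetical continuation, consistent with the
# "type" field in model_dict ("fc" = head on model.fc, "cl" = head is the last entry of
# model.classifier), might swap each classification head like this (`num_classes` is an
# assumed variable, not defined in this notebook excerpt):
#
#     if model_dict[model_name]["type"] == "fc":
#         model.fc = nn.Linear(model.fc.in_features, num_classes)
#     else:
#         model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, num_classes)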
    
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=False) -> (nn.Module, int):
    """get models from https://pytorch.org/hub/.

    Args:
        model_name (string): model name.
        num_classes (int): the output dimension of model classifier.
        feature_extract (bool): if true, will freeze all the gradients.
        use_pretrained (bool): if true, model will load pretrained weights.
    Return:
        model, input size.
    """
    # Initialize these variables which will be set in this if statement. Each of these variables is model specific.
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    elif model_name == "vision_transformer":
        """ Vision Transformer base 16
        """
        model_ft = models.vit_b_16(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the primary net
        num_ftrs = model_ft.hidden_dim
        model_ft.heads = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size
Ejemplo n.º 29
    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : list of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, normalizes the input to the statistics the pretrained
            Inception network expects
        requires_grad : bool
            If true, parameters of the model require gradient. Possibly useful
            for finetuning the network
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad
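
# Usage sketch (not in the original snippet): pull pooled features for FID-style metrics.
# It assumes the class also defines a forward() that returns one tensor per requested block,
# which is how this wrapper is typically used, and that `torch` is imported.
extractor = InceptionV3(output_blocks=[3])
extractor.eval()
with torch.no_grad():
    images = torch.rand(4, 3, 299, 299)        # dummy batch scaled to [0, 1]
    pooled = extractor(images)[0]              # (4, 2048, 1, 1) after the final avgpool
    features = pooled.squeeze(-1).squeeze(-1)  # (4, 2048)
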
Ejemplo n.º 30
 def __init__(self):
     super(Inception, self).__init__()
     self.model = models.inception_v3(pretrained=True)
     self.input_size = 299
Ejemplo n.º 31
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):

    # Initialize these variables which will be set in this if statement. Each of these variables is model specific.

    model_ft = None

    input_size = 0

    if model_name == "resnet":
        """ Resnet18

        """

        model_ft = models.resnet18(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.fc.in_features

        model_ft.fc = nn.Linear(num_ftrs, num_classes)

        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet

        """

        model_ft = models.alexnet(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier[6].in_features

        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn

        """

        model_ft = models.vgg11_bn(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier[6].in_features

        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet

        """

        model_ft = models.squeezenet1_0(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))

        model_ft.num_classes = num_classes

        input_size = 224

    elif model_name == "densenet":
        """ Densenet

        """

        model_ft = models.densenet121(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier.in_features

        model_ft.classifier = nn.Linear(num_ftrs, num_classes)

        input_size = 224

    elif model_name == "inception":
        """ Inception v3 

        Be careful, expects (299,299) sized images and has auxiliary output

        """

        model_ft = models.inception_v3(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        # Handle the auxiliary net

        num_ftrs = model_ft.AuxLogits.fc.in_features

        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)

        # Handle the primary net

        num_ftrs = model_ft.fc.in_features

        model_ft.fc = nn.Linear(num_ftrs, num_classes)

        input_size = 299

    else:

        print("Invalid model name, exiting...")

        exit()

    return model_ft, input_size
Ejemplo n.º 32
#Model parameters:  40740314
#
#
#EfficientNet b7  has input image size:  600
#63792082 63792082
#Model parameters:  63792082

from torchvision import models
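
# `count_parameters` is not defined in this excerpt. A minimal sketch, consistent with the
# [total, trainable] pairs recorded in the surrounding comments, might look like this:
def count_parameters(net):
    total = sum(p.numel() for p in net.parameters())
    trainable = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print([total, trainable])
    return total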

net = models.MobileNetV2()
count_parameters(net)

net = models.densenet201()
count_parameters(net)

net = models.inception_v3()
count_parameters(net)
#[27161264, 27161264]

net = models.googlenet()
count_parameters(net)
#[13004888, 13004888]

net = models.vgg19_bn()
count_parameters(net)
# 143678248

net = models.vgg19()
count_parameters(net)
# 143667240
 def __init__(self):
     super(AbandonDetector, self).__init__()
     self.inception = models.inception_v3(pretrained=True)
Ejemplo n.º 34
def saveFeature(imgFolder, opt, model='resnet34', workers=4, batch_size=64):
    '''
        model: inception_v3, vgg13, vgg16, vgg19, resnet18, resnet34,
               resnet50, resnet101, or resnet152
    '''
    g = Globals()

    mkdir(g.default_feature_dir + opt.data)
    feature_dir = g.default_feature_dir + opt.data + "/" + lastFolder(imgFolder)
    
    conv_path = '{}_{}_conv.pth'.format(feature_dir, model)
    class_path = '{}_{}_class.pth'.format(feature_dir, model)
    smax_path = '{}_{}_smax.pth'.format(feature_dir, model)

    if os.path.exists(conv_path) and os.path.exists(class_path) and os.path.exists(smax_path):
        print("Feature already generated before. Now pass.")
        return

    if hasattr(opt, 'feat_model') and opt.feat_model is not None:
        model = opt.feat_model
    if model == 'vgg' or model == 'vgg16':
        vgg = models.vgg16(pretrained=True).cuda().eval()

        trans = transforms.Compose([
            transforms.Scale(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving vgg features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = vgg.features(input)
            fconv_out = fconv.mean(3).mean(2).squeeze()
            fconv = fconv.view(fconv.size(0), -1)
            flogit = vgg.classifier(fconv)
            fsmax = F.softmax(flogit)
            feature_conv.append(fconv_out.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)

    elif model.find('resnet') >= 0:
        if model == 'resnet34_cifar':
            # Please load your own model. Example here:
            # c = torch.load(
            #     '/home/gh349/xqt/wide-resnet.pytorch/checkpoint/cifar10/gan-resnet-34.t7')
            # resnet = c['net']
            pass
            print('Using resnet34 trained on cifar10.')
            raise NotImplementedError()

        elif model == 'resnet34_random':
            # Please load your own model. Example here:
            # resnet = torch.load(
            #     '/home/gh349/xqt/wide-resnet.pytorch/checkpoint/cifar10/random_resnet34.t7')
            pass
            print('Using resnet34 with random weights.')
            raise NotImplementedError()

        else:
            resnet = getattr(models, 'resnet34')(pretrained=True)
            print('Using resnet34 with pretrained weights.')

        resnet.cuda().eval()
        resnet_feature = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                       resnet.maxpool, resnet.layer1,
                                       resnet.layer2, resnet.layer3, resnet.layer4)
        input = Variable(torch.FloatTensor().cuda())

        trans = transforms.Compose([
            transforms.Scale(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving resnet features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = resnet_feature(input)
            fconv = fconv.mean(3).mean(2).squeeze()
            flogit = resnet.fc(fconv)
            fsmax = F.softmax(flogit)
            feature_conv.append(fconv.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)

        mkdir(g.default_feature_dir)
        feature_dir = g.default_feature_dir + \
            opt.data + "/" + lastFolder(imgFolder)
        mkdir(g.default_feature_dir + opt.data)

        torch.save(feature_conv, feature_dir + '_' + model + '_conv.pth')
        torch.save(feature_class, feature_dir + '_' + model + '_class.pth')
        torch.save(feature_smax, feature_dir + '_' + model + '_smax.pth')
        return feature_conv, feature_class, feature_smax

    elif model == 'inception' or model == 'inception_v3':
        inception = models.inception_v3(
            pretrained=True, transform_input=False).cuda().eval()
        inception_feature = nn.Sequential(inception.Conv2d_1a_3x3,
                                          inception.Conv2d_2a_3x3,
                                          inception.Conv2d_2b_3x3,
                                          nn.MaxPool2d(3, 2),
                                          inception.Conv2d_3b_1x1,
                                          inception.Conv2d_4a_3x3,
                                          nn.MaxPool2d(3, 2),
                                          inception.Mixed_5b,
                                          inception.Mixed_5c,
                                          inception.Mixed_5d,
                                          inception.Mixed_6a,
                                          inception.Mixed_6b,
                                          inception.Mixed_6c,
                                          inception.Mixed_6d,
                                          inception.Mixed_7a,
                                          inception.Mixed_7b,
                                          inception.Mixed_7c,
                                          ).cuda().eval()

        trans = transforms.Compose([
            transforms.Scale(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving inception features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = inception_feature(input)
            fconv = fconv.mean(3).mean(2).squeeze()
            flogit = inception.fc(fconv)
            fsmax = F.softmax(flogit)
            feature_conv.append(fconv.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)

    else:
        raise NotImplementedError


    torch.save(feature_conv, '{}_{}_conv.pth'.format(feature_dir, model))
    torch.save(feature_class, '{}_{}_class.pth'.format(feature_dir, model))
    torch.save(feature_smax, '{}_{}_smax.pth'.format(feature_dir, model))
    return feature_conv, feature_class, feature_smax
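
# Hypothetical call (not in the original source): `opt` only needs the attributes that
# saveFeature reads (`data`, and optionally `feat_model`); the image-folder path is
# illustrative, and a CUDA device plus the Globals() config above are assumed.
from argparse import Namespace

opt = Namespace(data='cifar10', feat_model=None)
feature_conv, feature_class, feature_smax = saveFeature(
    './samples/fake_images', opt, model='resnet34', workers=4, batch_size=64)
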
Ejemplo n.º 35
 def __init__(self, model='resnet34', workers=4, batchSize=64):
     '''
     model: inception_v3, vgg13, vgg16, vgg19, resnet18, resnet34,
            resnet50, resnet101, or resnet152
     '''
     self.model = model
     self.batch_size = batchSize
     self.workers = workers
     if self.model.find('vgg') >= 0:
         self.vgg = getattr(models, model)(pretrained=True).cuda().eval()
         self.trans = transforms.Compose([
             transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize((0.485, 0.456, 0.406),
                                  (0.229, 0.224, 0.225)),
         ])
     elif self.model.find('resnet') >= 0:
         resnet = getattr(models, model)(pretrained=True)
         resnet.cuda().eval()
         resnet_feature = nn.Sequential(resnet.conv1, resnet.bn1,
                                        resnet.relu,
                                        resnet.maxpool, resnet.layer1,
                                        resnet.layer2, resnet.layer3,
                                        resnet.layer4).cuda().eval()
         self.resnet = resnet
         self.resnet_feature = resnet_feature
         self.trans = transforms.Compose([
             transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize((0.485, 0.456, 0.406),
                                  (0.229, 0.224, 0.225)),
         ])
     elif self.model == 'inception' or self.model == 'inception_v3':
         inception = models.inception_v3(
             pretrained=True, transform_input=False).cuda().eval()
         inception_feature = nn.Sequential(inception.Conv2d_1a_3x3,
                                           inception.Conv2d_2a_3x3,
                                           inception.Conv2d_2b_3x3,
                                           nn.MaxPool2d(3, 2),
                                           inception.Conv2d_3b_1x1,
                                           inception.Conv2d_4a_3x3,
                                           nn.MaxPool2d(3, 2),
                                           inception.Mixed_5b,
                                           inception.Mixed_5c,
                                           inception.Mixed_5d,
                                           inception.Mixed_6a,
                                           inception.Mixed_6b,
                                           inception.Mixed_6c,
                                           inception.Mixed_6d,
                                           inception.Mixed_7a,
                                           inception.Mixed_7b,
                                           inception.Mixed_7c,
                                           ).cuda().eval()
         self.inception = inception
         self.inception_feature = inception_feature
         self.trans = transforms.Compose([
             transforms.Resize(299),
             transforms.ToTensor(),
             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         ])
     else:
         raise NotImplementedError
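
# Shape note (not in the original snippet): for a 299x299 input, the Sequential built above
# (Conv2d_1a_3x3 through Mixed_7c) yields an (N, 2048, 8, 8) feature map; mean-pooling the
# spatial dims gives the usual 2048-d vector, e.g.
#
#     fconv = self.inception_feature(x)   # x: (N, 3, 299, 299), on the GPU as above
#     feat = fconv.mean(3).mean(2)        # (N, 2048)
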
Ejemplo n.º 36
    resnet18 = models.resnet18(pretrained=pretrained)
    resnet34 = models.resnet34(pretrained=pretrained)
    resnet50 = models.resnet50(pretrained=pretrained)
    resnet101 = models.resnet101(pretrained=pretrained)
    resnet152 = models.resnet152(pretrained=pretrained)

    squeezenet1_0 = models.squeezenet1_0(pretrained=pretrained)
    squeezenet1_1 = models.squeezenet1_1(pretrained=pretrained)

    densenet121 = models.densenet121(pretrained=pretrained)
    densenet169 = models.densenet169(pretrained=pretrained)
    densenet161 = models.densenet161(pretrained=pretrained)
    densenet201 = models.densenet201(pretrained=pretrained)

    inception_v3 = models.inception_v3(pretrained=pretrained)

    googlenet = models.googlenet(pretrained=pretrained)

    shufflenet_v2_x0_5 = models.shufflenet_v2_x0_5(pretrained=pretrained)
    shufflenet_v2_x1_0 = models.shufflenet_v2_x1_0(pretrained=pretrained)
    # shufflenet_v2_x1_5 = models.shufflenet_v2_x1_5(pretrained=pretrained)
    # shufflenet_v2_x2_0 = models.shufflenet_v2_x2_0(pretrained=pretrained)

    mobilenet_v2 = models.mobilenet_v2(pretrained=pretrained)

    resnext50_32x4d = models.resnext50_32x4d(pretrained=pretrained)
    resnext101_32x8d = models.resnext101_32x8d(pretrained=pretrained)

    wide_resnet50_2 = models.wide_resnet50_2(pretrained=pretrained)
    wide_resnet101_2 = models.wide_resnet101_2(pretrained=pretrained)
Ejemplo n.º 37
 def __init__(self, num_classes: int):
     super().__init__()
     self.net = M.inception_v3(pretrained=True)
     self.net.fc = nn.Linear(self.net.fc.in_features, num_classes)
Ejemplo n.º 38
def initialize_model(model_name, num_classes, feature_extract, verbose=False):
    """Initialize required model and set it up for feature extracting or finetuning.

    Args:
        model_name: Type of model to initialize.
        num_classes: Total number of target classes.
        feature_extract: Feature extracting or finetuning.
        verbose: Print model info in the end or not.

    Returns:
        model_ft: Initialized model.
        params_to_update: List of parameters to be updated during training.

    """

    model_ft = None

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=True)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=True)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=True)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        with warnings.catch_warnings(
        ):  # temporarily suppress warnings about deprecated functions
            warnings.simplefilter("ignore")
            model_ft = models.squeezenet1_0(pretrained=True)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=True)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=True)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    else:  # Unreachable
        exit()

    # Gather the parameters to be optimized
    params_to_update = list(
        filter(lambda p: p.requires_grad, model_ft.parameters()))

    # Print model info
    if verbose:
        print()
        print(model_ft)
        print()
        print("Params to learn:")
        for name, param in model_ft.named_parameters():
            if param.requires_grad:
                print('\t', name)

    return model_ft, params_to_update
Ejemplo n.º 39
 def test_inception_v3(self):
     self.image = read_image2()
     process_model(models.inception_v3(self.pretrained), self.image,
                   _C_tests.forward_inceptionv3, 'Inceptionv3')
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    # Initialize these variables which will be set in this if statement. Each of these
    #   variables is model specific.
    model_ft = None
    input_size = 0

    # Ignore SSL certificate verification (prevents download errors for some users)
    ssl._create_default_https_context = ssl._create_unverified_context

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "resnet152":
        model_ft = models.resnet152(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299
    elif model_name == "xception":
        """ Xception
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = xception.xception(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    elif model_name == "fleming_v1":
        """ Fleming Model
        Custom model created by team Fleming
        """
        model_ft = fleming.FlemingModel_v1(num_classes=196)
        input_size = 224

    elif model_name == "fleming_v2":
        """ Fleming Model
        Custom model created by team Fleming
        """
        model_ft = fleming.FlemingModel_v2(num_classes=196)
        input_size = 224

    elif model_name == "fleming_v3":
        """ Fleming Model
        Custom model created by team Fleming
        """
        model_ft = fleming.FlemingModel_v3(num_classes=196)
        input_size = 224

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size