Example #1
def get_model(name, n_classes):
    model = _get_model_instance(name)

    if name in ['frrnA', 'frrnB']:
        model = model(n_classes, model_type=name[-1])

    elif name in ['fcn32s', 'fcn16s', 'fcn8s']:
        model = model(n_classes=n_classes)
        vgg16 = models.vgg16(pretrained=True)
        model.init_vgg16_params(vgg16)

    elif name == 'segnet':
        model = model(n_classes=n_classes,
                      is_unpooling=True)
        vgg16 = models.vgg16(pretrained=True)
        model.init_vgg16_params(vgg16)

    elif name == 'unet':
        model = model(n_classes=n_classes,
                      is_batchnorm=True,
                      in_channels=3,
                      is_deconv=True)
    
    else:
        model = model(n_classes=n_classes)

    return model
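A hedged usage sketch of get_model (model names come from the branches above; the n_classes values are illustrative, and _get_model_instance is the registry the snippet assumes):

model = get_model('fcn8s', n_classes=21)   # FCN-8s, initialized from ImageNet VGG16
model = get_model('segnet', n_classes=12)  # SegNet with unpooling, also VGG16-initialized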
Example #2
    def __init__(self, model_path=None, normalize=True):
        super(VGG16Fc, self).__init__()
        if model_path:
            if os.path.exists(model_path):
                self.model_vgg = models.vgg16(pretrained=False)
                self.model_vgg.load_state_dict(torch.load(model_path))
            else:
                raise Exception('invalid model path!')
        else:
            self.model_vgg = models.vgg16(pretrained=True)

        if model_path or normalize:
            # pretrained weights are in use (or normalization was requested), so apply ImageNet statistics
            self.normalize = True
            self.register_buffer('mean', torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            self.register_buffer('std', torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
        else:
            self.normalize = False

        model_vgg = self.model_vgg
        self.features = model_vgg.features
        self.classifier = nn.Sequential()
        for i in range(6):
            self.classifier.add_module("classifier"+str(i), model_vgg.classifier[i])
        self.feature_layers = nn.Sequential(self.features, self.classifier)

        self.__in_features = 4096
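The snippet stops after building feature_layers; a minimal sketch of the missing forward pass, assuming (N, 3, 224, 224) inputs in [0, 1]:

    def forward(self, x):
        # apply the registered ImageNet statistics when normalization is enabled
        if self.normalize:
            x = (x - self.mean) / self.std
        x = self.features(x)          # VGG16 convolutional features
        x = x.view(x.size(0), -1)     # flatten to (N, 25088) for 224x224 inputs
        x = self.classifier(x)        # truncated VGG16 classifier -> (N, 4096)
        return x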
Example #3
def decom_vgg16():
    # the 30th layer of features is relu of conv5_3
    if opt.caffe_pretrain:
        model = vgg16(pretrained=False)
        if not opt.load_path:
            model.load_state_dict(t.load(opt.caffe_pretrain_path))
    else:
        model = vgg16(not opt.load_path)

    features = list(model.features)[:30]
    classifier = model.classifier

    classifier = list(classifier)
    del classifier[6]
    if not opt.use_drop:
        del classifier[5]
        del classifier[2]
    classifier = nn.Sequential(*classifier)

    # freeze top4 conv
    for layer in features[:10]:
        for p in layer.parameters():
            p.requires_grad = False

    return nn.Sequential(*features), classifier
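A hedged usage sketch (decom_vgg16 reads the global opt config; torch is assumed imported):

# illustrative only: the extractor stops at relu5_3, before pool5
extractor, classifier = decom_vgg16()
feat = extractor(torch.randn(1, 3, 224, 224))  # -> (1, 512, 14, 14)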
Example #4
 def __init__(self, num_classes=1000):
     
     super(FCN_32s, self).__init__()
     
     # Load the model with convolutionalized
     # fully connected layers
     # NOTE: fully_conv is not an argument of stock torchvision's vgg16;
     # this snippet relies on a custom fork that convolutionalizes the FC layers.
     vgg16 = models.vgg16(pretrained=True,
                          fully_conv=True)
     
     # Copy all the feature layers as is
     self.features = vgg16.features
     
     # TODO: check if Dropout works correctly for
     # fully convolutional mode
     
     # Remove the last classification 1x1 convolution
     # because it comes from imagenet 1000 class classification.
     # We will perform classification on different classes
     fully_conv = list(vgg16.classifier.children())
     fully_conv = fully_conv[:-1]
     self.fully_conv = nn.Sequential(*fully_conv)
     
     # Get a new 1x1 convolution and randomly initialize
     score_32s = nn.Conv2d(4096, num_classes, 1)
     self._normal_initialization(score_32s)
     self.score_32s = score_32s
Example #5
    def __init__(self, num_classes, pretrained=True):
        super(FCN32VGG, self).__init__()
        vgg = models.vgg16()
        if pretrained:
            vgg.load_state_dict(torch.load(vgg16_caffe_path))
        features, classifier = list(vgg.features.children()), list(vgg.classifier.children())

        features[0].padding = (100, 100)

        for f in features:
            if 'MaxPool' in f.__class__.__name__:
                f.ceil_mode = True
            elif 'ReLU' in f.__class__.__name__:
                f.inplace = True

        self.features5 = nn.Sequential(*features)

        fc6 = nn.Conv2d(512, 4096, kernel_size=7)
        fc6.weight.data.copy_(classifier[0].weight.data.view(4096, 512, 7, 7))
        fc6.bias.data.copy_(classifier[0].bias.data)
        fc7 = nn.Conv2d(4096, 4096, kernel_size=1)
        fc7.weight.data.copy_(classifier[3].weight.data.view(4096, 4096, 1, 1))
        fc7.bias.data.copy_(classifier[3].bias.data)
        score_fr = nn.Conv2d(4096, num_classes, kernel_size=1)
        score_fr.weight.data.zero_()
        score_fr.bias.data.zero_()
        self.score_fr = nn.Sequential(
            fc6, nn.ReLU(inplace=True), nn.Dropout(), fc7, nn.ReLU(inplace=True), nn.Dropout(), score_fr
        )

        self.upscore = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, stride=32, bias=False)
        self.upscore.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 64))
Example #6
  def _init_modules(self):
    vgg = models.vgg16()
    if self.pretrained:
        print("Loading pretrained weights from %s" %(self.model_path))
        state_dict = torch.load(self.model_path)
        vgg.load_state_dict({k:v for k,v in state_dict.items() if k in vgg.state_dict()})

    vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])

    # not using the last maxpool layer
    self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1])

    # Fix the layers before conv3:
    for layer in range(10):
      for p in self.RCNN_base[layer].parameters(): p.requires_grad = False

    # self.RCNN_base = _RCNN_base(vgg.features, self.classes, self.dout_base_model)

    self.RCNN_top = vgg.classifier

    self.RCNN_cls_score = nn.Linear(4096, self.n_classes)

    if self.class_agnostic:
      self.RCNN_bbox_pred = nn.Linear(4096, 4)
    else:
      self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes)      
Example #7
def vgg16(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D")
    """
    model = models.vgg16(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg16'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
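A usage sketch, assuming pretrained_settings and load_pretrained come from the surrounding (pretrainedmodels-style) helpers:

model = vgg16(num_classes=1000, pretrained='imagenet')
model.eval()  # inference mode for the ImageNet-initialized wrapper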
Example #8
    def __init__(self, num_classes, pretrained=True, caffe=False):
        super(FCN8s, self).__init__()
        vgg = models.vgg16()
        if pretrained:
            if caffe:
                # load the pretrained vgg16 used by the paper's author
                vgg.load_state_dict(torch.load(vgg16_caffe_path))
            else:
                vgg.load_state_dict(torch.load(vgg16_path))
        features, classifier = list(vgg.features.children()), list(vgg.classifier.children())

        '''
        100 padding for 2 reasons:
            1) support very small input sizes
            2) allow cropping in order to match the sizes of different layers' feature maps
        Note that the cropped part corresponds to a part of the 100 padding.
        Spatial information of different layers' feature maps cannot be aligned exactly because of cropping, which is bad.
        '''
        features[0].padding = (100, 100)

        for f in features:
            if 'MaxPool' in f.__class__.__name__:
                f.ceil_mode = True
            elif 'ReLU' in f.__class__.__name__:
                f.inplace = True

        self.features3 = nn.Sequential(*features[: 17])
        self.features4 = nn.Sequential(*features[17: 24])
        self.features5 = nn.Sequential(*features[24:])

        self.score_pool3 = nn.Conv2d(256, num_classes, kernel_size=1)
        self.score_pool4 = nn.Conv2d(512, num_classes, kernel_size=1)
        self.score_pool3.weight.data.zero_()
        self.score_pool3.bias.data.zero_()
        self.score_pool4.weight.data.zero_()
        self.score_pool4.bias.data.zero_()

        fc6 = nn.Conv2d(512, 4096, kernel_size=7)
        fc6.weight.data.copy_(classifier[0].weight.data.view(4096, 512, 7, 7))
        fc6.bias.data.copy_(classifier[0].bias.data)
        fc7 = nn.Conv2d(4096, 4096, kernel_size=1)
        fc7.weight.data.copy_(classifier[3].weight.data.view(4096, 4096, 1, 1))
        fc7.bias.data.copy_(classifier[3].bias.data)
        score_fr = nn.Conv2d(4096, num_classes, kernel_size=1)
        score_fr.weight.data.zero_()
        score_fr.bias.data.zero_()
        self.score_fr = nn.Sequential(
            fc6, nn.ReLU(inplace=True), nn.Dropout(), fc7, nn.ReLU(inplace=True), nn.Dropout(), score_fr
        )

        self.upscore2 = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=4, stride=2, bias=False)
        self.upscore_pool4 = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=4, stride=2, bias=False)
        self.upscore8 = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=16, stride=8, bias=False)
        self.upscore2.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 4))
        self.upscore_pool4.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 4))
        self.upscore8.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 16))
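The constructor builds every module an FCN-8s forward pass needs; a sketch of that pass, using the crop offsets (5, 9, 31) from the original FCN implementation to undo the 100-pixel padding:

    def forward(self, x):
        x_size = x.size()
        pool3 = self.features3(x)
        pool4 = self.features4(pool3)
        pool5 = self.features5(pool4)

        upscore2 = self.upscore2(self.score_fr(pool5))
        score_pool4 = self.score_pool4(pool4)
        upscore_pool4 = self.upscore_pool4(
            score_pool4[:, :, 5:5 + upscore2.size(2), 5:5 + upscore2.size(3)] + upscore2)
        score_pool3 = self.score_pool3(pool3)
        upscore8 = self.upscore8(
            score_pool3[:, :, 9:9 + upscore_pool4.size(2), 9:9 + upscore_pool4.size(3)]
            + upscore_pool4)
        # crop back to the input resolution
        return upscore8[:, :, 31:31 + x_size[2], 31:31 + x_size[3]].contiguous()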
Example #9
 def __init__(self, train_fe=False, feature_extraction_cnn='vgg', normalization=True, last_layer='', use_cuda=True):
     super(FeatureExtraction, self).__init__()
     self.normalization = normalization
     if feature_extraction_cnn == 'vgg':
         self.model = models.vgg16(pretrained=True)
         # keep feature extraction network up to indicated layer
         vgg_feature_layers=['conv1_1','relu1_1','conv1_2','relu1_2','pool1','conv2_1',
                      'relu2_1','conv2_2','relu2_2','pool2','conv3_1','relu3_1',
                      'conv3_2','relu3_2','conv3_3','relu3_3','pool3','conv4_1',
                      'relu4_1','conv4_2','relu4_2','conv4_3','relu4_3','pool4',
                      'conv5_1','relu5_1','conv5_2','relu5_2','conv5_3','relu5_3','pool5']
         if last_layer=='':
             last_layer = 'pool4'
         last_layer_idx = vgg_feature_layers.index(last_layer)
         self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx+1])
     if feature_extraction_cnn == 'resnet101':
         self.model = models.resnet101(pretrained=True)
         resnet_feature_layers = ['conv1',
                                  'bn1',
                                  'relu',
                                  'maxpool',
                                  'layer1',
                                  'layer2',
                                  'layer3',
                                  'layer4']
         if last_layer=='':
             last_layer = 'layer3'
         last_layer_idx = resnet_feature_layers.index(last_layer)
         resnet_module_list = [self.model.conv1,
                               self.model.bn1,
                               self.model.relu,
                               self.model.maxpool,
                               self.model.layer1,
                               self.model.layer2,
                               self.model.layer3,
                               self.model.layer4]
         
         self.model = nn.Sequential(*resnet_module_list[:last_layer_idx+1])
     if feature_extraction_cnn == 'resnet101_v2':
         self.model = models.resnet101(pretrained=True)
         # keep feature extraction network up to pool4 (last layer - 7)
         self.model = nn.Sequential(*list(self.model.children())[:-3])
     if feature_extraction_cnn == 'densenet201':
         self.model = models.densenet201(pretrained=True)
         # keep feature extraction network up to denseblock3
         # self.model = nn.Sequential(*list(self.model.features.children())[:-3])
         # keep feature extraction network up to transitionlayer2
         self.model = nn.Sequential(*list(self.model.features.children())[:-4])
     if not train_fe:
         # freeze parameters
         for param in self.model.parameters():
             param.requires_grad = False
     # move to GPU
     if use_cuda:
         self.model = self.model.cuda()
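A hedged usage sketch (CUDA assumed available; the default 'vgg' branch truncates at pool4):

extractor = FeatureExtraction(feature_extraction_cnn='vgg')
feats = extractor.model(torch.randn(1, 3, 240, 240).cuda())  # -> (1, 512, 15, 15)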
Example #10
    def __init__(self, descriptor_name):
        super(Net, self).__init__()

        # if descriptor_name == 'vgg16':
        #     self.select = ['30']
        #     self.vgg16 = models.vgg16(pretrained=True)
        #     self.sequence = []
        #     for name, layer in self.vgg16.features._modules.items():
        #         self.sequence += [layer]
        #     for name, layer in self.vgg16.classifier._modules.items():
        #         self.sequence += [layer]
        #         break
        #     self.model = nn.Sequential(*self.sequence)

        if descriptor_name == 'vgg16':
            self.select = ['30']
            self.vgg16 = models.vgg16(pretrained=True)
            self.sequence = []
            for name, layer in self.vgg16.features._modules.items():
                self.sequence += [layer]
            for name, layer in self.vgg16.classifier._modules.items():
                if name == '6':
                    break
                self.sequence += [layer]
            layer = nn.Linear(4096, 10)
            # init.xavier_normal(layer.weight.data, gain = 1)
            self.sequence += [layer]

            self.model = nn.Sequential(*self.sequence)

        elif descriptor_name == 'vgg19':
            self.select = ['36']
            self.vgg19 = models.vgg19(pretrained=True)
            self.sequence = []
            for name, layer in self.vgg19.features._modules.items():
                self.sequence += [layer]
            for name, layer in self.vgg19.classifier._modules.items():
                self.sequence += [layer]
                break
            self.model = nn.Sequential(*self.sequence)

        elif descriptor_name == 'resnet50':
            self.select = ['avgpool']
            self.model = models.resnet50(pretrained=True)
            self.model.fc = nn.Linear(2048, 10)
            
        elif descriptor_name == 'resnet101':
            self.select = ['avgpool']
            self.model = models.resnet101(pretrained=True)

        elif descriptor_name == 'resnet152':
            self.select = ['avgpool']
            self.model = models.resnet152(pretrained=True)
            self.model.fc = nn.Linear(2048, 10)
Example #11
  def _init_head_tail(self):
    self.vgg = models.vgg16()
    # Remove fc8
    self.vgg.classifier = nn.Sequential(*list(self.vgg.classifier._modules.values())[:-1])

    # Fix the layers before conv3:
    for layer in range(10):
      for p in self.vgg.features[layer].parameters(): p.requires_grad = False

    # not using the last maxpool layer
    self._layers['head'] = nn.Sequential(*list(self.vgg.features._modules.values())[:-1])
Example #12
    def __init__(self, model=None):
        super(ModifiedVGG16Model, self).__init__()

        model = models.vgg16(pretrained=True)
        self.features = model.features

        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(25088, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, 2))
Example #13
File: vgg.py  Project: chu-data-lab/GOGGLES
    def __init__(self, freeze=True):
        super(Vgg16, self).__init__()
        self._is_cuda = False

        self.input_size = 224

        features = list(models.vgg16(pretrained=True).features)
        self._features = nn.ModuleList(features).eval()

        self._is_frozen = freeze
        self.freeze(freeze)

        self._config = None
        self._parse_config()
Example #14
File: vgg.py  Project: Biocodings/examples
 def __init__(self, requires_grad=False):
     super(Vgg16, self).__init__()
     vgg_pretrained_features = models.vgg16(pretrained=True).features
     self.slice1 = torch.nn.Sequential()
     self.slice2 = torch.nn.Sequential()
     self.slice3 = torch.nn.Sequential()
     self.slice4 = torch.nn.Sequential()
     for x in range(4):
         self.slice1.add_module(str(x), vgg_pretrained_features[x])
     for x in range(4, 9):
         self.slice2.add_module(str(x), vgg_pretrained_features[x])
     for x in range(9, 16):
         self.slice3.add_module(str(x), vgg_pretrained_features[x])
     for x in range(16, 23):
         self.slice4.add_module(str(x), vgg_pretrained_features[x])
     if not requires_grad:
         for param in self.parameters():
             param.requires_grad = False
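A usage sketch: the four slices chain together and expose relu1_2, relu2_2, relu3_3, and relu4_3 (shapes assume a 224x224 input):

vgg = Vgg16(requires_grad=False)
x = torch.randn(1, 3, 224, 224)
h1 = vgg.slice1(x)   # relu1_2: (1, 64, 224, 224)
h2 = vgg.slice2(h1)  # relu2_2: (1, 128, 112, 112)
h3 = vgg.slice3(h2)  # relu3_3: (1, 256, 56, 56)
h4 = vgg.slice4(h3)  # relu4_3: (1, 512, 28, 28)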
Example #15
def load_arch(arch):
    """
    Load a pretrained network
    """
    if arch == 'vgg16':
        model = models.vgg16(pretrained=True)
        input_size = 25088
    elif arch == 'alexnet':
        model = models.alexnet(pretrained=True)
        input_size = 9216
    elif arch == 'resnet18':
        model = models.resnet18(pretrained=True)
        input_size = 512
    elif arch == 'densenet121':
        model = models.densenet121(pretrained=True)
        input_size = 1024
    else:
        raise ValueError('Please choose one of \'vgg16\', \'alexnet\', \'resnet18\' or \'densenet121\' for parameter arch.')
        
    for param in model.parameters():
        param.requires_grad = False
    
    return model, input_size
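A usage sketch (torch.nn assumed imported as nn; the 102-class head is illustrative):

model, input_size = load_arch('vgg16')          # input_size == 25088
model.classifier = nn.Linear(input_size, 102)   # replace the frozen classifier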
Example #16
#----------------------------------------------------------------------------------------------
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------

import argparse
import os
from six import text_type as _text_type
from mmdnn.conversion.examples.imagenet_test import TestKit
import torch
import torchvision.models as models


NETWORKS_MAP = {
    'inception_v3'      : lambda : models.inception_v3(pretrained=True),
    'vgg16'             : lambda : models.vgg16(pretrained=True),
    'vgg19'             : lambda : models.vgg19(pretrained=True),
    'resnet152'         : lambda : models.resnet152(pretrained=True),
    'densenet'          : lambda : models.densenet201(pretrained=True),
    'squeezenet'        : lambda : models.squeezenet1_1(pretrained=True)
}
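Each map entry is a zero-argument factory, so instantiation is deferred until lookup:

# illustrative lookup; builds the pretrained model on first call
model = NETWORKS_MAP['vgg16']()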


def _main():
    parser = argparse.ArgumentParser()

    parser.add_argument('-n', '--network',
                        type=_text_type, help='Model Type', required=True,
                        choices=NETWORKS_MAP.keys())

    parser.add_argument('-i', '--image', type=_text_type, help='Test Image Path')
Example #17
import time

import numpy as np
import psutil
import torch
import torch.optim as optim
from torchvision.models import vgg16

from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE

# set gpu_id 0
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# system monitor
info = psutil.virtual_memory()
monitor_interval = MONITOR_INTERVAL
avg_mem_usage = 0
max_mem_usage = 0
count = 0
total_time = 0

# get the whole model
vgg = vgg16()

start_time = time.time()
vgg = vgg.to(device)
total_time += time.time() - start_time

# training setting
num_iter = NUM_ITERS
batch_size = BATCH_SIZE
optimizer = optim.Adam(vgg.parameters(), lr=LERANING_RATE)

# data generator
gen = random_input_generator(num_iter, batch_size, format='NCHW')

# begin training
Example #18
def rebuild_load_checkpoint(check_model, check_filepath):

    in_args = input_args()

    n_input = None
    n_output = 102

    if check_model == 'densenet':
        densenet161 = models.densenet161(pretrained=True)

        models_select = {'densenet': densenet161}

        model = models_select[check_model]

        for param in model.parameters():
            param.requires_grad = False

        n_input = 2208

        classifier = nn.Sequential(
            OrderedDict([('fc1', nn.Linear(n_input, in_args.hidden_units)),
                         ('relu', nn.ReLU()), ('dropout', nn.Dropout(0.5)),
                         ('fc2', nn.Linear(in_args.hidden_units, n_output)),
                         ('output', nn.LogSoftmax(dim=1))]))

        model.classifier = classifier

        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.classifier.parameters(),
                              lr=in_args.lr,
                              momentum=0.9)

    elif check_model == 'vgg':
        vgg16 = models.vgg16(pretrained=True)

        models_select = {'vgg': vgg16}

        model = models_select[check_model]

        for param in model.parameters():
            param.requires_grad = False

        n_input = 4096

        classifier = nn.Sequential(
            OrderedDict([('fc1', nn.Linear(n_input, in_args.hidden_units)),
                         ('relu', nn.ReLU()), ('dropout', nn.Dropout(0.5)),
                         ('fc2', nn.Linear(in_args.hidden_units, n_output)),
                         ('output', nn.LogSoftmax(dim=1))]))

        model.classifier[6] = classifier

        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.classifier[6].parameters(),
                              lr=in_args.lr,
                              momentum=0.9)

    start_epoch = 0
    if os.path.isfile(check_filepath):

        print("\n=> loading checkpoint.. '{}'".format(check_filepath))

        checkpoint = torch.load(check_filepath)

        optimizer.load_state_dict(checkpoint['optimizer'])

        model.load_state_dict(checkpoint['state_dict'])

        start_epoch = checkpoint['epochs'] + 1

        model.class_to_idx = checkpoint['class_to_idx']

    print('\nCHECKPOINT MODEL: ', in_args.arch)
    print('\nOPTIMIZER STATE: ', optimizer)
    print('\nEPOCHS TRAINED: ', start_epoch)

    return model, optimizer, start_epoch
Example #19
    def process(self):
        if models is None or T is None or Image is None:
            raise ImportError('Package `torchvision` could not be found.')

        splits = np.load(osp.join(self.raw_dir, 'splits.npz'),
                         allow_pickle=True)
        category_idx = self.categories.index(self.category)
        train_split = list(splits['train'])[category_idx]
        test_split = list(splits['test'])[category_idx]

        image_path = osp.join(self.raw_dir, 'images', 'JPEGImages')
        info_path = osp.join(self.raw_dir, 'images', 'Annotations')
        annotation_path = osp.join(self.raw_dir, 'annotations')

        labels = {}

        vgg16_outputs = []

        def hook(module, x, y):
            vgg16_outputs.append(y)

        vgg16 = models.vgg16(pretrained=True)
        vgg16.eval()
        vgg16.features[20].register_forward_hook(hook)  # relu4_2
        vgg16.features[25].register_forward_hook(hook)  # relu5_1

        transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        train_data_list, test_data_list = [], []
        for i, name in enumerate(chain(train_split, test_split)):
            filename = '_'.join(name.split('/')[1].split('_')[:-1])
            idx = int(name.split('_')[-1].split('.')[0]) - 1

            path = osp.join(info_path, '{}.xml'.format(filename))
            obj = minidom.parse(path).getElementsByTagName('object')[idx]

            trunc = obj.getElementsByTagName('truncated')[0].firstChild.data
            occ = obj.getElementsByTagName('occluded')
            occ = '0' if len(occ) == 0 else occ[0].firstChild.data
            diff = obj.getElementsByTagName('difficult')[0].firstChild.data

            if bool(int(trunc)) or bool(int(occ)) or bool(int(diff)):
                continue

            xmin = float(obj.getElementsByTagName('xmin')[0].firstChild.data)
            xmax = float(obj.getElementsByTagName('xmax')[0].firstChild.data)
            ymin = float(obj.getElementsByTagName('ymin')[0].firstChild.data)
            ymax = float(obj.getElementsByTagName('ymax')[0].firstChild.data)
            box = (xmin, ymin, xmax, ymax)

            dom = minidom.parse(osp.join(annotation_path, name))
            keypoints = dom.getElementsByTagName('keypoint')
            if len(keypoints) < 3:
                continue
            poss, ys = [], []
            for keypoint in keypoints:
                label = keypoint.attributes['name'].value
                if label not in labels:
                    labels[label] = len(labels)
                ys.append(labels[label])
                poss.append(float(keypoint.attributes['x'].value))
                poss.append(float(keypoint.attributes['y'].value))
            y = torch.tensor(ys, dtype=torch.long)
            pos = torch.tensor(poss, dtype=torch.float).view(-1, 2)

            # Adjust bounding box (and positions) + add a small offset because
            # some keypoints lay outside the bounding boxes.
            box = (min(pos[:, 0].min().item(), box[0]) - 20,
                   min(pos[:, 1].min().item(), box[1]) - 20,
                   max(pos[:, 0].max().item(), box[2]) + 20,
                   max(pos[:, 1].max().item(), box[3]) + 20)

            pos[:, 0] = pos[:, 0] - box[0]
            pos[:, 1] = pos[:, 1] - box[1]

            path = osp.join(image_path, '{}.jpg'.format(filename))
            with open(path, 'rb') as f:
                img = Image.open(f).convert('RGB').crop(box)

            img = transform(img)
            size = img.size()[-2:]
            vgg16_outputs.clear()
            with torch.no_grad():
                vgg16(img.unsqueeze(0))

            xs = []
            for out in vgg16_outputs:
                out = F.interpolate(out, size, mode='bilinear',
                                    align_corners=False)
                out = out.squeeze(0).permute(1, 2, 0)
                out = out[pos[:, 1].round().long(), pos[:, 0].round().long()]
                xs.append(out)

            # Reset position.
            pos[:, 0] = pos[:, 0] + box[0] - xmin
            pos[:, 1] = pos[:, 1] + box[1] - ymin

            data = Data(x=torch.cat(xs, dim=-1), pos=pos, y=y)

            if self.pre_filter is not None and not self.pre_filter(data):
                continue
            if self.pre_transform is not None:
                data = self.pre_transform(data)

            if i < len(train_split):
                train_data_list.append(data)
            else:
                test_data_list.append(data)

        torch.save(self.collate(train_data_list), self.processed_paths[0])
        torch.save(self.collate(test_data_list), self.processed_paths[1])
Example #20
 def __init__(self, out_size):
     super(vgg16, self).__init__()
     self.vgg16 = models.vgg16(pretrained=True)
     num_ftrs = self.vgg16.classifier[6].in_features
     self.vgg16.classifier[6] = nn.Sequential(nn.Linear(num_ftrs, out_size),
                                              nn.Sigmoid())
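Swapping the last classifier layer for Linear + Sigmoid turns this into a multi-label head; a hedged usage sketch (the out_size value is illustrative, and the wrapped torchvision model is accessed directly):

net = vgg16(out_size=14)
probs = net.vgg16(torch.randn(2, 3, 224, 224))  # -> (2, 14), entries in (0, 1)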
Example #21
def main():

    ### Get input from user
    in_arg = get_input_args()
    print(in_arg)

    train_dir = in_arg.dir + '/train'
    valid_dir = in_arg.dir + '/valid'
    test_dir = in_arg.dir + '/test'

    # Pass transforms in here, then run the next cell to see how the transforms look
    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=test_transforms)

    # TODO: Using the image datasets and the transforms, define the dataloaders
    trainloader = torch.utils.data.DataLoader(train_data,
                                              batch_size=64,
                                              shuffle=True)
    validloader = torch.utils.data.DataLoader(valid_data,
                                              batch_size=64,
                                              shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=64)

    ### Define the model
    resnet18 = models.resnet18(pretrained=True)
    alexnet = models.alexnet(pretrained=True)
    vgg16 = models.vgg16(pretrained=True)
    densenet121 = models.densenet121(pretrained=True)

    models_dic = {
        'resnet': resnet18,
        'alexnet': alexnet,
        'vgg': vgg16,
        'densenet': densenet121
    }

    model_name = in_arg.arch

    ### Load the user-defined model
    model = models_dic[model_name]

    # Freeze parameters so we don't backprop through them
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    for param in model.parameters():
        param.requires_grad = False

    # NOTE: nn.Linear(1024, ...) matches densenet121's feature size; the other
    # entries in models_dic expose different sizes, so this head only fits densenet.
    classifier = nn.Sequential(
        OrderedDict([('fc1', nn.Linear(1024, in_arg.hidden_nodes)),
                     ('relu', nn.ReLU()),
                     ('fc2', nn.Linear(in_arg.hidden_nodes,
                                       in_arg.output_nodes)),
                     ('output', nn.LogSoftmax(dim=1))]))

    model.classifier = classifier

    criterion = nn.NLLLoss()

    # Only train the classifier parameters, feature parameters are frozen
    optimizer = optim.Adam(model.classifier.parameters(),
                           lr=in_arg.learning_rate)

    model.to(device)

    epochs = in_arg.epocs
    steps = 0
    running_loss = 0
    print_every = 5

    for i in keep_awake(range(5)):
        for epoch in range(epochs):
            for inputs, labels in trainloader:
                steps += 1
                # Move input and label tensors to the default device
                inputs, labels = inputs.to(device), labels.to(device)

                optimizer.zero_grad()

                logps = model.forward(inputs)
                loss = criterion(logps, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()

                if steps % print_every == 0:
                    valid_loss = 0
                    accuracy = 0
                    model.eval()
                    with torch.no_grad():
                        for inputs, labels in validloader:
                            inputs, labels = inputs.to(device), labels.to(
                                device)
                            logps = model.forward(inputs)
                            batch_loss = criterion(logps, labels)

                            valid_loss += batch_loss.item()

                            # Calculate accuracy
                            ps = torch.exp(logps)
                            top_p, top_class = ps.topk(1, dim=1)
                            equals = top_class == labels.view(*top_class.shape)
                            accuracy += torch.mean(
                                equals.type(torch.FloatTensor)).item()

                    print(
                        f"Epoch {epoch+1}/{epochs}.. "
                        f"Train loss: {running_loss/print_every:.3f}.. "
                        f"Validation loss: {valid_loss/len(testloader):.3f}.. "
                        f"Validation accuracy: {accuracy/len(testloader):.3f}")
                    running_loss = 0
                    model.train()

    ### Save the model
    model.class_to_idx = train_data.class_to_idx

    checkpoint = {
        'input_size': 1024,
        'output_size': in_arg.output_nodes,
        'hidden_layers': [each for each in model.classifier],
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'class_to_idx': model.class_to_idx
    }

    torch.save(checkpoint, in_arg.save_dir)
Example #22

import time

import requests
from flask import Flask
from torchvision import models, transforms

preprocess = transforms.Compose([
   transforms.Resize(256),
   transforms.CenterCrop(224),
   transforms.ToTensor(),
   transforms.Normalize(
     mean=[0.485, 0.456, 0.406],
     std=[0.229, 0.224, 0.225])
])

# get the labels
LABELS_URL = 'https://s3.amazonaws.com/mlpipes/pytorch-quick-start/labels.json'
labels = {int(key):value for (key, value)
          in requests.get(LABELS_URL).json().items()}

# get the model
model = models.vgg16(pretrained=True).eval()

app = Flask(__name__)
@app.route('/')
def home():
    return "This is a worker."

@app.route('/status', methods = ['GET'])
def getStatus():
    return status

@app.route('/predict', methods = ['POST'])
def predict():

    # record the start time
    start_time = time.time()
Example #23
    def __init__(self, name, nclasses=40, pretraining=True, cnn_name='vgg11', KNU_data=True, use_encdec=False, encdec_name='alexnet', encdim=4096):
        super(SVCNN, self).__init__(name)

        if KNU_data:
            self.classnames = ['BlindFlange', 'Cross', 'Elbow 90', 'Elbow non 90', 'Flange', 'Flange WN',
                               'Olet', 'OrificeFlange', 'Pipe', 'Reducer CONC', 'Reducer ECC',
                               'Reducer Insert', 'Safety Valve', 'Strainer', 'Tee', 'Tee RED',
                               'Valve', 'Wye']
        else:
            self.classnames = ['airplane', 'bathtub', 'bed', 'bench', 'bookshelf', 'bottle', 'bowl', 'car', 'chair',
                               'cone', 'cup', 'curtain', 'desk', 'door', 'dresser', 'flower_pot', 'glass_box',
                               'guitar', 'keyboard', 'lamp', 'laptop', 'mantel', 'monitor', 'night_stand',
                               'person', 'piano', 'plant', 'radio', 'range_hood', 'sink', 'sofa', 'stairs',
                               'stool', 'table', 'tent', 'toilet', 'tv_stand', 'vase', 'wardrobe', 'xbox']

        self.nclasses = nclasses
        self.pretraining = pretraining
        self.cnn_name = cnn_name
        self.use_encdec = use_encdec
        self.encdec_name = encdec_name
        self.encdim = encdim
        self.use_resnet = cnn_name.startswith('resnet')
        self.mean = Variable(torch.FloatTensor([0.485, 0.456, 0.406]), requires_grad=False).cuda()
        self.std = Variable(torch.FloatTensor([0.229, 0.224, 0.225]), requires_grad=False).cuda()

        if self.use_encdec:
            if self.encdec_name == 'simpleNet':
                self.encnet = nn.Sequential(
                    nn.Conv2d(3, 64, kernel_size=5, padding=2),
                    nn.ReLU(inplace=True),
                    nn.MaxPool2d(kernel_size=3, stride=2),
                    nn.Conv2d(64, 128, kernel_size=5, padding=2),
                    nn.ReLU(inplace=True),
                    nn.MaxPool2d(kernel_size=3, stride=2),
                    nn.Conv2d(128, 256, kernel_size=5, padding=2),
                    nn.ReLU(inplace=True),
                    nn.MaxPool2d(kernel_size=3, stride=2),
                )
                self.decnet = nn.Sequential(
                    nn.ConvTranspose2d(256, 128, kernel_size=7, stride= 2, padding=2),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(128, 64, kernel_size=7, stride= 2, padding=2),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(64, 3, kernel_size=6, stride=2, padding=1),
                    nn.ReLU(inplace=True),
                )

            if self.encdec_name == 'alexnet':
                self.encnet = models.alexnet().features
                # (6x6x256 to 224x224x3)
                self.decnet = nn.Sequential(
                    nn.ConvTranspose2d(256, 256, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(256, 384, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(384, 192, kernel_size=5, stride=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(192, 64, kernel_size=9, stride=3, padding=2),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(64, 3, kernel_size=10, stride=4, padding=3),
                    nn.ReLU(inplace=True)
                )
            if self.encdec_name == 'vgg11':
                # NOTE: this branch duplicates the alexnet encoder/decoder;
                # no vgg11-specific encoder is actually instantiated here.
                self.encnet = models.alexnet().features
                # (6x6x256 to 224x224x3)
                self.decnet = nn.Sequential(
                    nn.ConvTranspose2d(256, 256, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(256, 384, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(384, 192, kernel_size=5, stride=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(192, 64, kernel_size=9, stride=3, padding=2),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(64, 3, kernel_size=10, stride=4, padding=3),
                    nn.ReLU(inplace=True)
                )

        if self.use_resnet:
            if self.cnn_name == 'resnet18':
                self.net = models.resnet18(pretrained=self.pretraining)
                self.net.fc = nn.Linear(512,nclasses)
            elif self.cnn_name == 'resnet34':
                self.net = models.resnet34(pretrained=self.pretraining)
                self.net.fc = nn.Linear(512,nclasses)
            elif self.cnn_name == 'resnet50':
                self.net = models.resnet50(pretrained=self.pretraining)
                self.net.fc = nn.Linear(2048,nclasses)
        else:
            if self.cnn_name == 'alexnet':
                self.net_1 = models.alexnet(pretrained=self.pretraining).features
                self.net_2 = models.alexnet(pretrained=self.pretraining).classifier
            elif self.cnn_name == 'vgg11':
                self.net_1 = models.vgg11(pretrained=self.pretraining).features
                self.net_2 = models.vgg11(pretrained=self.pretraining).classifier
            elif self.cnn_name == 'vgg16':
                self.net_1 = models.vgg16(pretrained=self.pretraining).features
                self.net_2 = models.vgg16(pretrained=self.pretraining).classifier
            
            self.net_2._modules['6'] = nn.Linear(4096,nclasses)
Example #24
def build_model(args):
    model_name = args.model_name
    nb_classes = args.nb_classes
    pretrained = args.pretrain
    if model_name == 'vggnet16':
        return VGGNetBaseline(models.vgg16(pretrained=pretrained), nb_classes)
    elif model_name == 'vggnet19':
        return VGGNetBaseline(models.vgg19(pretrained=pretrained), nb_classes)
    elif model_name == 'inceptionv3':
        return InceptionBaseline(models.inception_v3(pretrained=pretrained),
                                 nb_classes)
    elif model_name == 'resnet50':
        return ResNetBaseline(models.resnet50(pretrained=pretrained),
                              nb_classes)
    elif model_name == 'resnet101':
        return ResNetBaseline(models.resnet101(pretrained=pretrained),
                              nb_classes)
    elif model_name == 'resnet152':
        return ResNetBaseline(models.resnet152(pretrained=pretrained),
                              nb_classes)
    elif model_name == 'squeezenet':
        return SqueezeNetBaseline(models.squeezenet1_0(pretrained=pretrained),
                                  nb_classes)
    elif model_name == 'densenet121':
        return DenseNetBaseline(models.densenet121(pretrained=pretrained),
                                nb_classes)
    elif model_name == 'densenet169':
        return DenseNetBaseline(models.densenet169(pretrained=pretrained),
                                nb_classes)
    elif model_name == 'shufflenetv2':
        return ShuffleNetBaseline(
            models.shufflenet_v2_x1_0(pretrained=pretrained), nb_classes)
    elif model_name == 'mobilenetv2':
        return MobileNetBaseline(models.mobilenet_v2(pretrained=pretrained),
                                 nb_classes)
    elif model_name == 'resnext50':
        return ResNeXtBaseline(models.resnext50_32x4d(pretrained=pretrained),
                               nb_classes)
    elif model_name == 'resnext101':
        return ResNeXtBaseline(models.resnext101_32x8d(pretrained=pretrained),
                               nb_classes)
    elif model_name == 'mnasnet':
        return MNASNetBaseline(models.mnasnet1_0(pretrained=pretrained),
                               nb_classes)
    elif model_name == 'lr-vggnet16':
        return RelationNet('vggnet16',
                           nb_classes,
                           num_moda=args.nb_moda,
                           num_units=args.nb_units)
    elif model_name == 'lr-resnet50':
        return RelationNet('resnet50',
                           nb_classes,
                           num_moda=args.nb_moda,
                           num_units=args.nb_units)
    elif model_name == 'svm':
        return MultiOutputClassifier(
            SVC(random_state=0, tol=1e-5, max_iter=100000, verbose=1), -1)
    elif model_name == 'xgboost':
        return MultiOutputClassifier(
            XGBClassifier(booster='gbtree',
                          n_jobs=100,
                          n_estimators=200,
                          verbosity=1,
                          use_label_encoder=False,
                          gpu_id=0), -1)
    elif model_name == 'rf':
        return MultiOutputClassifier(
            RandomForestClassifier(random_state=0, n_estimators=200,
                                   verbose=1), -1)
    elif model_name == 'saff':
        return SAFF(models.vgg16(pretrained=pretrained), nb_classes, 256)
    elif model_name == 'facnn':
        return FACNN(models.vgg16(pretrained=pretrained), nb_classes)
    elif model_name == 'kfb':
        return KFB(models.vgg16(pretrained=pretrained), nb_classes)
    else:
        print(
            'The selected model is not pre-defined! Falling back to the default model (ResNeXt101)!'
        )
        return ResNeXtBaseline(models.resnext101_32x8d(pretrained=pretrained),
                               nb_classes)
Example #25
    def __init__(self,
                 classes,
                 step1_model_path,
                 fix_cnn_base=False,
                 class_agnostic=False,
                 pretrained=False,
                 base_model='vgg16'):
        super(vgg16_step2, self).__init__()
        self.model_path = 'data/pretrained_model/vgg16_caffe.pth'
        self.dout_base_model = 512
        self.pretrained = pretrained
        self.class_agnostic = class_agnostic
        self.classes = classes
        # loss
        self.RCNN_loss_cls = 0
        self.RCNN_loss_bbox = 0

        #define base
        vgg = models.vgg16()
        if self.pretrained:
            print("Step2: Loading pretrained weights from %s" %
                  (self.model_path))
            state_dict = torch.load(self.model_path)
            vgg.load_state_dict(
                {k: v
                 for k, v in state_dict.items() if k in vgg.state_dict()})
        # not using the last maxpool layer
        self.RCNN_base = nn.Sequential(
            *list(vgg.features._modules.values())[:-1])

        # define rpn
        self.RCNN_rpn = _RPN(self.dout_base_model)
        #init weight of rpn
        state_dict_rpn = torch.load(step1_model_path)
        self.RCNN_rpn.load_state_dict({
            k.replace('RCNN_rpn.', ''): v
            for k, v in state_dict_rpn['model'].items() if 'RCNN_rpn' in k
        })
        for key, value in dict(self.RCNN_rpn.named_parameters()).items():
            value.requires_grad = False

        # define detector
        self.detector = _detector(self.classes,
                                  self.class_agnostic,
                                  pretrained,
                                  base_model=base_model)

        #init weight of detector
        def normal_init(m, mean, stddev, truncated=False):
            """
            weight initalizer: truncated normal and random normal.
            """
            # x is a parameter
            if truncated:
                m.weight.data.normal_().fmod_(2).mul_(stddev).add_(
                    mean)  # not a perfect approximation
            else:
                m.weight.data.normal_(mean, stddev)
                m.bias.data.zero_()

        normal_init(self.detector.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.detector.RCNN_bbox_pred, 0, 0.001,
                    cfg.TRAIN.TRUNCATED)
Example #26
def main():
    parser = argparse.ArgumentParser(description='Testing')
    parser.add_argument('--obj', type=str, default='.')
    parser.add_argument('--data_type', type=str, default='mvtec')
    parser.add_argument('--data_path', type=str, default='.')
    parser.add_argument('--checkpoint_dir', type=str, default='.')
    parser.add_argument("--grayscale",
                        action='store_true',
                        help='color or grayscale input image')
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--img_resize', type=int, default=128)
    parser.add_argument('--crop_size', type=int, default=128)
    parser.add_argument('--seed', type=int, default=None)
    args = parser.parse_args()
    args.save_dir = './' + args.data_type + '/' + args.obj + '/vgg_feature' + '/seed_{}/'.format(
        args.seed)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # load model and dataset
    args.input_channel = 1 if args.grayscale else 3
    model = VAE(input_channel=args.input_channel, z_dim=100).to(device)
    checkpoint = torch.load(args.checkpoint_dir)
    model.load_state_dict(checkpoint['model'])
    teacher = models.vgg16(pretrained=True).to(device)
    for param in teacher.parameters():
        param.requires_grad = False

    img_size = args.crop_size if args.img_resize != args.crop_size else args.img_resize
    kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}

    test_dataset = MVTecDataset(args.data_path,
                                class_name=args.obj,
                                is_train=False,
                                resize=img_size)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              **kwargs)

    scores, test_imgs, recon_imgs, gt_list, gt_mask_list = test(
        model, teacher, test_loader)
    scores = np.asarray(scores)
    max_anomaly_score = scores.max()
    min_anomaly_score = scores.min()
    scores = (scores - min_anomaly_score) / (max_anomaly_score -
                                             min_anomaly_score)
    gt_mask = np.asarray(gt_mask_list)
    precision, recall, thresholds = precision_recall_curve(
        gt_mask.flatten(), scores.flatten())
    a = 2 * precision * recall
    b = precision + recall
    f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
    threshold = thresholds[np.argmax(f1)]

    fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten())
    per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten())
    print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))

    plt.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (args.obj, per_pixel_rocauc))
    plt.legend(loc="lower right")
    save_dir = args.save_dir + '/' + f'seed_{args.seed}' + '/' + 'pictures_{:.4f}'.format(
        threshold)
    os.makedirs(save_dir, exist_ok=True)
    plt.savefig(os.path.join(save_dir, args.obj + '_roc_curve.png'), dpi=100)

    plot_fig(args, test_imgs, recon_imgs, scores, gt_mask_list, threshold,
             save_dir)
Example #27
benign_path = os.path.join(processed_path, "no_cancer")
malignant_path = os.path.join(processed_path, "cancer")
feature_path = os.path.join(data_path, "calculated_features")
segmentation_path = os.path.join(data_path, "segmentation")
os.makedirs(feature_path, exist_ok=True)
#%%
# ImageNet normalization statistics that VGG16 expects
mean = np.asarray([0.485, 0.456, 0.406])
std = np.asarray([0.229, 0.224, 0.225])

device = torch.device("cuda")
#%%

list_of_img_names = os.listdir(benign_path)

model = models.vgg16(pretrained=True).to(device).eval()

img_features = np.empty((len(list_of_img_names), 25088))
cd_features = -np.ones((len(list_of_img_names), 2, 25088))  # rel, irrel
avg_layer = torch.nn.AdaptiveAvgPool2d((7, 7))
from skimage.morphology import square
my_square = square(20)
with torch.no_grad():
    for i in tqdm(range(len(list_of_img_names))):
        img = Image.open(oj(benign_path, list_of_img_names[i]))
        img_torch = torch.from_numpy(
            ((np.asarray(img) / 255.0 - mean) / std).swapaxes(0, 2).swapaxes(
                1, 2))[None, :].cuda().float()
        img.close()
        img_features[i] = avg_layer(
            model.features(img_torch)).view(-1).cpu().numpy()
Example #28
 def __init__(self):
     super(Vgg16, self).__init__()
     features = list(vgg16(pretrained=True).features)[:23]
     # layers 3, 8, 15, and 22 of features are relu1_2, relu2_2, relu3_3, relu4_3, respectively
     self.features = nn.ModuleList(features).eval()
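A matching forward pass would collect the activations at those four indices; a minimal sketch:

 def forward(self, x):
     results = []
     for ii, layer in enumerate(self.features):
         x = layer(x)
         if ii in {3, 8, 15, 22}:  # relu1_2, relu2_2, relu3_3, relu4_3
             results.append(x)
     return results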
Example #29
    def __call__(self, inputs, layer, repeat=1):
        return inputs.repeat(repeat, 1, 1, 1), None


params = {
    'repeat': {
        'type': 'slider',
        'min': 1,
        'max': 100,
        'value': 2,
        'step': 1,
        'params': {}
    }
}

visualisation = partial(WebInterface.from_visualisation,
                        RepeatInput,
                        params=params,
                        name='Visualisation')
# create a model
model = vgg16(pretrained=True)
# open some images
cat = Image.open("./cat.jpg")
dog_and_cat = Image.open("./dog_and_cat.jpg")
# resize the image and make it a tensor
to_input = Compose([Resize((224, 224)), ToTensor()])
# call mirror with the inputs and the model
mirror([to_input(cat), to_input(dog_and_cat)],
       model,
       visualisations=[visualisation])
Example #30
 def __init__(self, layers=6):
     super(PerceptualLoss, self).__init__()
     self.vgg = tvmodels.vgg16(pretrained=True).features[:layers]
     self.vgg.eval()
     for p in self.vgg.parameters():
         p.requires_grad_(False)
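The loss module only builds the frozen feature extractor; a minimal forward sketch, assuming torch.nn.functional is imported as F:

 def forward(self, pred, target):
     # compare activations from the frozen VGG16 slice with MSE
     return F.mse_loss(self.vgg(pred), self.vgg(target))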
Example #31
 def __init__(self, model=None, pretrained=False):
     super(Three2I_colorization, self).__init__()
     self.model = model
     if model == 'vgg16':
         self.model_f = models.vgg16(pretrained=pretrained).features
         self.model_r = models.vgg16(pretrained=pretrained).features
         self.model_t = models.vgg16(pretrained=pretrained).features
         self.model_i = models.vgg16(pretrained=pretrained).features
         self.mlp1 = nn.Sequential(nn.Linear(512 * 3 * 6 * 6, 4096),
                                   nn.ReLU(inplace=True))
         self.mlp2 = nn.Sequential(nn.Linear(512 * 6 * 6, 4096),
                                   nn.ReLU(inplace=True))
         self.mlp3 = nn.Sequential(nn.Linear(2 * 4096, 4096),
                                   nn.ReLU(inplace=True),
                                   nn.Linear(4096, 1))
         nn.init.normal_(self.mlp1[0].weight, 0, 0.01)
         nn.init.constant_(self.mlp1[0].bias, 0)
         nn.init.normal_(self.mlp2[0].weight, 0, 0.01)
         nn.init.constant_(self.mlp2[0].bias, 0)
         nn.init.normal_(self.mlp3[0].weight, 0, 0.01)
         nn.init.constant_(self.mlp3[0].bias, 0)
         nn.init.normal_(self.mlp3[2].weight, 0, 0.01)
         nn.init.constant_(self.mlp3[2].bias, 0)
     if model == 'resnet50':
         self.model_f = nn.Sequential(
             models.resnet50(pretrained=pretrained).conv1,
             models.resnet50(pretrained=pretrained).bn1,
             nn.ReLU(inplace=True),
             nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
             models.resnet50(pretrained=pretrained).layer1,
             models.resnet50(pretrained=pretrained).layer2,
             models.resnet50(pretrained=pretrained).layer3,
             models.resnet50(pretrained=pretrained).layer4,
             nn.AdaptiveAvgPool2d((1, 1)))
         self.model_r = nn.Sequential(
             models.resnet50(pretrained=pretrained).conv1,
             models.resnet50(pretrained=pretrained).bn1,
             nn.ReLU(inplace=True),
             nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
             models.resnet50(pretrained=pretrained).layer1,
             models.resnet50(pretrained=pretrained).layer2,
             models.resnet50(pretrained=pretrained).layer3,
             models.resnet50(pretrained=pretrained).layer4,
             nn.AdaptiveAvgPool2d((1, 1)))
         self.model_t = nn.Sequential(
             models.resnet50(pretrained=pretrained).conv1,
             models.resnet50(pretrained=pretrained).bn1,
             nn.ReLU(inplace=True),
             nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
             models.resnet50(pretrained=pretrained).layer1,
             models.resnet50(pretrained=pretrained).layer2,
             models.resnet50(pretrained=pretrained).layer3,
             models.resnet50(pretrained=pretrained).layer4,
             nn.AdaptiveAvgPool2d((1, 1)))
         self.model_i = nn.Sequential(
             models.resnet50(pretrained=pretrained).conv1,
             models.resnet50(pretrained=pretrained).bn1,
             nn.ReLU(inplace=True),
             nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
             models.resnet50(pretrained=pretrained).layer1,
             models.resnet50(pretrained=pretrained).layer2,
             models.resnet50(pretrained=pretrained).layer3,
             models.resnet50(pretrained=pretrained).layer4,
             nn.AdaptiveAvgPool2d((1, 1)))
         self.mlp1 = nn.Sequential(nn.Linear(512 * 3 * 4, 1024),
                                   nn.ReLU(inplace=True))
         self.mlp2 = nn.Sequential(nn.Linear(512 * 4, 1024),
                                   nn.ReLU(inplace=True))
         self.mlp3 = nn.Linear(2 * 1024, 1)
         nn.init.normal_(self.mlp3.weight, 0, 0.01)
         nn.init.constant_(self.mlp3.bias, 0)
     nn.init.normal_(self.mlp1[0].weight, 0, 0.01)
     nn.init.constant_(self.mlp1[0].bias, 0)
     nn.init.normal_(self.mlp2[0].weight, 0, 0.01)
     nn.init.constant_(self.mlp2[0].bias, 0)
Example #32
def saveFeature(imgFolder, opt, model='resnet34', workers=4, batch_size=64):
    '''
        model: inception_v3, vgg13, vgg16, vgg19, resnet18, resnet34,
               resnet50, resnet101, or resnet152
    '''
    g = Globals()

    mkdir(g.default_feature_dir + opt.data)
    feature_dir = g.default_feature_dir + opt.data + "/" + lastFolder(imgFolder)
    
    conv_path = '{}_{}_conv.pth'.format(feature_dir, model)
    class_path = '{}_{}_class.pth'.format(feature_dir, model)
    smax_path = '{}_{}_smax.pth'.format(feature_dir, model)

    if (os.path.exists(conv_path) and os.path.exists(class_path) and os.path.exists(smax_path)):
        print("Features were already generated; skipping.")
        return

    if hasattr(opt, 'feat_model') and opt.feat_model is not None:
        model = opt.feat_model
    if model == 'vgg' or model == 'vgg16':
        vgg = models.vgg16(pretrained=True).cuda().eval()

        trans = transforms.Compose([
            transforms.Scale(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving vgg features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = vgg.features(input)
            fconv_out = fconv.mean(3).mean(2).squeeze()
            fconv = fconv.view(fconv.size(0), -1)
            flogit = vgg.classifier(fconv)
            fsmax = F.softmax(flogit)
            feature_conv.append(fconv_out.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)

    elif model.find('resnet') >= 0:
        if model == 'resnet34_cifar':
            # Please load your own model. Example here:
            # c = torch.load(
            #     '/home/gh349/xqt/wide-resnet.pytorch/checkpoint/cifar10/gan-resnet-34.t7')
            # resnet = c['net']
            pass
            print('Using resnet34 trained on cifar10.')
            raise NotImplementedError()

        elif model == 'resnet34_random':
            # Please load your own model. Example here:
            # resnet = torch.load(
            #     '/home/gh349/xqt/wide-resnet.pytorch/checkpoint/cifar10/random_resnet34.t7')
            pass
            print('Using resnet34 with random weights.')
            raise NotImplementedError()

        else:
            resnet = getattr(models, 'resnet34')(pretrained=True)
            print('Using resnet34 with pretrained weights.')

        resnet.cuda().eval()
        resnet_feature = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                       resnet.maxpool, resnet.layer1,
                                       resnet.layer2, resnet.layer3, resnet.layer4)

        trans = transforms.Compose([
            transforms.Scale(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving resnet features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = resnet_feature(input)
            fconv = fconv.mean(3).mean(2).squeeze()
            flogit = resnet.fc(fconv)
            fsmax = F.softmax(flogit, dim=1)
            feature_conv.append(fconv.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)


    elif model == 'inception' or model == 'inception_v3':
        inception = models.inception_v3(
            pretrained=True, transform_input=False).cuda().eval()
        inception_feature = nn.Sequential(inception.Conv2d_1a_3x3,
                                          inception.Conv2d_2a_3x3,
                                          inception.Conv2d_2b_3x3,
                                          nn.MaxPool2d(3, 2),
                                          inception.Conv2d_3b_1x1,
                                          inception.Conv2d_4a_3x3,
                                          nn.MaxPool2d(3, 2),
                                          inception.Mixed_5b,
                                          inception.Mixed_5c,
                                          inception.Mixed_5d,
                                          inception.Mixed_6a,
                                          inception.Mixed_6b,
                                          inception.Mixed_6c,
                                          inception.Mixed_6d,
                                          inception.Mixed_7a,
                                          inception.Mixed_7b,
                                          inception.Mixed_7c,
                                          ).cuda().eval()

        trans = transforms.Compose([
            transforms.Scale(299),  # inception_v3 was trained on 299x299 inputs
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving inception features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = inception_feature(input)
            fconv = fconv.mean(3).mean(2).squeeze()
            flogit = inception.fc(fconv)
            fsmax = F.softmax(flogit, dim=1)
            feature_conv.append(fconv.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)

    else:
        raise NotImplementedError


    torch.save(feature_conv, '{}_{}_conv.pth'.format(feature_dir, model))
    torch.save(feature_class, '{}_{}_class.pth'.format(feature_dir, model))
    torch.save(feature_smax, '{}_{}_smax.pth'.format(feature_dir, model))
    return feature_conv, feature_class, feature_smax
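
The loop above implements a pattern worth naming: for each batch it keeps (a) globally average-pooled convolutional features, (b) classifier logits, and (c) softmax probabilities. A minimal modern sketch of the same idea, using torch.no_grad() in place of the deprecated volatile Variables (function and variable names here are illustrative, not from the source):

import torch
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.models as models
import torchvision.transforms as transforms

def dump_vgg_features(img_folder, batch_size=64, workers=4):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    vgg = models.vgg16(pretrained=True).to(device).eval()
    trans = transforms.Compose([
        transforms.Resize(224),      # Resize replaces the deprecated Scale
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    loader = torch.utils.data.DataLoader(
        dset.ImageFolder(root=img_folder, transform=trans),
        batch_size=batch_size, num_workers=workers, shuffle=False)

    convs, logits, probs = [], [], []
    with torch.no_grad():            # replaces Variable(..., volatile=True)
        for img, _ in loader:
            fconv = vgg.features(img.to(device))        # N x 512 x 7 x 7
            convs.append(fconv.mean(dim=(2, 3)).cpu())  # pooled conv features
            flogit = vgg.classifier(fconv.flatten(1))   # N x 1000 logits
            logits.append(flogit.cpu())
            probs.append(F.softmax(flogit, dim=1).cpu())
    return torch.cat(convs), torch.cat(logits), torch.cat(probs)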
Example #33
0
def evaluate_metrics(p,
                     img_dict,
                     model='resnet',
                     times=1,
                     metrics=[],
                     outpath_root='.',
                     labs_vs_gt=None):
    GT = labs_vs_gt[0]
    labs = labs_vs_gt[1]
    num_imgs = len(img_dict)
    if model == 'resnet':
        arch = models.resnet18(pretrained=True).eval()
    elif model == 'vgg':
        arch = models.vgg16(pretrained=True).eval()
    elif model == 'alexnet':
        arch = models.alexnet(pretrained=True).eval()
    else:
        raise ValueError('unsupported model: {}'.format(model))

    if torch.cuda.is_available():
        arch = arch.cuda()

    start = time.time()
    now = start
    average_drop, increase_in_confidence = 0.0, 0.0
    deletion, insertion = [], []
    if metrics:  # `metrics is not []` compared identities and was always True
        for _ in range(times):
            for i, (k, img) in enumerate(img_dict.items()):
                outpath = os.path.join(outpath_root, f'{k}_{img}') + '/'
                inp_0 = load_image(p + '/' + img)
                os.mkdir(outpath)
                inp_0.save(f'{outpath}{img}')
                inp = apply_transforms(inp_0)
                if torch.cuda.is_available():
                    inp = inp.cuda()
                #print(f'Before test.run: {round(time.time() - now, 0)}s')
                now = time.time()
                out, scorecam_map = expmap.get_explanation_map(arch=model,
                                                               img=p + '/' +
                                                               img)
                F.to_pil_image(
                    scorecam_map.squeeze(0)).save(f'{outpath}/exp_map.png')
                #print(f'After test.run: {round(time.time() - now, 0)}s')
                now = time.time()
                if torch.cuda.is_available():
                    scorecam_map = scorecam_map.cuda()
                #print(f'Before arch: {round(time.time() - now, 0)}s')
                now = time.time()
                out_sal = FF.softmax(arch(inp * scorecam_map), dim=1)
                #print(f'After arch: {round(time.time() - now, 0)}s')
                now = time.time()
                # print(type(out_sal),out_sal.shape)
                Y_i_c = out.max(1)[0].item()
                class_idx = out.max(1)[-1].item()
                class_name = labs[class_idx]
                gt_name = GT[str(img[-13:-5])][0].split()[1]
                O_i_c = out_sal[:, class_idx][0].item()
                # print(f'#-------------------------------------------------------------------#')
                # print(f'{Y_i_c},{out.max(1)[-1].item()},\n{O_i_c},{out_sal.max(1)[-1].item()}\n')
                # print(f'{Y_i_c},{O_i_c},{max(0.0,Y_i_c-O_i_c)},{max(0,Y_i_c-O_i_c)/Y_i_c}')
                # print('#-------------------------------------------------------------------#')
                if 'average_drop' in metrics and 'increase_in_confidence' in metrics:
                    average_drop, increase_in_confidence = ADIC.average_drop_and_increase_of_confidence(
                        average_drop, increase_in_confidence, Y_i_c, O_i_c)
                if 'deletion' in metrics and 'insertion' in metrics:
                    precision = 100
                    deletion, insertion = DAI.deletion_and_insertion(
                        deletion,
                        insertion,
                        inp,
                        scorecam_map,
                        arch,
                        step=1 / precision)
                    #print(deletion, insertion)

                    #deletion_score = round(torch.tensor(deletion).sum().item() / precision,3)
                    #insertion_score = round(torch.tensor(insertion).sum().item() / precision,3)
                    deletion_score = round(
                        SKM.auc(
                            torch.arange(0, 1, 1 / precision).numpy(),
                            torch.tensor(deletion).numpy()), 3)
                    insertion_score = round(
                        SKM.auc(
                            torch.arange(0, 1, 1 / precision).numpy(),
                            torch.tensor(insertion).numpy()), 3)
                    plot(torch.arange(0, 1, 1 / precision),
                         [deletion, insertion],
                         label=[
                             f'deletion={deletion_score}',
                             f'insertion={insertion_score}'
                         ],
                         path=f'{outpath}plot_{k}.png',
                         title=f'label={class_name}, GT={gt_name}')

                    print(f'The final deletion is: {deletion_score}')
                    print(f'The final insertion is: {insertion_score}')
                    deletion, insertion = [], []
                print(f'After one img: {int(time.time() - now)}s')
                now = time.time()

            print(f'Across {num_imgs} images:')
            if 'average_drop' in metrics and 'increase_in_confidence' in metrics:
                average_drop *= 100 / num_imgs
                increase_in_confidence *= 100 / num_imgs
                print(f'The final AVG drop is: {round(average_drop, 2)}%')
                print(
                    f'The final Increase in Confidence is: {round(increase_in_confidence, 2)}%'
                )

        print(f'Execution time: {int(time.time() - start)}s')
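
The ADIC helper used above is not shown in this fragment. Assuming it implements the standard Average Drop and Increase in Confidence metrics (Chattopadhay et al., Grad-CAM++), a hedged sketch of the accumulator would be:

def average_drop_and_increase_of_confidence(avg_drop, inc_conf, y_ic, o_ic):
    # y_ic: score of class c on the full image
    # o_ic: score of class c on the image masked by the explanation map
    avg_drop += max(0.0, y_ic - o_ic) / y_ic  # relative drop, clipped at 0
    inc_conf += 1.0 if o_ic > y_ic else 0.0   # count confidence increases
    return avg_drop, inc_conf

Both accumulators are later scaled by 100 / num_imgs, matching the percentage printout at the end of the loop above.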
Example #34
0
    PretrainedClassifier(
        name="AlexNet",
        classifier=ImageNetClassifier(model=models.alexnet(pretrained=True)),
        paper="https://papers.nips.cc/paper/2012/file/"
        "c399862d3b9d6b76c8436e924a68c45b-Paper.pdf"),
    PretrainedClassifier(name="Inception v3",
                         classifier=ImageNetClassifier(
                             model=models.inception_v3(pretrained=True)),
                         paper="https://arxiv.org/abs/1512.00567"),
    PretrainedClassifier(
        name="GoogleNet",
        classifier=ImageNetClassifier(model=models.googlenet(pretrained=True)),
        paper="https://arxiv.org/abs/1409.4842"),
    PretrainedClassifier(
        name="VGG-16",
        classifier=ImageNetClassifier(model=models.vgg16(pretrained=True)),
        paper="https://arxiv.org/abs/1409.1556"),
    PretrainedClassifier(name="Wide ResNet 50-2",
                         classifier=ImageNetClassifier(
                             model=models.wide_resnet50_2(pretrained=True)),
                         paper="https://arxiv.org/abs/1512.03385"),
    PretrainedClassifier(
        name="ResNet18",
        classifier=ImageNetClassifier(model=models.resnet18(pretrained=True)),
        paper="https://arxiv.org/abs/1512.03385")
]
"""
PretrainedClassifiersList = [
    (classifier.name, index)
    for index, classifier in enumerate(pretrained_classifiers, 1)
]
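
Since enumerate(..., 1) makes the stored indices 1-based, looking a classifier up by the index exposed in PretrainedClassifiersList requires subtracting one again. A small usage sketch (assuming pretrained_classifiers is the registry list closed above; the helper name is illustrative):

def get_classifier(index):
    # index is the 1-based value stored in PretrainedClassifiersList
    return pretrained_classifiers[index - 1].classifier

for name, index in PretrainedClassifiersList:
    print(index, name)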
Example #35
0
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
from tqdm import tqdm
from config import cfg
from utils.encrypt_model import cryptography

if cfg.MODEL.NAME == 'vgg16':
    model = models.vgg16(pretrained=True)  #.cuda()
elif cfg.MODEL.NAME == 'resnet18':
    model = models.resnet18(pretrained=True)
elif cfg.MODEL.NAME == 'resnet50':
    model = models.resnet50(pretrained=True)
elif cfg.MODEL.NAME == 'resnet101':
    model = models.resnet101(pretrained=True)
else:
    raise Exception(
        '{} is not supported currently, or you can define your own model'.
        format(cfg.MODEL.NAME))

if cfg.DEVICE.CUDA:
    model = model.cuda()

normalize = transforms.Normalize(mean=cfg.VAL_DATASET.MEAN,
                                 std=cfg.VAL_DATASET.STD)

val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
Example #36
0
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)

val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             shuffle=False)

# Collect the loaders in a dictionary object
dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}

# Load the pretrained VGG-16 model

# Create a VGG-16 model instance
use_pretrained = True  # use the pretrained parameters
net = models.vgg16(pretrained=use_pretrained)

# Replace the final output layer of VGG-16 with two output units (ants and bees)
net.classifier[6] = nn.Linear(in_features=4096, out_features=2)

# Set the network to training mode
net.train()

print('Network ready: loaded the pretrained weights and set training mode')

# Set up the loss function
criterion = nn.CrossEntropyLoss()

# For fine-tuning, collect the parameters to train into the variables
# params_to_update_1 through 3 (see the sketch after this snippet)

params_to_update_1 = []
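
The fragment stops after creating the first group; a hedged sketch of how the three groups are typically filled and given separate learning rates (the layer-name lists and learning rates below are assumptions for illustration):

import torch.optim as optim

params_to_update_2, params_to_update_3 = [], []
update_param_names_1 = ["features"]
update_param_names_2 = ["classifier.0.weight", "classifier.0.bias",
                        "classifier.3.weight", "classifier.3.bias"]
update_param_names_3 = ["classifier.6.weight", "classifier.6.bias"]

for name, param in net.named_parameters():
    if update_param_names_1[0] in name:
        param.requires_grad = True
        params_to_update_1.append(param)    # convolutional features
    elif name in update_param_names_2:
        param.requires_grad = True
        params_to_update_2.append(param)    # earlier classifier layers
    elif name in update_param_names_3:
        param.requires_grad = True
        params_to_update_3.append(param)    # the replaced output layer
    else:
        param.requires_grad = False         # freeze everything else

# each group can then get its own learning rate:
optimizer = optim.SGD([
    {'params': params_to_update_1, 'lr': 1e-4},
    {'params': params_to_update_2, 'lr': 5e-4},
    {'params': params_to_update_3, 'lr': 1e-3},
], momentum=0.9)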
Example #37
0
    def __init__(
        self,
        classification_model_name: str = 'resnext101_32x8d',
        detection_model_name: str = 'fasterrcnn_resnet50_fpn',
    ):

        input_size = 224

        # Classification Model
        if classification_model_name == 'resnet18':
            self.model_classification = models.resnet18(pretrained=True)
        elif classification_model_name == 'resnet34':
            self.model_classification = models.resnet34(pretrained=True)
        elif classification_model_name == 'resnet50':
            self.model_classification = models.resnet50(pretrained=True)
        elif classification_model_name == 'resnet101':
            self.model_classification = models.resnet101(pretrained=True)
        elif classification_model_name == 'resnet152':
            self.model_classification = models.resnet152(pretrained=True)
        elif classification_model_name == 'alexnet':
            self.model_classification = models.alexnet(pretrained=True)
        elif classification_model_name == 'squeezenet1_0':
            self.model_classification = models.squeezenet1_0(pretrained=True)
        elif classification_model_name == 'vgg16':
            self.model_classification = models.vgg16(pretrained=True)
        elif classification_model_name == 'densenet161':
            self.model_classification = models.densenet161(pretrained=True)
        elif classification_model_name == 'densenet201':
            self.model_classification = models.densenet201(pretrained=True)
        elif classification_model_name == 'inception_v3':
            input_size = 299
            self.model_classification = models.inception_v3(pretrained=True)
        elif classification_model_name == 'googlenet':
            self.model_classification = models.googlenet(pretrained=True)
        elif classification_model_name == 'shufflenet_v2_x1_0':
            self.model_classification = models.shufflenet_v2_x1_0(
                pretrained=True)
        elif classification_model_name == 'mobilenet_v2':
            self.model_classification = models.mobilenet_v2(pretrained=True)
        elif classification_model_name == 'resnext50_32x4d':
            self.model_classification = models.resnext50_32x4d(pretrained=True)
        elif classification_model_name == 'resnext101_32x8d':
            self.model_classification = models.resnext101_32x8d(
                pretrained=True)
        elif classification_model_name == 'wide_resnet50_2':
            self.model_classification = models.wide_resnet50_2(pretrained=True)
        elif classification_model_name == 'mnasnet1_0':
            self.model_classification = models.mnasnet1_0(pretrained=True)
        else:
            raise ValueError('Incompatible classification model name')

        # Detection Model
        if detection_model_name == 'fasterrcnn_resnet50_fpn':
            self.model_detection = models.detection.fasterrcnn_resnet50_fpn(
                pretrained=True)
        elif detection_model_name == 'maskrcnn_resnet50_fpn':
            self.model_detection = models.detection.maskrcnn_resnet50_fpn(
                pretrained=True)
        elif detection_model_name == 'keypointrcnn_resnet50_fpn':
            self.model_detection = models.detection.keypointrcnn_resnet50_fpn(
                pretrained=True)
        else:
            raise ValueError('Incompatible detection model name')

        # Detect if we have a GPU available
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model_classification = self.model_classification.to(device)
        self.model_detection = self.model_detection.to(device)

        # Put models in evaluation mode
        self.model_classification.eval()
        self.model_detection.eval()

        # Define tensor transform
        self.transform_classification = transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.transform_detection = transforms.Compose([transforms.ToTensor()])

        # Load Labels
        with open('iCerebro/imagenet_labels.txt') as f:
            self.labels = [line.strip()[10:] for line in f.readlines()]

        self.COCO_INSTANCE_CATEGORY_NAMES = [
            '__background__', 'person', 'bicycle', 'car', 'motorcycle',
            'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
            'fire hydrant', 'N/A', 'stop sign', 'parking meter', 'bench',
            'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
            'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
            'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
            'sports ball', 'kite', 'baseball bat', 'baseball glove',
            'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A',
            'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana',
            'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog',
            'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
            'N/A', 'dining table', 'N/A', 'N/A', 'toilet', 'N/A', 'tv',
            'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
            'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', 'clock',
            'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
        ]
        self.COCO_PERSON_KEYPOINT_NAMES = [
            'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
            'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
            'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee',
            'right_knee', 'left_ankle', 'right_ankle'
        ]
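
Only __init__ is shown; a hedged usage sketch of how the classification half of this wrapper might be driven (the helper name and top-k handling are illustrative, not part of the class):

import torch
from PIL import Image

def classify_image(wrapper, image_path, topk=3):
    device = next(wrapper.model_classification.parameters()).device
    img = Image.open(image_path).convert('RGB')
    batch = wrapper.transform_classification(img).unsqueeze(0).to(device)
    with torch.no_grad():
        probs = torch.softmax(wrapper.model_classification(batch), dim=1)
    top_p, top_i = probs.topk(topk, dim=1)
    return [(wrapper.labels[i], p.item()) for p, i in zip(top_p[0], top_i[0])]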
Example #38
0
    args = PIActiveLearningModel.add_model_specific_args(args)
    params = args.parse_args()

    active_set = ActiveLearningDataset(
        CIFAR10(params.data_root,
                train=True,
                transform=PIModel.train_transform,
                download=True),
        pool_specifics={'transform': PIModel.test_transform})
    active_set.label_randomly(500)

    print("Active set length: {}".format(len(active_set)))
    print("Pool set length: {}".format(len(active_set.pool)))

    heuristic = get_heuristic(params.heuristic)
    model = vgg16(pretrained=False, num_classes=10)
    weights = load_state_dict_from_url(
        'https://download.pytorch.org/models/vgg16-397923af.pth')
    weights = {k: v for k, v in weights.items() if 'classifier.6' not in k}
    model.load_state_dict(weights, strict=False)
    model = PIActiveLearningModel(network=model,
                                  active_dataset=active_set,
                                  hparams=params)

    dp = 'dp' if params.gpus > 1 else None
    trainer = BaalTrainer(
        max_epochs=params.epochs,
        default_root_dir=params.data_root,
        gpus=params.gpus,
        distributed_backend=dp,
        # The weights of the model will change as it gets
#print("Our model:\n\n", trained_model, '\n')
#print("The state dict keys: \n\n", trained_model.state_dict().keys())
#print("Recalling the Input and Output Size: \n")
#print("  Input Size: ", num_of_input_features)
#print("  Output Size: ", num_of_desired_outputs)

#TODO:  Saving the Checkpoint will be unique to the model architecture...
checkpoint_file_name = 'my_model_chckpnt.pt'

torch.save(
    {
        'input_size': num_of_input_features,
        'output_size': num_of_desired_outputs,
        'epoch': epoch + 1,
        'batch_size': 64,
        'model': models.vgg16(pretrained=True),
        'classifier': trained_classifier,
        'optimizer': optimizer.state_dict(),
        'state_dict': trained_model.state_dict(),
        'loss': loss,
        'class_to_idx': image_datasets.class_to_idx
    }, checkpoint_file_name)

print('Trained Model Checkpoint Saved...\n')
print('File         :', checkpoint_file_name)
print('Access time  :', time.ctime(os.path.getatime(checkpoint_file_name)))
print('Modified time:', time.ctime(os.path.getmtime(checkpoint_file_name)))
print('Change time  :', time.ctime(os.path.getctime(checkpoint_file_name)))
print('Size         :', os.path.getsize(checkpoint_file_name))

# ## Loading the checkpoint
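
The loading half is not included in this fragment; a sketch of the matching load step, assuming exactly the checkpoint layout saved above:

def load_checkpoint(path):
    checkpoint = torch.load(path)
    model = checkpoint['model']                  # the saved vgg16 backbone
    model.classifier = checkpoint['classifier']  # re-attach the trained classifier
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    return model

model = load_checkpoint(checkpoint_file_name)
model.eval()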
Example #40
0
 def pretrained_state_dict(cls):
     weights = vgg16(pretrained=True).state_dict().values()
     return dict(zip(cls().state_dict().keys(), weights))
import csv
import torch
import numpy as np
import torch.nn as nn
import torchvision.models as models
import matplotlib.image as img
import PIL.Image
  
# csv file name 
filename = "ImageID.csv"
fname="Y.csv"
  
# initializing the titles and rows list 
fields = [] 
rows = [] 
imdata=None
model = models.vgg16(pretrained=True)
new_classifier = nn.Sequential(*list(model.classifier.children())[:-1])
model.classifier = new_classifier
print(*list(model.children()))
#print(imdata.shape)
  
# reading csv file 
with open(filename, 'r') as csvfile: 
  # creating a csv reader object 
  csvreader = csv.reader(csvfile) 
  
  # extracting each data row one by one 
  i=0
  j=0
  for row in csvreader: 
    try:
    def __init__(self):
        """Declare all needed layers."""
        nn.Module.__init__(self)

        # Convolution and pooling layers of VGG-16.
        self.features = models.vgg16(pretrained=False).features
Example #43
0
netD_B_L = Discriminator_local(input_channel)

netGAtoB.to(device)
netGBtoA.to(device)
netD_A.to(device)
netD_B.to(device)
netD_A_L.to(device)
netD_B_L.to(device)

criterionGAN = nn.MSELoss()
criterionCycle = nn.L1Loss()


def criterionColor(pred, label):
    blur_rgb1 = blur_rgb(pred)
    blur_rgb2 = blur_rgb(label)
    return cl(blur_rgb1, blur_rgb2)

vgg_model = vgg16(pretrained=True).features[:16]
vgg_model = vgg_model.to(device)
for param in vgg_model.parameters():
    param.requires_grad = False
criterionPer=LossNetwork(vgg_model)
criterion_identity = torch.nn.L1Loss()

optimizer_G = torch.optim.Adam(itertools.chain(netGAtoB.parameters(), netGBtoA.parameters()),
                                lr=lr, betas=(0.5, 0.999))
optimizer_D_A = torch.optim.Adam(netD_A.parameters(), lr=lr, betas=(0.5, 0.999))
optimizer_D_B = torch.optim.Adam(netD_B.parameters(), lr=lr, betas=(0.5, 0.999))

optimizer_D_A_L = torch.optim.Adam(netD_A_L.parameters(), lr=lr, betas=(0.5, 0.999))
optimizer_D_B_L = torch.optim.Adam(netD_B_L.parameters(), lr=lr, betas=(0.5, 0.999))

lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(optimizer_G, lr_lambda=LambdaLR(epochs,decay_epoch).step)
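
LossNetwork is not defined in this fragment; a common minimal implementation of a perceptual (feature-matching) loss over the frozen VGG slice built above would look like this (a sketch, not the project's actual class):

import torch.nn as nn

class LossNetwork(nn.Module):
    def __init__(self, vgg_features):
        super(LossNetwork, self).__init__()
        self.vgg_features = vgg_features.eval()
        self.criterion = nn.L1Loss()

    def forward(self, pred, target):
        # compare activations of the frozen VGG slice instead of raw pixels
        return self.criterion(self.vgg_features(pred),
                              self.vgg_features(target))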
import ast
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
import torchvision.models as models
from torch import __version__

resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
vgg16 = models.vgg16(pretrained=True)

models = {'resnet': resnet18, 'alexnet': alexnet, 'vgg': vgg16}

# obtain ImageNet labels
with open('imagenet1000_clsid_to_human.txt') as imagenet_classes_file:
    imagenet_classes_dict = ast.literal_eval(imagenet_classes_file.read())

def classifier(img_path, model_name):
    # load the image
    img_pil = Image.open(img_path)

    # define transforms
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    
    # preprocess the image
    img_tensor = preprocess(img_pil)
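
The fragment is cut off here; a hedged sketch of how such a classifier function typically finishes (assumes `import torch` alongside the imports above; the helper name is illustrative):

def finish_classification(img_tensor, model_name):
    model = models[model_name]                   # the dict defined above
    model.eval()
    with torch.no_grad():
        output = model(img_tensor.unsqueeze(0))  # add a batch dimension
    pred_idx = output.argmax(dim=1).item()
    return imagenet_classes_dict[pred_idx]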
Example #45
0
File: test_torch.py  Project: repson/client
 def __init__(self):
     super(VGGConcator, self).__init__()
     self.vgg = models.vgg16(pretrained=False)
     self.vgg.classifier = nn.Sequential(
         *list(self.vgg.classifier.children())[:-1])
Example #46
0
            # a tensor of shape 28x28
            self.conv_output = x[0, self.selected_filter]
            # Loss function is the mean of the output of the selected layer/filter
            # We try to minimize the mean of the output of that specific filter
            loss = -torch.mean(self.conv_output)
            print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
            # Backward
            loss.backward()
            # Update image
            optimizer.step()
            # Recreate image
            self.created_image = recreate_image(processed_image)
            # Save image
            if i % 5 == 0:
                im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \
                    '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
                save_image(self.created_image, im_path)


if __name__ == '__main__':
    cnn_layer = 17
    filter_pos = 5
    # Fully connected layer is not needed
    pretrained_model = models.vgg16(pretrained=True).features
    layer_vis = CNNLayerVisualization(pretrained_model, cnn_layer, filter_pos)

    # Layer visualization with pytorch hooks
    layer_vis.visualise_layer_with_hooks()

    # Layer visualization without pytorch hooks
    # layer_vis.visualise_layer_without_hooks()
In this chapter we will look at how to persist model state with saving and loading, and how to run model predictions.
"""

import torch
import torch.onnx as onnx
import torchvision.models as models

#######################################################################
# Saving and Loading Model Weights
# ------------------------------------------------------------------------------------------
#
# PyTorch models store the learned parameters in an internal state dictionary, called ``state_dict``.
# These can be persisted with the ``torch.save`` method:

model = models.vgg16(pretrained=True)
torch.save(model.state_dict(), 'model_weights.pth')

##########################
# To load model weights, you need to create an instance of the same model first, and then load
# the parameters using the ``load_state_dict()`` method.

model = models.vgg16()  # we do not specify pretrained=True, i.e. we do not load the default weights
model.load_state_dict(torch.load('model_weights.pth'))
model.eval()

###########################
# .. note:: Be sure to call the ``model.eval()`` method before inference to set the dropout and batch normalization layers to evaluation mode. Failing to do this will yield inconsistent inference results.

#######################################################################
# Saving and Loading Models with Shapes
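#
# The fragment breaks off at this heading. In the original tutorial this section
# passes the whole model (not just its ``state_dict``) to the save function, so the
# class structure travels with the weights (reconstructed here; this approach uses
# pickle, so the class definition must be available when loading):

torch.save(model, 'model.pth')

model = torch.load('model.pth')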
 def __init__(self, fine_tune=False):
     super(fc7_Extractor, self).__init__()
     self.pretrained = models.vgg16(pretrained=True)
     self.fine_tune(fine_tune)
Example #49
0
Save and Load the Model
============================

In this section we will look at how to persist model state with saving, loading and running model predictions.
"""

import torch
import torchvision.models as models

#######################################################################
# Saving and Loading Model Weights
# --------------------------------
# PyTorch models store the learned parameters in an internal
# state dictionary, called ``state_dict``. These can be persisted via the ``torch.save``
# method:

model = models.vgg16(pretrained=True)
torch.save(model.state_dict(), 'model_weights.pth')

##########################
# To load model weights, you need to create an instance of the same model first, and then load
# the parameters using the ``load_state_dict()`` method.

model = models.vgg16(
)  # we do not specify pretrained=True, i.e. do not load default weights
model.load_state_dict(torch.load('model_weights.pth'))
model.eval()

###########################
# .. note:: be sure to call ``model.eval()`` method before inferencing to set the dropout and batch normalization layers to evaluation mode. Failing to do this will yield inconsistent inference results.

#######################################################################
Example #50
0
import torch
import torchvision.models as models
from torch import nn

vgg16_pretrained = models.vgg16(pretrained=False)


def decoder(input_channel, output_channel, num=3):
    if num == 3:
        decoder_body = nn.Sequential(
            nn.Conv2d(input_channel, input_channel, 3, padding=1),
            nn.Conv2d(input_channel, input_channel, 3, padding=1),
            nn.Conv2d(input_channel, output_channel, 3, padding=1))
    elif num == 2:
        decoder_body = nn.Sequential(
            nn.Conv2d(input_channel, input_channel, 3, padding=1),
            nn.Conv2d(input_channel, output_channel, 3, padding=1))

    return decoder_body


class VGG16_deconv(torch.nn.Module):
    def __init__(self):
        super(VGG16_deconv, self).__init__()

        pool_list = [4, 9, 16, 23, 30]
        for index in pool_list:
            vgg16_pretrained.features[index].return_indices = True

        self.encoder1 = vgg16_pretrained.features[:4]
        self.pool1 = vgg16_pretrained.features[4]
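
The encoder pools are switched to return_indices=True so a decoder can undo them with nn.MaxUnpool2d; a minimal sketch of the matching unpool step (assumed, since the forward pass is not part of this fragment):

unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)

def decode_stage(x, indices, decoder_body):
    x = unpool(x, indices)  # undo the max-pool using the stored indices
    return decoder_body(x)

# e.g., inside the model's forward pass (decoder1 is a hypothetical decoder() module):
#   x, idx1 = self.pool1(self.encoder1(x))
#   ...
#   x = decode_stage(x, idx1, self.decoder1)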
Example #51
0
            ax = plt.subplot(num_images//2, 2, images_so_far)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(dset_classes[preds[j]]))
            imshow(inputs.cpu().data[j])

            if images_so_far == num_images:
                return

######################################################################
# Finetuning the convnet
# ----------------------
#
# Load a pretrained model and reset final fully connected layer.
#

model_ft = models.vgg16(pretrained=True)

mod_f = list(model_ft.features.children())
#for popidx in range(30, 22, -1):
for popidx in range(30, 15, -1):
    mod_f.pop(popidx)
mod_f.append(nn.MaxPool2d(kernel_size=8, stride=8))
new_features = nn.Sequential(*mod_f)
model_ft.features = new_features

mod = list(model_ft.classifier.children())
mod.pop()
mod.append(nn.Linear(4096, 2))
mod.pop(0)
mod.insert(0, nn.Linear(12544, 4096))
new_classifier = nn.Sequential(*mod)