Example 1
def main(argv=None):
    model = VGG(train_images_dir='data/train/',
                val_images_dir='data/val/',
                test_images_dir='data/test/',
                num_epochs=3,
                train_batch_size=64,
                val_batch_size=2500,
                test_batch_size=10000,
                height_of_image=28,
                width_of_image=28,
                num_channels=1,
                num_classes=10,
                learning_rate=0.00001,
                base_dir='results',
                max_to_keep=20,
                model_name="VGG",
                flatten=False)

    model.create_network()
    model.initialize_network()

    if True:
        model.train_model(1, 1, 1, 1)
    else:
        model.test_model()
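
The "if True:" above hard-codes the training branch; a hedged sketch of driving the same switch from the command line (the --test flag is an illustration, not part of the original script):

import argparse

def parse_args(argv=None):
    # illustrative command-line switch; the original hard-codes the branch with `if True:`
    parser = argparse.ArgumentParser()
    parser.add_argument('--test', action='store_true',
                        help='run evaluation instead of training')
    return parser.parse_args(argv)

def main(argv=None):
    args = parse_args(argv)
    model = VGG(train_images_dir='data/train/', val_images_dir='data/val/',
                test_images_dir='data/test/', num_epochs=3, train_batch_size=64,
                val_batch_size=2500, test_batch_size=10000, height_of_image=28,
                width_of_image=28, num_channels=1, num_classes=10,
                learning_rate=0.00001, base_dir='results', max_to_keep=20,
                model_name="VGG", flatten=False)
    model.create_network()
    model.initialize_network()
    if args.test:
        model.test_model()
    else:
        model.train_model(1, 1, 1, 1)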
Example 2
    def configure_model(self):
        """

        :return:
        """
        arch: str = self.hparams.arch
        batch_norm = self.hparams.batch_norm
        dataset: str = self.hparams.dataset
        hidden_layers: int = self.hparams.hidden_layers
        hidden_size: int = self.hparams.hidden_size
        if arch == 'mlp':
            if dataset == 'mnist':
                return MLP(input_size=784,
                           hidden_size=hidden_size,
                           num_hidden_layers=hidden_layers,
                           batch_norm=batch_norm)
            elif dataset == 'cifar10':
                return MLP(hidden_size=hidden_size,
                           num_hidden_layers=hidden_layers,
                           batch_norm=batch_norm)
            else:
                raise ValueError('invalid dataset specification!')
        elif arch == 'alexnet':
            return AlexNet()
        elif arch == 'vgg11':
            return VGG(vgg_name='VGG11')
        elif arch == 'vgg13':
            return VGG(vgg_name='VGG13')
        elif arch == 'resnet18':
            return ResNet18()
        elif arch == 'resnet34':
            return ResNet34()
        else:
            raise ValueError('Unsupported model!')
Example 3
 def _load_custom(self, model_id):
     # return the model if it's already been constructed
     if model_id in self.custom_models:
         return self.custom_models[model_id]
     model_dir = os.path.join(self.custom_models_dir,
                              'model_{}'.format(model_id))
     print(model_dir)
     if not os.path.exists(model_dir):
         raise OSError(
             "Directory for custom model {} does not exist!".format(
                 model_id))
     config_path = os.path.join(model_dir,
                                'config_{}.json'.format(model_id))
     weights_path = os.path.join(model_dir,
                                 'weights_{}.h5'.format(model_id))
     with open(config_path) as f:
         config = json.load(f)
     # print('Hyperparameters: ', config['hparams'], type(config['hparams']))
     # print('Train Time: {}'.format(config['train_time']))
     # train_acc, test_acc = config['train_acc'], config['test_acc']
     # print('Train Accuracy : {} | Test Accuracy {}'.format(train_acc, test_acc))
     hparams = argparse.Namespace(**config['hparams'])
     model = VGG(hparams)
     model.load_weights(weights_path)
     self.custom_models[model_id] = model
     return model
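
_load_custom expects each model_<id> directory to hold a config_<id>.json with an 'hparams' entry plus a weights_<id>.h5 file. A minimal sketch of the writing side, assuming the model exposes a Keras-style save_weights (the helper name save_custom is invented for illustration):

import json
import os

def save_custom(model, hparams_dict, model_id, custom_models_dir):
    # writes the layout _load_custom expects: model_<id>/config_<id>.json + weights_<id>.h5
    model_dir = os.path.join(custom_models_dir, 'model_{}'.format(model_id))
    os.makedirs(model_dir, exist_ok=True)
    with open(os.path.join(model_dir, 'config_{}.json'.format(model_id)), 'w') as f:
        json.dump({'hparams': hparams_dict}, f)
    model.save_weights(os.path.join(model_dir, 'weights_{}.h5'.format(model_id)))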
Example 4
def run_tests():
    h = load_history("nov24_history")
    trajectory = h['trajectory']
    thetas = [trajectory[0], trajectory[0]]
    res = average_with_weights(thetas, [0.5, 0.5])
    # print(res)

    model = VGG("VGG16")
    model.load_params(res)
Example 5
def construct_model(config_path, weights_path):
    # Recover JSON contents
    with open(config_path) as config_file:
        config = json.load(config_file)
        print('Hyperparameters: ', config['hparams'], type(config['hparams']))
        print('Train Time: {}'.format(config['train_time']))
        train_acc, test_acc = config['train_acc'], config['test_acc']
        print('Train Accuracy : {} | Test Accuracy {}'.format(
            train_acc, test_acc))
        args = Namespace(**config['hparams'])
    model = VGG(args)
    model.load_weights(weights_path)
    return model
Example 6
    def get_model():
        if mode == '':
            # a 25 layers deep VGG-style network with batchnorm
            k = 32
            model = VGG(input_shape=x_train.shape[1:],
                        nbstages=4,
                        nblayers=[6] * 4,
                        nbfilters=[1 * k, 2 * k, 4 * k, 8 * k],
                        nbclasses=y_train.shape[1],
                        use_bias=False,
                        batchnorm_training=False,
                        kernel_initializer='he_uniform')
        elif mode == 'fast':
            k = 16
            # a 13 layers deep VGG-style network with batchnorm
            model = VGG(input_shape=x_train.shape[1:],
                        nbstages=4,
                        nblayers=[3] * 4,
                        nbfilters=[1 * k, 2 * k, 4 * k, 8 * k],
                        nbclasses=y_train.shape[1],
                        use_bias=False,
                        batchnorm_training=False,
                        kernel_initializer='he_uniform')

        weights_location = 'model_initial_weights/cifar10_initial_weights' + mode + '.h5'
        if 'cifar10_initial_weights' + mode + '.h5' not in os.listdir(
                'model_initial_weights'):
            model.save_weights(weights_location)
        else:
            model.load_weights(weights_location)

        return model
Example 7
def main(args):
    imgs = load_image(args.input, args.ref)

    vgg = VGG(model_type='vgg19').to(device)
    swapper = Swapper().to(device)

    map_in = vgg(imgs['bic'].to(device), TARGET_LAYERS)
    map_ref = vgg(imgs['ref'].to(device), TARGET_LAYERS)
    map_ref_blur = vgg(imgs['ref_blur'].to(device), TARGET_LAYERS)

    with torch.no_grad(), timer('Feature swapping'):
        maps, weights, correspondences = swapper(map_in, map_ref, map_ref_blur)

    model = SRNTT(use_weights=args.use_weights).to(device)
    model.load_state_dict(torch.load(args.weight))

    img_hr = imgs['hr'].to(device)
    img_lr = imgs['lr'].to(device)
    maps = {
        k: torch.tensor(v).unsqueeze(0).to(device)
        for k, v in maps.items()
    }
    weights = torch.tensor(weights).reshape(1, 1, *weights.shape).to(device)

    with torch.no_grad(), timer('Inference'):
        _, img_sr = model(img_lr, maps, weights)

    psnr = PSNR()(img_sr.clamp(0, 1), img_hr.clamp(0, 1)).item()
    ssim = SSIM()(img_sr.clamp(0, 1), img_hr.clamp(0, 1)).item()
    print(f'[Result] PSNR:{psnr:.2f}, SSIM:{ssim:.4f}')

    save_image(img_sr.clamp(0, 1), './out.png')
Example 8
def main(args):
    dataroot = Path(args.dataroot)
    save_dir = dataroot / 'map'
    save_dir.mkdir(exist_ok=True)

    dataset = SwappingDataset(
        dataroot=dataroot, input_size=40 if 'CUFED' in dataroot.name else 80)
    dataloader = DataLoader(dataset)
    model = VGG(model_type='vgg19').to(device)
    swapper = Swapper(args.patch_size, args.stride).to(device)

    for i, batch in enumerate(tqdm(dataloader), 1):
        img_in = batch['img_in'].to(device)
        img_ref = batch['img_ref'].to(device)
        img_ref_blur = batch['img_ref_blur'].to(device)

        map_in = model(img_in, TARGET_LAYERS)
        map_ref = model(img_ref, TARGET_LAYERS)
        map_ref_blur = model(img_ref_blur, TARGET_LAYERS)

        maps, weights, correspondences = swapper(map_in, map_ref, map_ref_blur)

        np.savez_compressed(save_dir / f'{batch["filename"][0]}.npz',
                            relu1_1=maps['relu1_1'],
                            relu2_1=maps['relu2_1'],
                            relu3_1=maps['relu3_1'],
                            weights=weights,
                            correspondences=correspondences)

        if args.debug and i == 10:
            break
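
The .npz files written above can be read back with plain NumPy; a minimal sketch of the consuming side:

import numpy as np

def load_swapped_maps(npz_path):
    # read back the arrays written by np.savez_compressed above
    data = np.load(npz_path)
    maps = {k: data[k] for k in ('relu1_1', 'relu2_1', 'relu3_1')}
    return maps, data['weights'], data['correspondences']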
Example 9
    def __init__(self, use_weights=False):
        super(TextureLoss, self).__init__()
        self.use_weights = use_weights

        self.model = VGG(model_type='vgg19')
        self.register_buffer('a', torch.tensor(-20., requires_grad=False))
        self.register_buffer('b', torch.tensor(.65, requires_grad=False))
Example 10
def model_selection(conf):
    """
    Build the network selected by conf["architecture"] and store it in conf["model"]
    :param conf: Configurator
    :return: conf, with the constructed model stored under conf["model"]
    """
    if conf["architecture"] == "vgg":
        conf["model"] = VGG(conf["in_channels"], conf["out_channels"],
                            conf["base_channels"], conf["n_layers"],
                            conf["input_shape"])
    elif conf["architecture"] == "ensemble":
        conf["model"] = Ensemble_Network(conf["in_channels"],
                                         conf["base_channels"],
                                         conf["n_layers"],
                                         conf["out_channels"])
    elif conf["architecture"] == "locally_connected":
        conf["model"] = Locally_Connected_Network(conf["in_channels"],
                                                  conf["out_channels"],
                                                  conf["base_channels"],
                                                  conf["n_layers"])
    elif conf["architecture"] == "fully_connected":
        conf["model"] = Fully_Connected(conf["in_channels"],
                                        conf["out_channels"],
                                        conf["base_channels"],
                                        conf["n_layers"])
    return conf
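
A hedged usage sketch of model_selection; the keys come from the function above, the concrete values are placeholders:

conf = {
    "architecture": "vgg",
    "in_channels": 1,          # placeholder values
    "out_channels": 10,
    "base_channels": 32,
    "n_layers": 4,
    "input_shape": (1, 28, 28),
}
conf = model_selection(conf)
model = conf["model"]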
Example 11
def build_model(config):
    if config['model'] == 'ResNet18':
        model = ResNet18(color_channel=config['color_channel'])
    elif config['model'] == 'VGG11':
        model = VGG('VGG11', color_channel=config['color_channel'])
    elif config['model'] == 'VGG13':
        model = VGG('VGG13', color_channel=config['color_channel'])
    else:
        raise ValueError('wrong model option: {}'.format(config['model']))
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=config['lr'],
                          momentum=config['momentum'],
                          weight_decay=config['weight_decay'])

    return model, loss_function, optimizer
Example 12
def run(args):
    model = L.Classifier(VGG())
    train, test = chainer.datasets.get_cifar10()

    optimizer = chainer.optimizers.MomentumSGD(lr=0.05)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(5e-4))

    train_iters = [
        chainer.iterators.MultiprocessIterator(i,
                                               args.batch_size,
                                               n_processes=args.loader_jobs)
        for i in chainer.datasets.split_dataset_n_random(train, len(args.gpu))
    ]
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batch_size,
                                                 repeat=False,
                                                 shuffle=False)

    updater = chainer.training.updaters.MultiprocessParallelUpdater(
        train_iters, optimizer, devices=args.gpu)

    trainer = chainer.training.Trainer(updater, (args.epochs, 'epoch'),
                                       out=args.out)

    if args.ema_rate != 0.0:
        print("use ema (%f)" % args.ema_rate)
        ema = ExponentialMovingAverage(target=model,
                                       rate=args.ema_rate,
                                       device=args.gpu[0])
        optimizer.add_hook(ema)

        eval_model = ema.shadow_target

        trainer.extend(ema)
    else:
        print("no ema")
        eval_model = model

    # here `eval_model` is passed to the evaluator instead of the original `model`
    trainer.extend(
        extensions.Evaluator(test_iter, eval_model, device=args.gpu[0]))

    # add ordinary extensions
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'elapsed_time', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy'
        ]))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.snapshot(), trigger=(1, 'epoch'))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
Example 13
def pt2stru(pt):
    if "lenet" in pt:
        return LeNet()
    elif "l0net" in pt:
        return L0Net(mean=1)
    elif "VGG" in pt:
        if "l0" in pt:
            return L0VGG(cifar10_network, loc=g_mean, temp=g_temp)
        else:
            return VGG(cifar10_network)
Example 14
def get_model(name: str):
    name = name.lower()
    if name == 'vgg11':
        return VGG('VGG11')
    elif name == 'resnet18':
        return ResNet18()
    elif name == 'resnet34':
        return ResNet34()
    elif name == 'resnet50':
        return ResNet50()
    else:
        raise ValueError('unknown model name: {}'.format(name))
Example 15
def build_internal_nn(model_name, *args, **kwargs):
    if model_name == "VGG19":
        return VGG('VGG19')
    elif model_name == "GoogLeNet":
        return GoogLeNet(*args, **kwargs)
    elif model_name == "MobileNetV2":
        return MobileNetV2(*args, **kwargs)
    elif model_name == "SENet18":
        return SENet18(*args, **kwargs)
    else:
        raise ValueError("Unknown model name : {}".format(model_name))
Example 16
def model_build(resume):
    print('==> Building model..')
    net = VGG('VGG16')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # print(device)
    # use_cuda = torch.cuda.is_available()
    net = net.to(device)
    # if a GPU is available, use it
    if device == 'cuda':

        # parallel use GPU
        net = torch.nn.DataParallel(net)
        # speed up slightly
        cudnn.benchmark = True
    #else:
    #net = VGG('VGG16')
    if resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isdir(
            'checkpoint'), 'Error: no checkpoint directory found!'
        # loading a checkpoint saved in .pth format
        #checkpoint = torch.load('./checkpoint/ckpt.pth', map_location=torch.device('cpu'))
        #net.load_state_dict(checkpoint['net'])
        #best_acc = checkpoint['acc']
        #start_epoch = checkpoint['epoch']

        # loading a checkpoint saved in .pkl format
        #net.load_state_dict(torch.load('./checkpoint/ckpt.pkl', map_location=torch.device('cpu')))

        net_dict = torch.load('./checkpoint/ckpt.pkl',
                              map_location=torch.device('cpu'))
        # if loading complains about the "module." prefix, use the code below
        new_state_dict = OrderedDict()
        for k, v in net_dict.items():
            name = k[7:]  # remove `module.`
            new_state_dict[name] = v
        # load params
        net.load_state_dict(new_state_dict)

    return net, device
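
The key-renaming loop above strips the "module." prefix that DataParallel adds when a wrapped model's state dict is saved. A common way to avoid the issue, sketched here rather than taken from the original repo, is to save the unwrapped module in the first place (net is the model from the snippet above):

import torch

# saving side (sketch): unwrap DataParallel before writing the checkpoint so the
# keys carry no "module." prefix and load directly into a bare model
state = net.module.state_dict() if isinstance(net, torch.nn.DataParallel) else net.state_dict()
torch.save(state, './checkpoint/ckpt.pkl')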
Example 17
def main():
    # check the configurations
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')

    # prepare data for training
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    trainset = torchvision.datasets.CIFAR10(root='../data',
                                            train=True,
                                            download=True,
                                            transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              shuffle=True,
                                              batch_size=128,
                                              num_workers=4,
                                              pin_memory=True)

    testset = torchvision.datasets.CIFAR10(root='../data',
                                           train=False,
                                           download=True,
                                           transform=transform)
    testloader = torch.utils.data.DataLoader(testset,
                                             shuffle=False,
                                             batch_size=128,
                                             num_workers=4,
                                             pin_memory=True)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    # initialize the model
    net = VGG().cuda() if use_cuda else VGG()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=0.05,
                          momentum=0.9,
                          weight_decay=5e-4)

    # training or loading the neural network
    # model_train(net, trainloader, criterion, optimizer, epochs=5)
    net.load_state_dict(
        torch.load('resources/vgg16_cifar10.bin', map_location='cpu'))
    print('Neural network ready.')

    # evaluate the model performance
    accuracy, _ = model_eval(net, testloader, criterion)
    print('Accuracy of the network on the clean test images: %d %%' %
          (100 * accuracy))

    accuracy, _ = model_eval(net,
                             testloader,
                             criterion,
                             attack_method=illcm_attack)
    print('Accuracy of the network on the adversarial test images: %d %%' %
          (100 * accuracy))
Example 18
def load_model(config):
    if config['model'] == 'ResNet18':
        model = ResNet18(color_channel=config['color_channel'])
    elif config['model'] == 'VGG11':
        model = VGG('VGG11', color_channel=config['color_channel'])
    elif config['model'] == 'VGG13':
        model = VGG('VGG13', color_channel=config['color_channel'])
    else:
        raise ValueError('wrong model option: {}'.format(config['model']))
    model_path = config['dir_path'] + '/models/' + config['data'] + '_' + config['model'] + '_t1=' + \
                 str(config['t1']) + '_R=' + config['R'] + "_" + config['fixed'] + '.pt'
    model.load_state_dict(torch.load(model_path))
    model.cuda()
    return model
Example 19
def get_network(name, baseline=True, **kwargs):
    mean = kwargs.get("mean")
    temp = kwargs.get("temp")
    if name == "mnist":
        if baseline: return LeNet()
        else:
            try:
                return L0Net(mean=mean, temp=temp)
            except KeyError:
                print("No Key Named 'mean'")
    elif name == "cifar10":
        if baseline: return VGG(cifar10_network)
        else:
            return L0VGG(cifar10_network, loc=mean, temp=temp)
Example 20
def get_model(name, device):
    """
    Returns required classifier and autoencoder
    :param name:
    :return: Autoencoder, Classifier
    """
    if name == 'lenet':
        model = LeNet(in_channels=channels).to(device)
    elif name == 'alexnet':
        model = AlexNet(channels=channels, num_classes=10).to(device)
    elif name == 'vgg':
        model = VGG(in_channels=channels, num_classes=10).to(device)

    autoencoder = CAE(in_channels=channels).to(device)
    return model, autoencoder
Example 21
def net(netname,num_classes,Train=True,Dataset=None):
    netname=netname.lower()
    if netname == "dnet34-34":
        return Resnet.Dnet34_34(num_classes)
    elif netname=="dnet50-34":
        return Resnet.Dnet50_34(num_classes)
    elif netname=="dnet34-18":
        return Resnet.Dnet34_18(num_classes)
    elif netname=="dnet18-18":
        return Resnet.Dnet18_18(num_classes)
    elif netname=="resnet18":
        return Resnet.Resnet18(num_classes)
    elif netname=="resnet34":
        return Resnet.Resnet34(num_classes)
    elif netname=="resnet50":
        return Resnet.Resnet50(num_classes)
    elif netname=="vgg19":
        return VGG.VGG19(num_classes)
    elif netname=="vgg16":
        return VGG.VGG16(num_classes)
    elif netname=="resnet50-vgg16":
        return VGG.Resnet50_VGG16(num_classes)
    else:
        raise RuntimeError('Unsupported network')
Example 22
    def __init__(self, model,dataset_index=0,video_target = None):

        if args.video is None:
            
            self.video_target = video_target
            customset_train = CustomDataset(path = args.dataset_path,subset_type="training",dataset_index=dataset_index,video_target = video_target)
            customset_test = CustomDataset(path = args.dataset_path,subset_type="testing",dataset_index=dataset_index, video_target = video_target)
        
            self.trainloader = torch.utils.data.DataLoader(dataset=customset_train,batch_size=args.batch_size,shuffle=True,num_workers=args.num_workers)
            self.testloader = torch.utils.data.DataLoader(dataset=customset_test,batch_size=args.batch_size,shuffle=False,num_workers=args.num_workers)    
        else:
            video_dataset = VideoDataset(video=args.video, batch_size=args.batch_size,
                                        frame_skip=int(args.frame_skip),image_folder=args.extract_frames_path, use_existing=args.use_existing_frames)
            
            self.videoloader = torch.utils.data.DataLoader(dataset=video_dataset, batch_size=1,shuffle=False,num_workers=args.num_workers)

   
        if (model == "alex"):
            self.model = AlexNet()
        elif (model == "vgg"):
            self.model = VGG()
        elif (model == "resnet"):
            self.model = ResNet()

        if args.pretrained_model is not None:
            if not args.pretrained_finetuning:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                print("DEBUG: make it load only part of the resnet model")
                #print(self.model)
                #self.model.load_state_dict(torch.load(args.pretrained_model))
                #for param in self.model.parameters():
                #    param.requires_grad = False
                self.model.fc = nn.Linear(512, 1000)
                #print(self.model)
                self.model.load_state_dict(torch.load(args.pretrained_model))
                self.model.fc = nn.Linear(512,3)
                #print(self.model)
                
        self.model.cuda()        
        print "Using weight decay: ",args.weight_decay
        self.optimizer = optim.SGD(self.model.parameters(), weight_decay=float(args.weight_decay),lr=0.01, momentum=0.9,nesterov=True)
        self.criterion = nn.CrossEntropyLoss().cuda()
Example 23
def build_model(device, model_name, num_classes=10):
    """构建模型:vgg、vggnonorm、resnet、preactresnet、googlenet、densenet、
                resnext、mobilenet、mobilenetv2、dpn、shufflenetg2、senet、shufflenetv2

    :param device: 'cuda' if you have a GPU, 'cpu' otherwise
    :param model_name: One of the models available in the folder 'models'
    :param num_classes: 10 or 100 depending on the chosen dataset
    :return: The model architecture
    """
    print('==> Building model..')
    model_name = model_name.lower()
    if model_name == 'vgg':
        net = VGG('VGG19', num_classes=num_classes)
    elif model_name == 'vggnonorm':
        net = VGG('VGG19', num_classes=num_classes, batch_norm=False)
    elif model_name == 'resnet':
        net = ResNet18(num_classes=num_classes)
    elif model_name == 'preactresnet':
        net = PreActResNet18()
    elif model_name == 'googlenet':
        net = GoogLeNet()
    elif model_name == 'densenet':
        net = DenseNet121()
    elif model_name == 'resnext':
        net = ResNeXt29_2x64d()
    elif model_name == 'mobilenet':
        net = MobileNet()
    elif model_name == 'mobilenetv2':
        net = MobileNetV2()
    elif model_name == 'dpn':
        net = DPN92()
    elif model_name == 'shufflenetg2':
        net = ShuffleNetG2()
    elif model_name == 'senet':
        net = SENet18()
    elif model_name == 'shufflenetv2':
        net = ShuffleNetV2(1)
    else:
        raise ValueError('Error: the specified model is incorrect ({})'.format(model_name))

    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    return net
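
A short usage sketch of build_model (values are illustrative):

import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = build_model(device, 'vgg', num_classes=10)  # VGG19 with batch norm, moved to `device`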
Example 24
    def __init__(self, model,dataset_index=0, path = None):

        self.sampler = self.weighted_sampling(dataset_index=dataset_index,path=path)

        customset_train = CustomDatasetViewpoint(path = path,subset_type="training",dataset_index=dataset_index)
        customset_test = CustomDatasetViewpoint(path = path,subset_type="testing",dataset_index=dataset_index)

        # a custom sampler and shuffle=True are mutually exclusive in DataLoader, so shuffle is dropped here
        self.trainloader = torch.utils.data.DataLoader(pin_memory=True,dataset=customset_train,sampler=self.sampler,batch_size=args.batch_size,num_workers=args.num_workers)
        self.trainloader_acc = torch.utils.data.DataLoader(dataset=customset_train,batch_size=args.batch_size,shuffle=True,num_workers=args.num_workers)
        self.testloader_acc = torch.utils.data.DataLoader(dataset=customset_test,batch_size=args.batch_size,shuffle=True,num_workers=args.num_workers)

        if (model == "alex"):
            self.model = AlexNet()
        elif (model == "vgg"):
            self.model = VGG(num_classes=2)
        elif (model == "resnet"):
            self.model = ResNet()

        if args.pretrained_model is not None:
            if args.pretrained_same_architecture:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                if args.arch == "vgg":
                    self.model.soft = None
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096,1000))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096,2))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.soft = nn.LogSoftmax()
                else:
                    self.model.fc = nn.Linear(512, 1000)
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    self.model.fc = nn.Linear(512,2)     
   
        self.optimizer = optim.Adam(self.model.parameters(), weight_decay=float(args.weight_decay), lr=0.0001)
Example 25
    def __init__(self, model, dataset_index=0, path=None, viewpoints=3):

        if (model == "alex"):
            self.model = AlexNet()
        elif (model == "vgg"):
            self.model = VGG(num_classes=2)
        elif (model == "resnet"):
            self.model = ResNet()
        elif (model == "ED"):
            self.model_ED = EncoderDecoderViewpoints()

        self.model_vgg = VGG_viewpoints(num_classes=3).cuda()
        self.model_ed = EncoderDecoderViewpoints().cuda()

        self.model_vgg = nn.DataParallel(self.model_vgg,
                                         device_ids=[0, 1, 2, 3]).cuda()
        self.model_vgg.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_prepared/model_epoch_2.pth"
            ))
        mod = list(self.model_vgg.module.classifier.children())
        mod.pop()
        mod.pop()
        mod.pop()
        new_classifier = torch.nn.Sequential(*mod)
        self.model_vgg.module.new_classifier = new_classifier
        print(self.model_vgg)

        # Trained ED loading, comment to disable

        self.model_ed.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_disjointed/model_ed_epoch_20.pth"
            ))

        print(self.model_ed)
Example 26
    def get_model():
        k = 32
        model = VGG(
            input_shape=x_train.shape[1:],
            nbstages=5,
            nblayers=[2] * 5,
            nbfilters=[1 * k, 2 * k, 4 * k, 8 * k, 16 * k],
            nbclasses=y_train.shape[1],
            use_bias=False,
            batchnorm_training=False,  #use_batchnorm = False,
            kernel_initializer='he_uniform',
            batchnorm_momentum=0.9
        )  ### because training sometimes stops after very few epochs (~15)

        weights_location = 'model_initial_weights/tinyImagenet_initial_weights_batchnorm.h5'
        if 'tinyImagenet_initial_weights_batchnorm.h5' not in os.listdir(
                'model_initial_weights'):
            model.save_weights(weights_location)
        else:
            model.load_weights(weights_location)

        return model
Example 27
def main():
    # Scale and initialize the parameters
    best_prec1 = 0
    if not configs.full_epoch:
        configs.TRAIN.epochs = int(math.ceil(configs.TRAIN.epochs / configs.ADV.n_repeats))

    configs.ADV.fgsm_step /= configs.DATA.max_color_value
    configs.ADV.clip_eps /= configs.DATA.max_color_value
    
    # Create output folder
    if not os.path.isdir(os.path.join('trained_models', configs.output_name)):
        os.makedirs(os.path.join('trained_models', configs.output_name))
    
    # Log the config details
    logger.info(pad_str(' ARGUMENTS '))
    for k, v in configs.items(): print('{}: {}'.format(k, v))
    logger.info(pad_str(''))

    
    # Create the model
    # if configs.pretrained:
    #     print("=> using pre-trained model '{}'".format(configs.TRAIN.arch))
    #     model = models.__dict__[configs.TRAIN.arch](pretrained=True)
    # else:
    #     print("=> creating model '{}'".format(configs.TRAIN.arch))
    #     model = models.__dict__[configs.TRAIN.arch]()

    print('loading arma model: ')
    if configs.model =='res':
        model = ResNet_('ResNet18',True,'CIFAR10',0,3,3)
    else:
        model = VGG('VGG16',True,'CIFAR10',0,3,3)

    # Wrap the model into DataParallel
    model = torch.nn.DataParallel(model).cuda()
    
    # Criterion:
    criterion = nn.CrossEntropyLoss().cuda()
    
    # Optimizer:
    optimizer = torch.optim.SGD(model.parameters(), configs.TRAIN.lr,
                                momentum=configs.TRAIN.momentum,
                                weight_decay=configs.TRAIN.weight_decay)
    
    # Resume if a valid checkpoint path is provided
    if configs.resume:
        if os.path.isfile(configs.resume):
            print("=> loading checkpoint '{}'".format(configs.resume))
            checkpoint = torch.load(configs.resume)
            configs.TRAIN.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(configs.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(configs.resume))


    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])


    train_dataset = datasets.CIFAR10('../data', train=True, download=True,transform=transform_train)
    test_dataset = datasets.CIFAR10('../data', train=False, transform=transform_test)

    train_loader = torch.utils.data.DataLoader(
                train_dataset, batch_size=256, shuffle=True,
                num_workers=16, pin_memory=True, drop_last=True)

    val_loader = torch.utils.data.DataLoader(test_dataset,batch_size=256,\
        shuffle=False,num_workers=16, pin_memory=True)


    # Initiate data loaders
    # traindir = os.path.join(configs.data, 'train')
    # valdir = os.path.join(configs.data, 'val')
    
    # train_dataset = datasets.ImageFolder(
    #     traindir,
    #     transforms.Compose([
    #         transforms.RandomResizedCrop(configs.DATA.crop_size),
    #         transforms.RandomHorizontalFlip(),
    #         transforms.ToTensor(),
    #     ]))

    # train_loader = torch.utils.data.DataLoader(
    #     train_dataset, batch_size=configs.DATA.batch_size, shuffle=True,
    #     num_workers=configs.DATA.workers, pin_memory=True, sampler=None)
    
    normalize = transforms.Normalize(mean=configs.TRAIN.mean,
                                    std=configs.TRAIN.std)

    # val_loader = torch.utils.data.DataLoader(
    #     datasets.ImageFolder(valdir, transforms.Compose([
    #         transforms.Resize(configs.DATA.img_size),
    #         transforms.CenterCrop(configs.DATA.crop_size),
    #         transforms.ToTensor(),
    #     ])),
    #     batch_size=configs.DATA.batch_size, shuffle=False,
    #     num_workers=configs.DATA.workers, pin_memory=True)

    # If in evaluate mode: perform validation on PGD attacks as well as clean samples
    if configs.evaluate:
        logger.info(pad_str(' Performing PGD Attacks '))
        for pgd_param in configs.ADV.pgd_attack:
            validate_pgd(val_loader, model, criterion, pgd_param[0], pgd_param[1], configs, logger)
        validate(val_loader, model, criterion, configs, logger)
        return
    
    save_folder = os.path.join('trained_models', configs.output_name)
    for epoch in range(configs.TRAIN.start_epoch, configs.TRAIN.epochs):
        adjust_learning_rate(configs.TRAIN.lr, optimizer, epoch, \
            configs.ADV.n_repeats,adjust_epoch_factor=configs.adj_lr_factor)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, configs, logger)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': configs.TRAIN.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best, os.path.join('trained_models', configs.output_name))
        
    # Automatically perform PGD Attacks at the end of training
    logger.info(pad_str(' Performing PGD Attacks '))

    f = open(save_folder+'/log.txt','w')

    for pgd_param in configs.ADV.pgd_attack:
        acc = validate_pgd(val_loader, model, criterion, pgd_param[0], pgd_param[1], configs, logger)
        f.write('{} {}\n'.format(pgd_param, acc))

    f.close()
Example 28
def main(num_epochs=200,
         learning_rate=0.005,
         momentum=0.5,
         log_interval=500,
         *args,
         **kwargs):

    train_loader, test_loader = loaders.loader(batch_size_train=100,
                                               batch_size_test=1000)

    # Train the model
    total_step = len(train_loader)
    curr_lr1 = learning_rate

    model1 = VGG().to(device)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)

    # Train the model
    total_step = len(train_loader)

    best_accuracy1 = 0

    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_loader):
            images = images.to(device)
            labels = labels.to(device)

            # Forward
            outputs = model1(images)
            loss1 = criterion(outputs, labels)

            # Backward and optimize
            optimizer1.zero_grad()
            loss1.backward()
            optimizer1.step()

            if i == 499:
                print(
                    "Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}".format(
                        epoch + 1, num_epochs, i + 1, total_step,
                        loss1.item()))

        # Test the model
        model1.eval()

        with torch.no_grad():
            correct1 = 0
            total1 = 0

            for images, labels in test_loader:
                images = images.to(device)
                labels = labels.to(device)

                outputs = model1(images)
                _, predicted = torch.max(outputs.data, 1)
                total1 += labels.size(0)
                correct1 += (predicted == labels).sum().item()

            if best_accuracy1 >= correct1 / total1:
                # np.asscalar was removed from NumPy; .item() extracts the same scalar
                curr_lr1 = learning_rate * pow(np.random.rand(1), 3).item()
                update_lr(optimizer1, curr_lr1)
                print('Test Accuracy of NN: {} % Best: {} %'.format(
                    100 * correct1 / total1, 100 * best_accuracy1))
            else:
                best_accuracy1 = correct1 / total1
                net_opt1 = model1
                print('Test Accuracy of NN: {} % (improvement)'.format(
                    100 * correct1 / total1))

            model1.train()
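
update_lr is referenced but not defined in this excerpt; a minimal sketch of the helper it presumably stands for:

def update_lr(optimizer, lr):
    # apply the new learning rate to every parameter group of the optimizer
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr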
Example 29
    torch.manual_seed(args.seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            #global_model = CNNCifar(args=args)
            global_model = VGG('VGG11')
        elif args.dataset == "kvasir":
            global_model = VGG_kvasir('VGG11')
        elif args.dataset == "covid":
            global_model = CovidNet()

    elif args.model == "resnet50":
        global_model = resnet50(num_classes=3)
    elif args.model == "Mobile_Net":
        global_model = mobilenet_v2(num_classes=3)
    elif args.model == "resnetxt":
        global_model = resnext50_32x4d(num_classes=3)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
Example 30
def main():
    parser = argparse.ArgumentParser(description='Style transfer')
    parser.add_argument('--image',
                        '-i',
                        type=str,
                        default=None,
                        help='image path e.g. image.jpg')
    parser.add_argument('--style',
                        '-s',
                        type=str,
                        default=None,
                        help='style path e.g. picasso.jpg')

    args = parser.parse_args()

    #
    # Initialise
    #

    # ----------------- get data -----------------------------------------------------------
    prep = prep_data(512)
    postpa, postpb = post()

    # ----------------- get model -----------------------------------------------------------
    vgg = VGG()
    vgg.load_state_dict(torch.load(model_dir + 'vgg_conv.pth'))
    for param in vgg.parameters():
        param.requires_grad = False
    if torch.cuda.is_available():
        vgg.cuda()

    # ----------------- load images -----------------------------------------------------------
    img_dirs = [image_dir, image_dir]
    img_names = [args.style, args.image]
    imgs = [Image.open(img_dirs[i] + name) for i, name in enumerate(img_names)]
    imgs_torch = [prep(img) for img in imgs]
    if torch.cuda.is_available():
        imgs_torch = [Variable(img.unsqueeze(0).cuda()) for img in imgs_torch]
    else:
        imgs_torch = [Variable(img.unsqueeze(0)) for img in imgs_torch]
    style_image, content_image = imgs_torch

    opt_img = Variable(content_image.data.clone(), requires_grad=True)

    # ----------------- define layers -----------------------------------------------------------
    style_layers = ['r11', 'r21', 'r31', 'r41', 'r51']
    content_layers = ['r42']
    loss_layers = style_layers + content_layers
    loss_fns = [GramMSELoss()] * len(style_layers) + [nn.MSELoss()
                                                      ] * len(content_layers)
    if torch.cuda.is_available():
        loss_fns = [loss_fn.cuda() for loss_fn in loss_fns]

    #these are good weights settings:
    style_weights = [1e3 / n**2 for n in [64, 128, 256, 512, 512]]
    content_weights = [1e0]
    weights = style_weights + content_weights

    #compute optimization targets
    style_targets = [
        GramMatrix()(A).detach() for A in vgg(style_image, style_layers)
    ]
    content_targets = [A.detach() for A in vgg(content_image, content_layers)]
    targets = style_targets + content_targets

    #
    # Low res
    #

    print("processing low res...")

    out_img = train(opt_img,
                    vgg,
                    weights,
                    loss_fns,
                    targets,
                    optim,
                    loss_layers,
                    postpa,
                    postpb,
                    low_res=True)

    #
    # high res
    #

    print("processing high res...")

    #prep hr images
    prep_hr = prep_data(800)
    imgs_torch = [prep_hr(img) for img in imgs]
    if torch.cuda.is_available():
        imgs_torch = [Variable(img.unsqueeze(0).cuda()) for img in imgs_torch]
    else:
        imgs_torch = [Variable(img.unsqueeze(0)) for img in imgs_torch]
    style_image, content_image = imgs_torch

    #now initialise with upsampled lowres result
    opt_img = prep_hr(out_img).unsqueeze(0)
    opt_img = Variable(opt_img.type_as(content_image.data), requires_grad=True)

    #compute hr targets
    style_targets = [
        GramMatrix()(A).detach() for A in vgg(style_image, style_layers)
    ]
    content_targets = [A.detach() for A in vgg(content_image, content_layers)]
    targets = style_targets + content_targets

    out_img_hr = train(opt_img,
                       vgg,
                       weights,
                       loss_fns,
                       targets,
                       optim,
                       loss_layers,
                       postpa,
                       postpb,
                       low_res=False)

    out_path = (f'outputs/{str(img_names[1]).split(".")[0]}_'
                f'{str(img_names[0]).split(".")[0]}_out_hr.jpg')
    out_img_hr.save(out_path)

    print(f'output saved to: {out_path}')
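
GramMatrix and GramMSELoss come from the surrounding project; a minimal sketch of the usual Gatys-style definitions, stated as an assumption rather than the repository's exact code:

import torch
import torch.nn as nn

class GramMatrix(nn.Module):
    # channel-wise feature correlations, normalized by the spatial size
    def forward(self, x):
        b, c, h, w = x.size()
        feats = x.view(b, c, h * w)
        gram = torch.bmm(feats, feats.transpose(1, 2))
        return gram.div_(h * w)

class GramMSELoss(nn.Module):
    # MSE between the Gram matrix of the input features and a precomputed target
    def forward(self, x, target):
        return nn.functional.mse_loss(GramMatrix()(x), target)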