Example #1
    def load_model(self):
        if self.cuda:
            self.device = torch.device('cuda')
            cudnn.benchmark = True
        else:
            self.device = torch.device('cpu')

        self.model = LeNet().to(self.device)
        # self.model = AlexNet().to(self.device)
        # self.model = VGG11().to(self.device)
        # self.model = VGG13().to(self.device)
        # self.model = VGG16().to(self.device)
        # self.model = VGG19().to(self.device)
        # self.model = GoogLeNet().to(self.device)
        # self.model = resnet18().to(self.device)
        # self.model = resnet34().to(self.device)
        # self.model = resnet50().to(self.device)
        # self.model = resnet101().to(self.device)
        # self.model = resnet152().to(self.device)
        # self.model = DenseNet121().to(self.device)
        # self.model = DenseNet161().to(self.device)
        # self.model = DenseNet169().to(self.device)
        # self.model = DenseNet201().to(self.device)
        # self.model = WideResNet(depth=28, num_classes=10).to(self.device)

        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                        milestones=[75, 150],
                                                        gamma=0.5)
        self.criterion = nn.CrossEntropyLoss().to(self.device)
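
None of these snippets include the LeNet definition itself. A minimal CIFAR-style sketch, assuming the 3x32x32 inputs used in Examples #4, #5, and #9, could look like:

import torch.nn as nn
import torch.nn.functional as F

class LeNet(nn.Module):
    """Classic LeNet-5 layout for 3x32x32 inputs (e.g. CIFAR-10)."""
    def __init__(self, num_classes=10, in_channels=3):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, 6, kernel_size=5)  # 32 -> 28
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)           # 14 -> 10
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)  # 28 -> 14
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # 10 -> 5
        x = x.flatten(1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)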
Example #2
def measure_computation_fraction_lenet(train_loader):
    """Measure percentage of computation time spent in each layer of LeNet.
    """
    model = LeNet(n_channels=n_channels, size=32).to(device)
    loader = train_loader
    it = iter(loader)
    data, target = next(it)
    data, target = data.to(device), target.to(device)
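
    # The body above is truncated after fetching a single batch. A plausible
    # continuation (a sketch, not the original code) could time each leaf
    # module with pre-/post-forward hooks and report per-layer fractions:
    import time
    from collections import defaultdict

    times = defaultdict(float)

    def pre_hook(module, inputs):
        module._t0 = time.perf_counter()

    def post_hook(module, inputs, output):
        times[module._layer_name] += time.perf_counter() - module._t0

    for layer_name, m in model.named_modules():
        if not list(m.children()):  # leaf modules only
            m._layer_name = layer_name
            m.register_forward_pre_hook(pre_hook)
            m.register_forward_hook(post_hook)

    with torch.no_grad():
        model(data)  # CUDA kernels run asynchronously, so wall-clock times are approximate

    total = sum(times.values())
    for layer_name, t in sorted(times.items(), key=lambda kv: -kv[1]):
        print('%s: %.1f%%' % (layer_name, 100 * t / total))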
Example #3
def pt2stru(pt):
    if "lenet" in pt:
        return LeNet()
    elif "l0net" in pt:
        return L0Net(mean=1)
    elif "VGG" in pt:
        if "l0" in pt:
            return L0VGG(cifar10_network, loc=g_mean, temp=g_temp)
        else:
            return VGG(cifar10_network)
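
The dispatch keys off substrings of the checkpoint path: a hypothetical 'ckpt/lenet_mnist.pt' would yield a fresh LeNet() whose weights can then be restored with load_state_dict. Paths matching none of the branches fall through and return None.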
Example #4
    def test_lenet(self):
        from models import LeNet

        n_outputs = 10
        model = LeNet(num_classes=n_outputs)
        model.eval()
        x = torch.randn(20,3,32,32)
        outputs = model(x)

        self.assertTrue(outputs.shape[0] == x.shape[0])
        self.assertTrue(outputs.shape[1] == n_outputs)
Example #5
    def test_get_mods(self):
        from models import LeNet
        from altmin import get_mods

        model = LeNet()
        model.eval()
        x = torch.randn(20, 3, 32, 32)
        outputs = model(x)

        model_mods = get_mods(model)

        self.assertTrue(
            len(model.features) + len(model.classifier) >= len(model_mods))
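
The inequality suggests that get_mods may fuse adjacent submodules when flattening model.features and model.classifier, so the flattened list can be shorter than the two combined.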
Example #6
    def _load_model(self):
        self.model = LeNet()
        self.current_iteration = 0
        if os.path.exists(self.args.model_path):
            try:
                print("Loading model from: {}".format(self.args.model_path))
                self.model.load_state_dict(torch.load(self.args.model_path))
                self.current_iteration = joblib.load("{}.iter".format(
                    self.args.model_path))
            except Exception as e:
                print(
                    "Exception: {}\nCould not load model from {} - starting from scratch"
                    .format(e, self.args.model_path))
Example #7
def get_network(name, baseline=True, **kwargs):
    if name == "mnist":
        if baseline: return LeNet()
        else:
            try:
                # Index kwargs directly: kwargs.get() would return None
                # silently, and the KeyError handler below would never fire.
                return L0Net(mean=kwargs["mean"], temp=kwargs["temp"])
            except KeyError as e:
                print("No key named {}".format(e))
    elif name == "cifar10":
        if baseline: return VGG(cifar10_network)
        else:
            return L0VGG(cifar10_network, loc=kwargs.get("mean"),
                         temp=kwargs.get("temp"))
Example #8
def get_model(name, device):
    """
    Returns required classifier and autoencoder
    :param name:
    :return: Autoencoder, Classifier
    """
    if name == 'lenet':
        model = LeNet(in_channels=channels).to(device)
    elif name == 'alexnet':
        model = AlexNet(channels=channels, num_classes=10).to(device)
    elif name == 'vgg':
        model = VGG(in_channels=channels, num_classes=10).to(device)
    else:
        raise ValueError("unknown model name: {}".format(name))

    autoencoder = CAE(in_channels=channels).to(device)
    return model, autoencoder
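
Callers unpack both, e.g. model, autoencoder = get_model('lenet', device); note that channels is read from the enclosing scope rather than passed in.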
Example #9
    def test_get_codes(self):
        from models import LeNet
        from altmin import get_mods, get_codes

        model = LeNet()
        model.eval()
        x = torch.randn(20, 3, 32, 32)
        outputs = model(x)

        model_mods = get_mods(model)
        out1, codes = get_codes(model_mods, x)
        out2 = model_mods(x)

        self.assertAlmostEqual((outputs - out1).abs().mean().item(), 0)
        self.assertAlmostEqual((out1 - out2).abs().mean().item(), 0)
Example #10
def alignment_lenet(augmentations):
    """Compute the kernel target alignment on LeNet. Since the feature map is
    initialized to be random and then trained, unlike kernels where feature map
    is fixed, kernel target alignment doesn't predict the accuracy at all.
    """
    for augmentation in augmentations:
        print(augmentation.name)
        model_base = LeNet().to(device)
        optimizer = sgd_opt_from_model(model_base)
        # Train LeNet for 1 epoch first
        _ = train_all_epochs(train_loader, valid_loader, model_base, optimizer, 1)
        model = LeNetAug().to(device)
        model.load_state_dict(model_base.state_dict())
        loader = loader_from_dataset(augmentation.dataset)
        print(kernel_target_alignment_augmented(loader, model))
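
For reference, kernel target alignment (Cristianini et al., 2001) is the normalized Frobenius inner product <K, yy^T> / (||K||_F * ||yy^T||_F) between the feature-map kernel K and the label kernel yy^T; the snippet's point is that this score does not track accuracy while the feature map is still being learned.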
Example #11
def get_model(name, input_size=None, output=None):
    def strip_zero_biases(model):
        # Drop bias parameters that are identically zero (batch norm excluded).
        for _, m in model.named_modules():
            if hasattr(m, 'bias') and not isinstance(m, _BatchNorm):
                if m.bias is not None and m.bias.sum() == 0:
                    m.bias = None

    name = name.lower()
    if name == 'lenet-300-100':
        model = LeNet_300_100(input_size, output)
    elif name == 'lenet-5':
        model = LeNet(input_size, output)
    elif 'vgg' in name:
        if name == 'vgg11':
            model = vgg11(pretrained=False, num_classes=output)
        elif name == 'vgg16':
            model = vgg16(pretrained=False, num_classes=output)
        else:
            raise ValueError('unsupported VGG variant: {}'.format(name))
        strip_zero_biases(model)
    elif 'alexnet' in name:
        model = AlexNet(num_classes=output)
        strip_zero_biases(model)
    elif 'resnet' in name:
        if name == 'resnet20':
            model = resnet20(num_classes=output)
        elif name == 'resnet32':
            model = resnet32(num_classes=output)
        else:
            raise ValueError('unsupported ResNet variant: {}'.format(name))
        strip_zero_biases(model)
    else:
        raise ValueError('unknown model name: {}'.format(name))

    return model
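
For instance, get_model('resnet20', output=10) would return a CIFAR-style ResNet-20 with its all-zero bias parameters removed, while the LeNet variants are returned untouched.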
Example #12
def main(config):
    logger = prepare_logger(config)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # get loaders
    if not config.is_train_source:
        target_loader = get_loader(type="MNIST",
                                   train=False,
                                   batch_size=config.batch_size)

    source_train_loader = get_loader(type="SVHN",
                                     train=True,
                                     batch_size=config.batch_size)
    source_test_loader = get_loader(type="SVHN",
                                    train=False,
                                    batch_size=config.batch_size)

    # build source classifier
    model_src = LeNet(config.num_gpus).to(device)
    if (not config.is_train_source) or config.is_finetune:
        model_src.load_state_dict(torch.load(config.model_dir))

    # train source classifier
    if config.is_train_source:
        logger.info("train source classifier..")
        train_source(model_src, source_train_loader, source_test_loader,
                     config, logger)
        logger.info("evaluate source classifier..")
        logger.info("test accurracy in source domain: %f\n" %
                    (evaluate(model_src, source_test_loader)))

    else:
        # initialize target classifier with the source classifier
        model_trg = torch.load(open("./pretrained/lenet-source.pth", "rb"))

        # build discriminator
        D = Discriminator(config.num_gpus)

        # adaptation process
        logger.info("start adaptation process..")
        adapt_target_domain(D, model_src, model_trg, source_train_loader,
                            target_loader, config)
        logger.info("evaluate target classifier..")
        logger.info("accurracy in target domain: %f\n" %
                    (evaluate(model_trg, target_loader)))
Example #13
def main():
    args = parse_args()

    paths = Paths()
    checkpoints_path = str(paths.CHECKPOINTS_PATH)
    logging_path = str(paths.LOG_PATH)

    callbacks = [PrintCallback()]
    checkpoint_callback = ModelCheckpoint(filepath=checkpoints_path +
                                          '/{epoch}-{val_acc:.3f}',
                                          save_top_k=1,
                                          verbose=True,
                                          monitor='val_acc',
                                          mode='max',
                                          prefix='')
    early_stop_callback = EarlyStopping(monitor='val_acc',
                                        mode='max',
                                        verbose=False,
                                        strict=False,
                                        min_delta=0.0,
                                        patience=2)
    gpus = gpu_count()
    log_save_interval = args.log_save_interval
    logger = TensorBoardLogger(save_dir=logging_path, name='tuna-log')
    logger.log_hyperparams(args)
    max_epochs = args.epochs

    model = LeNet(hparams=args, paths=paths)
    trainer = Trainer(
        callbacks=callbacks,
        checkpoint_callback=checkpoint_callback,
        early_stop_callback=early_stop_callback,
        fast_dev_run=True,
        gpus=gpus,
        log_save_interval=log_save_interval,
        logger=logger,
        max_epochs=max_epochs,
        min_epochs=1,
        show_progress_bar=True,
        weights_summary='full',
    )
    trainer.fit(model)
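
Note that fast_dev_run=True makes Lightning run only a single batch of training and validation as a smoke test, so despite max_epochs this configuration will not actually train.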
Example #14
    torchvision.transforms.CenterCrop(28),  # crop the 28x28 center patch of the image
    torchvision.transforms.ToTensor(),  # convert the PIL Image to a Tensor scaled to [0, 1]
    torchvision.transforms.Normalize(mean=[.5, .5, .5],
                                     std=[.5, .5, .5])  # standardize to [-1, 1] with the given mean and std
])

test_dataset = DOGCAT(root='../Pytorch-Tutorial/datasets/dogcat_2',
                      train=False,
                      transform=transform)

test_loader = Data.DataLoader(dataset=test_dataset,
                              batch_size=100,
                              shuffle=False,
                              num_workers=2)

net = LeNet()
print(net)

if os.path.isfile('saves/dogcat_lenet_params.pkl'):
    net.load_state_dict(torch.load('saves/dogcat_lenet_params.pkl'))
else:
    print("dogcat_lenet_params.pkl don't exists.")
    exit()

# Test the Model
total = 0
correct = 0
for images, labels in test_loader:
    images = Variable(images)
    outputs = net(images)
    _, predicted = torch.max(outputs.data, 1)
    # plausible completion of the truncated snippet:
    total += labels.size(0)
    correct += (predicted == labels).sum()

print('Test accuracy: %d %%' % (100 * correct / total))
Example #15
def model_gen_fun():
    model = LeNet(num_classes=1, num_channels=1).eval()
    return model
Example #16
File: cifar-stl.py  Project: AuMgLi/LLDEN
def main():
    if not os.path.isdir(CHECKPOINT):
        os.makedirs(CHECKPOINT)

    print('==> Preparing dataset')

    trainloader, testloader = load_CIFAR(batch_size=BATCH_SIZE,
                                         num_workers=NUM_WORKERS)

    CLASSES = []
    AUROCs = []
    auroc = AverageMeter()

    for t, cls in enumerate(ALL_CLASSES):

        print('\nTask: [%d | %d]\n' % (t + 1, len(ALL_CLASSES)))

        CLASSES = [cls]

        print("==> Creating model")
        model = LeNet(num_classes=1)

        if CUDA:
            model = model.cuda()
            model = nn.DataParallel(model)
            cudnn.benchmark = True

        print('    Total params: %.2fK' %
              (sum(p.numel() for p in model.parameters()) / 1000))

        criterion = nn.BCELoss()
        optimizer = optim.SGD(model.parameters(),
                              lr=LEARNING_RATE,
                              momentum=MOMENTUM,
                              weight_decay=WEIGHT_DECAY)

        print("==> Learning")

        best_loss = 1e10
        learning_rate = LEARNING_RATE

        for epoch in range(EPOCHS):

            # decay learning rate
            if (epoch + 1) % EPOCHS_DROP == 0:
                learning_rate *= LR_DROP
                for param_group in optimizer.param_groups:
                    param_group['lr'] = learning_rate

            print('Epoch: [%d | %d]' % (epoch + 1, EPOCHS))

            train_loss = train(trainloader,
                               model,
                               criterion,
                               CLASSES,
                               CLASSES,
                               optimizer=optimizer,
                               use_cuda=CUDA)
            test_loss = train(testloader,
                              model,
                              criterion,
                              CLASSES,
                              CLASSES,
                              test=True,
                              use_cuda=CUDA)

            # save model
            is_best = test_loss < best_loss
            best_loss = min(test_loss, best_loss)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss': test_loss,
                    'optimizer': optimizer.state_dict()
                }, CHECKPOINT, is_best)

        print("==> Calculating AUROC")

        filepath_best = os.path.join(CHECKPOINT, "best.pt")
        checkpoint = torch.load(filepath_best)
        model.load_state_dict(checkpoint['state_dict'])

        new_auroc = calc_avg_AUROC(model, testloader, CLASSES, CLASSES, CUDA)
        auroc.update(new_auroc)

        print('New Task AUROC: {}'.format(new_auroc))
        print('Average AUROC: {}'.format(auroc.avg))

        AUROCs.append(auroc.avg)

    print('\nAverage Per-task Performance over number of tasks')
    for i, p in enumerate(AUROCs):
        print("%d: %f" % (i + 1, p))
Example #17
def main():
    # Data Loader (Input Pipeline)
    print('loading dataset...')
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               num_workers=args.num_workers,
                                               drop_last=False,
                                               shuffle=False)

    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=batch_size,
                                             num_workers=args.num_workers,
                                             drop_last=False,
                                             shuffle=False)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              num_workers=args.num_workers,
                                              drop_last=False,
                                              shuffle=False)
    # Define models
    print('building model...')
    if args.dataset == 'mnist':
        clf1 = LeNet()
    if args.dataset == 'fashionmnist':
        clf1 = resnet.ResNet18_F(10)
    if args.dataset == 'cifar10':
        clf1 = resnet.ResNet34(10)
    if args.dataset == 'svhn':
        clf1 = resnet.ResNet34(10)

    clf1.cuda()
    optimizer = torch.optim.SGD(clf1.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay)

    with open(txtfile, "a") as myfile:
        myfile.write('epoch train_acc val_acc test_acc\n')

    epoch = 0
    train_acc = 0
    val_acc = 0
    # evaluate models with random weights
    test_acc = evaluate(test_loader, clf1)
    print('Epoch [%d/%d] Test Accuracy on the %s test data: Model %.4f %%' %
          (epoch + 1, args.n_epoch_1, len(test_dataset), test_acc))
    # save results
    with open(txtfile, "a") as myfile:
        myfile.write(
            str(int(epoch)) + ' ' + str(train_acc) + ' ' + str(val_acc) + ' ' +
            str(test_acc) + ' ' + "\n")

    best_acc = 0.0
    # training
    for epoch in range(1, args.n_epoch_1):
        # train models
        clf1.train()
        train_acc = train(clf1, train_loader, epoch, optimizer,
                          nn.CrossEntropyLoss())
        # validation
        val_acc = evaluate(val_loader, clf1)
        # evaluate models
        test_acc = evaluate(test_loader, clf1)

        # save results
        print(
            'Epoch [%d/%d] Train Accuracy on the %s train data: Model %.4f %%'
            % (epoch + 1, args.n_epoch_1, len(train_dataset), train_acc))
        print('Epoch [%d/%d] Val Accuracy on the %s val data: Model %.4f %% ' %
              (epoch + 1, args.n_epoch_1, len(val_dataset), val_acc))
        print(
            'Epoch [%d/%d] Test Accuracy on the %s test data: Model %.4f %% ' %
            (epoch + 1, args.n_epoch_1, len(test_dataset), test_acc))
        with open(txtfile, "a") as myfile:
            myfile.write(
                str(int(epoch)) + ' ' + str(train_acc) + ' ' + str(val_acc) +
                ' ' + str(test_acc) + ' ' + "\n")

        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(clf1.state_dict(), model_save_dir + '/' + 'model.pth')

    print('Running matrix factorization...')
    clf1.load_state_dict(torch.load(model_save_dir + '/' + 'model.pth'))
    A = respresentations_extract(train_loader, clf1, len(train_dataset),
                                 args.dim, batch_size)
    A_val = respresentations_extract(val_loader, clf1, len(val_dataset),
                                     args.dim, batch_size)
    A_total = np.append(A, A_val, axis=0)
    W_total, H_total, error = train_m(A_total, args.basis, args.iteration_nmf,
                                      1e-5)
    W_total[W_total < 1e-6] = 0.  # zero out negligible entries
    W = W_total[0:len(train_dataset), :]
    W_val = W_total[len(train_dataset):, :]
    print('Estimating the transition matrix... waiting...')
    logits_matrix = probability_extract(train_loader, clf1, len(train_dataset),
                                        args.num_classes, batch_size)
    idx_matrix_group, transition_matrix_group = estimate_matrix(
        logits_matrix, model_save_dir)
    logits_matrix_val = probability_extract(val_loader, clf1, len(val_dataset),
                                            args.num_classes, batch_size)
    idx_matrix_group_val, transition_matrix_group_val = estimate_matrix(
        logits_matrix_val, model_save_dir)
    func = nn.MSELoss()

    model = Matrix_optimize(args.basis, args.num_classes)
    optimizer_1 = torch.optim.Adam(model.parameters(), lr=0.001)
    basis_matrix_group = basis_matrix_optimize(model, optimizer_1, args.basis,
                                               args.num_classes, W,
                                               transition_matrix_group,
                                               idx_matrix_group, func,
                                               model_save_dir, args.n_epoch_4)

    basis_matrix_group_val = basis_matrix_optimize(
        model, optimizer_1, args.basis, args.num_classes, W_val,
        transition_matrix_group_val, idx_matrix_group_val, func,
        model_save_dir, args.n_epoch_4)

    basis_matrix_group[basis_matrix_group < 1e-6] = 0.  # zero out negligible entries

    optimizer_ = torch.optim.SGD(clf1.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay,
                                 momentum=args.momentum)

    best_acc = 0.0
    for epoch in range(1, args.n_epoch_2):
        # train model
        clf1.train()

        train_acc = train_correction(clf1, train_loader, epoch, optimizer_, W,
                                     basis_matrix_group, batch_size,
                                     args.num_classes, args.basis)
        # validation
        val_acc = val_correction(clf1, val_loader, epoch, W_val,
                                 basis_matrix_group_val, batch_size,
                                 args.num_classes, args.basis)

        # evaluate models
        test_acc = evaluate(test_loader, clf1)
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(clf1.state_dict(), model_save_dir + '/' + 'model.pth')
        with open(txtfile, "a") as myfile:
            myfile.write(
                str(int(epoch)) + ' ' + str(train_acc) + ' ' + str(val_acc) +
                ' ' + str(test_acc) + ' ' + "\n")
        # save results
        print(
            'Epoch [%d/%d] Train Accuracy on the %s train data: Model %.4f %%'
            % (epoch + 1, args.n_epoch_2, len(train_dataset), train_acc))
        print('Epoch [%d/%d] Val Accuracy on the %s val data: Model %.4f %% ' %
              (epoch + 1, args.n_epoch_2, len(val_dataset), val_acc))
        print(
            'Epoch [%d/%d] Test Accuracy on the %s test data: Model %.4f %% ' %
            (epoch + 1, args.n_epoch_2, len(test_dataset), test_acc))

    clf1.load_state_dict(torch.load(model_save_dir + '/' + 'model.pth'))
    optimizer_r = torch.optim.Adam(clf1.parameters(),
                                   lr=args.lr_revision,
                                   weight_decay=args.weight_decay)
    nn.init.constant_(clf1.T_revision.weight, 0.0)

    for epoch in range(1, args.n_epoch_3):
        # train models
        clf1.train()
        train_acc = train_revision(clf1, train_loader, epoch, optimizer_r, W,
                                   basis_matrix_group, batch_size,
                                   args.num_classes, args.basis)
        # validation
        val_acc = val_revision(clf1, val_loader, epoch, W_val,
                               basis_matrix_group, batch_size,
                               args.num_classes, args.basis)
        # evaluate models
        test_acc = evaluate(test_loader, clf1)
        with open(txtfile, "a") as myfile:
            myfile.write(
                str(int(epoch)) + ' ' + str(train_acc) + ' ' + str(val_acc) +
                ' ' + str(test_acc) + ' ' + "\n")

        # save results
        print(
            'Epoch [%d/%d] Train Accuracy on the %s train data: Model %.4f %%'
            % (epoch + 1, args.n_epoch_3, len(train_dataset), train_acc))
        print('Epoch [%d/%d] Val Accuracy on the %s val data: Model %.4f %% ' %
              (epoch + 1, args.n_epoch_3, len(val_dataset), val_acc))
        print(
            'Epoch [%d/%d] Test Accuracy on the %s test data: Model %.4f %% ' %
            (epoch + 1, args.n_epoch_3, len(test_dataset), test_acc))
Example #18
        if not os.path.exists(directory):
            os.makedirs(directory)

    writer_loss = SummaryWriter(gen_path(loss_path))
    writer_acc = SummaryWriter(gen_path(acc_path))

    trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    dataset_train = MNIST('./data/mnist/', train=True, download=True, transform=trans_mnist)
    dataset_test = MNIST('./data/mnist/', train=False, download=True, transform=trans_mnist)
    # sample users
    dict_users = split_noniid_shuffle(dataset_train, args.num_nodes)

    img_size = dataset_train[0][0].shape
    print(img_size)

    net_glob = LeNet().to(args.device)
    print(net_glob.fc1.weight.type())
    print(net_glob)
    net_glob.train()

    # copy weights
    w_glob = net_glob.state_dict()
    w_glob_grad = w_glob

    # training
    #loss_train = []
    
    w_locals = [w_glob for i in range(args.num_nodes)]

    for iter in range(args.epochs):
        loss_locals = []
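
        # The loop is truncated here. In a typical FedAvg round each node
        # trains locally and the server averages the state dicts; a sketch
        # under that assumption (train_local is an assumed helper, not from
        # the original source):
        for node in range(args.num_nodes):
            w_locals[node] = train_local(node, dict_users[node])

        w_glob = {k: sum(w[k] for w in w_locals) / len(w_locals)
                  for k in w_glob.keys()}
        net_glob.load_state_dict(w_glob)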
Example #19
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
from test import test_img
from torch.utils.tensorboard import SummaryWriter

if __name__ == '__main__':
    args = args_parser()
    args.device = torch.device('cuda:{}'.format(args.gpu))

    batch_size = 256
    trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    train_dataset = MNIST('./data/mnist/', train=True, download=True, transform=trans_mnist)
    test_dataset = MNIST('./data/mnist/', train=False, download=True, transform=trans_mnist)
    train_loader = DataLoader(train_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)
    model = LeNet().to(args.device)
    sgd = SGD(model.parameters(), lr=1e-1)
    cross_error = CrossEntropyLoss()
    epoch = 100

    writer = SummaryWriter('./runs/t_centerlize')
    for _epoch in range(epoch):
        epoch_loss = []
        for idx, (train_x, train_label) in enumerate(train_loader):
            train_x, train_label = train_x.to(args.device), train_label.to(args.device)
            #label_np = np.zeros((train_label.shape[0], 10))
            sgd.zero_grad()
            predict_y = model(train_x.float())
            _error = cross_error(predict_y, train_label.long())
            epoch_loss.append(_error.item())  # plausible completion: epoch_loss is otherwise never filled
            _error.backward()
            sgd.step()
Example #20
        args.dataset,
        batch_size=args.batch_size,
        conv_net=True,
        data_augmentation=args.data_augmentation,
        num_workers=num_workers)
    if args.data_augmentation:
        print('    data augmentation')

    window_size = train_loader.dataset.data[0].shape[0]
    if len(train_loader.dataset.data[0].shape) == 3:
        num_input_channels = train_loader.dataset.data[0].shape[2]
    else:
        num_input_channels = 1

    model = LeNet(num_input_channels=num_input_channels,
                  window_size=window_size,
                  bias=True).to(device)

# Multi-GPU
if num_workers > 1:
    model = nn.DataParallel(model)
criterion = nn.CrossEntropyLoss()

if __name__ == "__main__":

    # Save everything in a `ddict`
    SAV = ddict(args=args.__dict__)

    # Store training and test performance after each training epoch
    SAV.perf = ddict(tr=[], te=[])
Example #21
# test_type = 'single task classification'
test_type = 'multi tasks classification'

if test_type == 'single task classification':

    root = './data'
    download = True  # download MNIST dataset or not

    trans = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
    train_set = dset.MNIST(root=root, train=True,
                           transform=trans, download=download)
    test_set = dset.MNIST(root=root, train=False, transform=trans)

    model = LeNet()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    model.compile(optimizer, criterion, metrics=['top1', 'top2'])

elif test_type == 'multi tasks classification':

    train_set = CategoricalDatasetMultiTasks(N, D_in, D_out1, D_out2)
    test_set = CategoricalDatasetMultiTasks(
        int(N * 0.25), D_in, D_out1, D_out2)

    model = MultiTasksClassification(D_in, H1, H2, D_out1, D_out2)

    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
Example #22
def main(args):
    num_classes = 10
    size = [28, 28]  # size of images
    ctype = ecvl.ColorType.GRAY

    in_ = eddl.Input([1, size[0], size[1]])
    out = LeNet(in_, num_classes)
    net = eddl.Model([in_], [out])
    eddl.build(net, eddl.sgd(0.001, 0.9), ["soft_cross_entropy"],
               ["categorical_accuracy"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "mnist")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs, ctype)
    x_train = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y_train = Tensor([args.batch_size, len(d.classes_)])
    num_samples = len(d.GetSplit())
    num_batches = num_samples // args.batch_size
    indices = list(range(args.batch_size))

    print("Training")
    for i in range(args.epochs):
        eddl.reset_loss(net)
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetCurrentBatch()
        for j in range(num_batches):
            print("Epoch %d/%d (batch %d/%d) - " %
                  (i + 1, args.epochs, j + 1, num_batches),
                  end="",
                  flush=True)
            d.LoadBatch(x_train, y_train)
            x_train.div_(255.0)
            tx, ty = [x_train], [y_train]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, j)
            print()

    print("Saving weights")
    eddl.save(net, "mnist_checkpoint.bin", "bin")

    print("Evaluation")
    d.SetSplit(ecvl.SplitType.test)
    num_samples = len(d.GetSplit())
    num_batches = num_samples // args.batch_size
    for i in range(num_batches):
        print("batch %d / %d - " % (i, num_batches), end="", flush=True)
        d.LoadBatch(x_train, y_train)
        x_train.div_(255.0)
        eddl.evaluate(net, [x_train], [y_train])
Example #23
fmnist_train = datasets.FashionMNIST(FMNIST_DATA_HOME,
                                     train=True,
                                     transform=train_transform,
                                     download=True)
fmnist_test = datasets.FashionMNIST(FMNIST_DATA_HOME,
                                    train=False,
                                    transform=test_transform,
                                    download=True)

m = 5000
train_data = Subset(fmnist_train, np.arange(m))  # Subset expects indices, not a sample count
val_data = Subset(fmnist_train, np.arange(m, m + 200))
val_data2 = Subset(fmnist_train, np.arange(m - 200, m))
test_data = fmnist_test

net = LeNet(in_channels=1)
optimizer = SGD(net.parameters(), lr=0.003, momentum=0.9, weight_decay=5e-4)

batch_size = 64
data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
epochs = 100
for epoch in range(epochs):
    if epoch % 2 == 0:
        gc.collect()
    print("Epoch %d" % (epoch + 1))
    for i, batch in enumerate(data_loader):
        inputs, labels = batch
        inputs = H.tensor(inputs.numpy())
        labels = H.tensor(labels.numpy())

        net.zero_grad()
Example #24
def main():
    opt = Parser(train=False).get()

    # dataset and data loader
    _, val_loader, adv_val_loader, _, num_classes = \
            load_dataset(opt.dataset, opt.batch_size, opt.data_root,
                         False, 0.0, opt.num_val_samples,
                         workers=4)

    # model
    if opt.arch == 'lenet':
        model = LeNet(num_classes)
    elif opt.arch == 'resnet':
        model = ResNetv2_20(num_classes)
    else:
        raise NotImplementedError

    # move model to device
    model.to(opt.device)

    # load trained weights
    try:
        model.load_state_dict(torch.load(opt.weight_path))
    except Exception:
        # fall back for weights saved from a DataParallel-wrapped model
        model_weight = convert_model_from_parallel(opt.weight_path)
        model.load_state_dict(model_weight)

    # criterion
    criterion = nn.CrossEntropyLoss()

    # advertorch attacker
    if opt.attack == 'pgd':
        attacker = PGDAttack(model,
                             loss_fn=criterion,
                             eps=opt.eps / 255,
                             nb_iter=opt.num_steps,
                             eps_iter=opt.eps_iter / 255,
                             rand_init=True,
                             clip_min=opt.clip_min,
                             clip_max=opt.clip_max,
                             ord=np.inf,
                             targeted=False)
    else:
        raise NotImplementedError

    # trainer
    trainer = Trainer(opt, model, criterion, attacker)
    trainer.print_freq = -1

    # validation
    val_losses, val_acc1s, val_acc5s = \
        trainer.validate(val_loader)
    aval_losses, aval_acc1s, aval_acc5s = \
        trainer.adv_validate(adv_val_loader)

    print('[model] {}'.format(opt.weight_path))
    print('[standard]\n'
          'loss: {:.4f} | acc1: {:.2f}% | acc5: {:.2f}%'
          '\n[adversarial]\n'
          'loss: {:.4f} | acc1: {:.2f}% | acc5: {:.2f}%'.format(
              val_losses['val'].avg, val_acc1s['val'].avg,
              val_acc5s['val'].avg, aval_losses['aval'].avg,
              aval_acc1s['aval'].avg, aval_acc5s['aval'].avg))
Example #25
testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Model
print('==> Building model..')
model = LeNet(3, len(classes)).to(device)

if device.type == 'cuda':
    model = torch.nn.DataParallel(model)
    cudnn.benchmark = True

if os.path.isfile(ckpt_file):
    checkpoint = torch.load(ckpt_file)
    model.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']

criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(),
                      lr=1e-3,
                      momentum=0.9)
Example #26
    returns={'processed_labels'},
    resize=(img_height, img_width))

print("Number of images in the dataset:", train_dataset.get_n_samples())
print("Number of images in the dataset:", validation_dataset.get_n_samples())

# In[8]:

steps_per_epoch = train_dataset.get_n_samples() / train_batch_size
validation_steps = validation_dataset.get_n_samples() / validation_batch_size

# In[9]:

model = LeNet(n_classes=1,
              img_width=img_width,
              img_depth=img_depth,
              img_height=img_height,
              activation=activation)

# In[10]:

model.summary()

# In[11]:

optimizer = Adam(lr=0.001,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=None,
                 decay=0.00001,
                 amsgrad=True)
Example #27
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='Adversarial MNIST Example')
    parser.add_argument('--use-pretrained',
                        action='store_true',
                        default=False,
                        help='uses the pretrained model')
    parser.add_argument('--adversarial-training',
                        action='store_true',
                        default=False,
                        help='takes the adversarial training process')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=512,
                        metavar='N',
                        help='input batch size for testing (default: 512)')
    args = parser.parse_args()

    # Define what device we are using
    use_cuda = torch.cuda.is_available()
    print("CUDA Available: ", use_cuda)
    device = torch.device("cuda" if use_cuda else "cpu")

    # MNIST Test dataset and dataloader declaration
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([transforms.ToTensor()])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([transforms.ToTensor()])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    # Initialize the network
    model = LeNet().to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

    if args.use_pretrained:
        print('Loading the pretrained model')
        model.load_state_dict(
            torch.load('resources/lenet_mnist_model.bin', map_location='cpu'))
    else:
        print('Training on the MNIST dataset')
        model_train(model, train_loader, F.nll_loss, optimizer, epochs=10)

    print('Evaluating the neural network')
    # Evaluate the accuracy of the MNIST model on clean examples
    accuracy, _ = model_eval(model, test_loader, F.nll_loss)
    print('Test accuracy on clean examples: ' + str(accuracy))

    # Evaluate the accuracy of the MNIST model on adversarial examples
    accuracy, _ = model_eval(model,
                             test_loader,
                             F.nll_loss,
                             attack_method=fgsm_attack)
    print('Test accuracy on adversarial examples: ' + str(accuracy))

    if args.adversarial_training:
        print("Repeating the process, with adversarial training")
        # Perform adversarial training
        model_train(model,
                    train_loader,
                    F.nll_loss,
                    optimizer,
                    epochs=10,
                    attack_method=fgsm_attack)

        # Evaluate the accuracy of the adversarially trained MNIST model on
        # clean examples
        accuracy, _ = model_eval(model, test_loader, F.nll_loss)
        print('Test accuracy on clean examples: ' + str(accuracy))

        # Evaluate the accuracy of the adversarially trained MNIST model on
        # adversarial examples
        accuracy_adv, _ = model_eval(model,
                                     test_loader,
                                     F.nll_loss,
                                     attack_method=fgsm_attack)
        print('Test accuracy on adversarial examples: ' + str(accuracy_adv))
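
fgsm_attack is referenced here but not shown. A minimal sketch of the standard FGSM perturbation, with an assumed signature for the attack_method callback, is:

def fgsm_attack(model, loss_fn, x, y, eps=0.25):
    # Fast Gradient Sign Method: step the input along the sign of the
    # loss gradient with respect to the input.
    x_adv = x.clone().detach().requires_grad_(True)
    loss_fn(model(x_adv), y).backward()
    with torch.no_grad():
        x_adv = (x_adv + eps * x_adv.grad.sign()).clamp(0, 1)  # keep pixels in range
    return x_adv.detach()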
Example #28
def main():

    use_cuda = torch.cuda.is_available() and not args.no_cuda
    device = torch.device('cuda' if use_cuda else 'cpu')
    print(device)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if use_cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True

    rgb = False
    if args.mode == 'rgb':
        rgb = True

    if args.gray_scale:
        rgb = False

    if args.tracking_data_mod is True:
        args.input_size = 192

    # DATALOADER

    train_dataset = GesturesDataset(model=args.model, csv_path='csv_dataset', train=True, mode=args.mode, rgb=rgb,
                                    normalization_type=1,
                                    n_frames=args.n_frames, resize_dim=args.input_size,
                                    transform_train=args.train_transforms, tracking_data_mod=args.tracking_data_mod)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_workers)

    validation_dataset = GesturesDataset(model=args.model, csv_path='csv_dataset', train=False, mode=args.mode, rgb=rgb, normalization_type=1,
                                   n_frames=args.n_frames, resize_dim=args.input_size, tracking_data_mod=args.tracking_data_mod)
    validation_loader = DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.n_workers)

    # parameters for the network

    in_channels = args.n_frames if not rgb else args.n_frames * 3
    n_classes = args.n_classes

    if args.model == 'LeNet':
        model = LeNet(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == 'AlexNet':
        model = AlexNet(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == 'AlexNetBN':
        model = AlexNetBN(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == "Vgg16":
        model = Vgg16(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == "Vgg16P":
        model = models.vgg16(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(3, 3), stride=1, padding=1)
        model.classifier._modules['6'] = nn.Linear(4096, n_classes)
        # model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "ResNet18P":
        model = models.resnet18(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model._modules['conv1'] = nn.Conv2d(in_channels, 64, 7, stride=2, padding=3)
        model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "ResNet34P":
        model = models.resnet34(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model._modules['conv1'] = nn.Conv2d(in_channels, 64, 7, stride=2, padding=3)
        model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "DenseNet121P":
        model = models.densenet121(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1024, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet161P":
        model = models.densenet161(pretrained=args.pretrained)
        # for params in model.parameters():
        #     params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=96, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=2208, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet169P":
        model = models.densenet169(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1664, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet201P":
        model = models.densenet201(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1920, out_features=n_classes, bias=True)
        model = model.to(device)
    # RNN
    elif args.model == 'LSTM' or args.model == 'GRU':
        model = Rnn(rnn_type=args.model, input_size=args.input_size, hidden_size=args.hidden_size,
                    batch_size=args.batch_size,
                    num_classes=args.n_classes, num_layers=args.n_layers,
                    final_layer=args.final_layer).to(device)
    # C3D

    elif args.model == 'C3D':
        if args.pretrained:
            model = C3D(rgb=rgb, num_classes=args.n_classes)


            # modify the pretrained parameters
            print('ok')

            model.load_state_dict(torch.load('c3d_weights/c3d.pickle', map_location=device), strict=False)
            # # for params in model.parameters():
            #     # params.requires_grad = False

            model.conv1 = nn.Conv3d(1 if not rgb else 3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
            # replace fc6 because the input here is 30 frames
            model.fc6 = nn.Linear(16384, 4096)  # 28672 input features for a 112x200 input
            model.fc7 = nn.Linear(4096, 4096)
            model.fc8 = nn.Linear(4096, n_classes)  # num classes

            model = model.to(device)


    # Conv-lstm
    elif args.model == 'Conv-lstm':
        model = ConvLSTM(input_size=(args.input_size, args.input_size),
                         input_dim=1 if not rgb else 3,
                         hidden_dim=[64, 64, 128],
                         kernel_size=(3, 3),
                         num_layers=args.n_layers,
                         batch_first=True,
                         ).to(device)
    elif args.model == 'DeepConvLstm':
        model = DeepConvLstm(input_channels_conv=1 if not rgb else 3, input_size_conv=args.input_size, n_classes=12,
                             n_frames=args.n_frames, batch_size=args.batch_size).to(device)

    elif args.model == 'ConvGRU':
        model = ConvGRU(input_size=40, hidden_sizes=[64, 128],
                        kernel_sizes=[3, 3], n_layers=2).to(device)

    else:
        raise NotImplementedError

    if args.opt == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
        # optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=args.momentum)

    elif args.opt == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    loss_function = nn.CrossEntropyLoss().to(device)

    start_epoch = 0
    if args.resume:
        checkpoint = torch.load("/projects/fabio/weights/gesture_recog_weights/checkpoint{}.pth.tar".format(args.model))
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']

        print("Resuming state:\n-epoch: {}\n{}".format(start_epoch, model))

    # experiment name
    personal_name = "{}_{}_{}".format(args.model, args.mode, args.exp_name)
    info_experiment = "{}".format(personal_name)
    log_dir = "/projects/fabio/logs/gesture_recog_logs/exps"
    weight_dir = personal_name
    log_file = open("{}/{}.txt".format("/projects/fabio/logs/gesture_recog_logs/txt_logs", personal_name), 'w')
    log_file.write(personal_name + "\n\n")
    if personal_name:
        exp_name = (("exp_{}_{}".format(time.strftime("%c"), personal_name)).replace(" ", "_")).replace(":", "-")
    else:
        exp_name = (("exp_{}".format(time.strftime("%c"), personal_name)).replace(" ", "_")).replace(":", "-")
    writer = SummaryWriter("{}".format(os.path.join(log_dir, exp_name)))

    # add info experiment
    writer.add_text('Info experiment',
                    "model:{}"
                    "\n\npretrained:{}"
                    "\n\nbatch_size:{}"
                    "\n\nepochs:{}"
                    "\n\noptimizer:{}"
                    "\n\nlr:{}"
                    "\n\ndn_lr:{}"
                    "\n\nmomentum:{}"
                    "\n\nweight_decay:{}"
                    "\n\nn_frames:{}"
                    "\n\ninput_size:{}"
                    "\n\nhidden_size:{}"
                    "\n\ntracking_data_mode:{}"
                    "\n\nn_classes:{}"
                    "\n\nmode:{}"
                    "\n\nn_workers:{}"
                    "\n\nseed:{}"
                    "\n\ninfo:{}"
                    "".format(args.model, args.pretrained, args.batch_size, args.epochs, args.opt, args.lr, args.dn_lr, args.momentum,
                              args.weight_decay, args.n_frames, args.input_size, args.hidden_size, args.tracking_data_mod,
                              args.n_classes, args.mode, args.n_workers, args.seed, info_experiment))

    trainer = Trainer(model=model, loss_function=loss_function, optimizer=optimizer, train_loader=train_loader,
                      validation_loader=validation_loader,
                      batch_size=args.batch_size, initial_lr=args.lr,  device=device, writer=writer, personal_name=personal_name, log_file=log_file,
                      weight_dir=weight_dir, dynamic_lr=args.dn_lr)


    print("experiment: {}".format(personal_name))
    start = time.time()
    for ep in range(start_epoch, args.epochs):
        trainer.train(ep)
        trainer.val(ep)

    # display classes results
    classes = ['g0', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7', 'g8', 'g9', 'g10', 'g11']
    for i in range(args.n_classes):
        print('Accuracy of {} : {:.3f}%'.format(
            classes[i], 100 * trainer.class_correct[i] / trainer.class_total[i]))

    end = time.time()
    h, rem = divmod(end - start, 3600)
    m, s = divmod(rem, 60)
    print("\nelapsed time (ep.{}):{:0>2}:{:0>2}:{:05.2f}".format(args.epochs, int(h), int(m), s))


    # writing accuracy on file

    log_file.write("\n\n")
    for i in range(args.n_classes):
        log_file.write('Accuracy of {} : {:.3f}%\n'.format(
            classes[i], 100 * trainer.class_correct[i] / trainer.class_total[i]))
    log_file.close()
Example #29
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    feature_blob = np.zeros([1, 16, 8, 8])

    model = LeNet()

    def hook(module, input, output):
        global feature_blob
        feature_blob = output

    model._modules.get('conv2').register_forward_hook(hook)
    model = torch.nn.DataParallel(model).cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        train(args, model, train_loader, optimizer, epoch)
        cam(model, epoch)
        test(args, model, test_loader)

    generate_gif()
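
The conv2 hook stashes the latest feature maps in feature_blob, which cam() presumably uses to render class activation maps after each epoch before generate_gif() stitches them together.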
Example #30
def main():
    opt = Parser().get()

    experiment = None
    if opt.comet:
        experiment = Experiment()
        experiment.set_name(opt.exp_name)
        experiment.log_parameters(opt.__dict__)
        experiment.add_tags(opt.add_tags)

    # dataset and data loader
    train_loader, val_loader, adv_val_loader, _, num_classes = \
            load_dataset(opt.dataset, opt.batch_size, opt.data_root,
                         opt.noise, opt.noise_std, opt.num_val_samples,
                         workers=4)

    # model
    if opt.arch == 'lenet':
        model = LeNet(num_classes)
    elif opt.arch == 'resnet':
        model = ResNetv2_20(num_classes)
    else:
        raise NotImplementedError

    # weight init
    if opt.weight_init == 'he':
        model.apply(init_he)

    # move model to device
    model.to(opt.device)
    if opt.gpu_ids:
        model = nn.DataParallel(model, device_ids=opt.gpu_ids)

    # criterion
    criterion = nn.CrossEntropyLoss()

    # advertorch attacker
    if opt.attack == 'pgd':
        attacker = LinfPGDAttack(model,
                                 loss_fn=criterion,
                                 eps=opt.eps / 255,
                                 nb_iter=opt.num_steps,
                                 eps_iter=opt.eps_iter / 255,
                                 rand_init=True,
                                 clip_min=opt.clip_min,
                                 clip_max=opt.clip_max,
                                 targeted=False)
    else:
        raise NotImplementedError

    # optimizer
    if opt.optim == 'Adam':
        optimizer = optim.Adam(model.parameters(),
                               opt.lr,
                               eps=1e-6,
                               weight_decay=opt.wd)
    elif opt.optim == 'SGD':
        optimizer = optim.SGD(model.parameters(), opt.lr, weight_decay=opt.wd)
    else:
        raise NotImplementedError

    # scheduler
    if opt.scheduler_step:
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=opt.scheduler_step,
                                              gamma=opt.scheduler_gamma)
    else:
        scheduler = None

    # timer
    timer = Timer(opt.num_epochs, 0)

    # trainer
    trainer = Trainer(opt, model, criterion, attacker, optimizer)

    # epoch iteration
    for epoch in range(1, opt.num_epochs + 1):
        trainer.epoch = epoch
        if scheduler:
            scheduler.step(epoch - 1)  # scheduler's epoch is 0-indexed.

        # training
        train_losses, train_acc1s, train_acc5s = \
                trainer.train(train_loader)

        # validation
        val_losses, val_acc1s, val_acc5s = \
                trainer.validate(val_loader)
        if opt.adv_val_freq != -1 and epoch % opt.adv_val_freq == 0:
            aval_losses, aval_acc1s, aval_acc5s = \
                trainer.adv_validate(adv_val_loader)
        else:
            aval_losses, aval_acc1s, aval_acc5s = \
                    dict(), dict(), dict()

        losses = dict(**train_losses, **val_losses, **aval_losses)
        acc1s = dict(**train_acc1s, **val_acc1s, **aval_acc1s)
        acc5s = dict(**train_acc5s, **val_acc5s, **aval_acc5s)
        report_epoch_status(losses, acc1s, acc5s, trainer.num_loss, epoch, opt,
                            timer, experiment)

    save_path = os.path.join('ckpt', opt.dataset, 'models',
                             opt.exp_name + '.pth')
    trainer.save_model(save_path)