Example 1
# Note: this excerpt begins mid-file; it assumes `import os`, `import torch`,
# `import torch.backends.cudnn as cudnn`, and the repository's model classes.
# The branch preceding this `else` is truncated in the excerpt; in the full
# script it typically builds an AllConvNet when args.model == 'allconv':
if args.model == 'allconv':
    net = AllConvNet(num_classes)
else:
    net = WideResNet(args.layers,
                     num_classes,
                     args.widen_factor,
                     dropRate=args.droprate)

start_epoch = 0

# Restore model if desired
if args.load != '':
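    # Scan backwards from the highest possible epoch index to find the most recent checkpoint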
    for i in range(1000 - 1, -1, -1):
        model_name = os.path.join(
            args.load, args.dataset + '_' + args.model + '_baseline_epoch_' +
            str(i) + '.pt')
        if os.path.isfile(model_name):
            net.load_state_dict(torch.load(model_name))
            print('Model restored! Epoch:', i)
            start_epoch = i + 1
            break
    if start_epoch == 0:
        assert False, "could not resume"

if args.ngpu > 1:
    net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

if args.ngpu > 0:
    net.cuda()
    torch.cuda.manual_seed(1)

cudnn.benchmark = True  # fire on all cylinders
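For context, the fields accessed on args in this example (model, dataset, layers, widen_factor, droprate, load, ngpu) would normally come from an argparse setup along these lines; the option names match the code above, but the defaults and help strings are illustrative assumptions rather than the original script's values:

import argparse

parser = argparse.ArgumentParser(description='Illustrative argument sketch')
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'])
parser.add_argument('--model', type=str, default='wrn', help='architecture, e.g. allconv or wrn')
parser.add_argument('--layers', type=int, default=40, help='WideResNet depth')
parser.add_argument('--widen-factor', type=int, default=2, help='WideResNet widening factor')
parser.add_argument('--droprate', type=float, default=0.0, help='dropout probability')
parser.add_argument('--load', type=str, default='', help='checkpoint folder to resume from')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
args = parser.parse_args()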
Example 2
# Note: this excerpt also begins mid-file; it assumes `import torchvision.datasets as dset`,
# `import torch`, the repository's `attacks` module, and its model classes. The
# opening of the dataset branch is truncated and would typically read:
if args.dataset == 'cifar10':
    test_data = dset.CIFAR10('/data/sauravkadavath/cifar10-dataset/', train=False, transform=test_transform)
    num_classes = 10
else:
    test_data = dset.CIFAR100('/data/sauravkadavath/cifar10-dataset/', train=False, transform=test_transform)
    num_classes = 100


test_loader = torch.utils.data.DataLoader(
    test_data, batch_size=args.batch_size, shuffle=False,
    num_workers=args.prefetch, pin_memory=True)

# Create model (only the 'allconv' branch appears in this excerpt; the other
# architectures handled by the full script are not shown)
if args.model == 'allconv':
    net = AllConvNet(num_classes).cuda()

net.load_state_dict(torch.load(args.load))

adversary = attacks.PGD(epsilon=8./255, num_steps=10, step_size=0.5/255).cuda()
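# Note: `attacks.PGD` above comes from this repository's own `attacks` module,
# which is not shown here. Purely as an illustrative sketch (an assumption, not
# the repository's actual implementation), an L-infinity PGD attack with the
# same constructor arguments and call signature, assuming inputs in [0, 1],
# could look like this:
import torch
import torch.nn as nn
import torch.nn.functional as F

class PGDSketch(nn.Module):
    def __init__(self, epsilon, num_steps, step_size):
        super().__init__()
        self.epsilon = epsilon
        self.num_steps = num_steps
        self.step_size = step_size

    def forward(self, model, bx, by):
        # Start from a random point inside the epsilon-ball around bx
        adv_bx = (bx + torch.empty_like(bx).uniform_(-self.epsilon, self.epsilon)).clamp(0, 1)
        for _ in range(self.num_steps):
            adv_bx.requires_grad_()
            with torch.enable_grad():
                loss = F.cross_entropy(model(adv_bx), by)
            grad = torch.autograd.grad(loss, adv_bx)[0]
            # Ascend the loss, then project back into the epsilon-ball and valid pixel range
            adv_bx = adv_bx.detach() + self.step_size * grad.sign()
            adv_bx = torch.min(torch.max(adv_bx, bx - self.epsilon), bx + self.epsilon).clamp(0, 1)
        return adv_bx.detach()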

# def train():
#     net.train()  # enter train mode
#     loss_avg = 0.0
#     for bx, by in tqdm(train_loader):

#         bx, by = bx.cuda(), by.cuda()

#         adv_bx = adversary(net, bx, by)

#         # print(torch.max(bx), torch.min(bx), torch.mean(bx))

#         # forward
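The commented-out train() above is truncated in the original excerpt. As a hedged illustration (not part of the original script), the pieces set up in this example (net, test_loader, and adversary) would typically be combined into an adversarial evaluation loop along these lines:

def evaluate_adversarial():
    net.eval()
    correct = 0
    for bx, by in test_loader:
        bx, by = bx.cuda(), by.cuda()
        # Craft PGD-perturbed inputs, then measure accuracy on them
        adv_bx = adversary(net, bx, by)
        with torch.no_grad():
            pred = net(adv_bx).argmax(dim=1)
        correct += (pred == by).sum().item()
    print('Adversarial accuracy:', correct / len(test_loader.dataset))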