print(len(train_loader),len(val_loader),len(test_loader))

m = models.vgg19(pretrained=True).to(DEVICE)  # ImageNet-pretrained VGG-19; newer torchvision versions use weights=... instead of the deprecated pretrained=True
image_mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
image_std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
model = NormalizedModel(model=m, mean=image_mean, std=image_std).to(DEVICE)
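# NormalizedModel wraps the backbone so the per-channel ImageNet mean/std
# normalization is applied inside the forward pass; the data loaders and the
# attacker can then operate directly on images in the [0, 1] range.
# A minimal sketch of such a wrapper, shown only as an assumption about what
# the (not shown) class does -- the actual implementation may differ
# (assumes import torch.nn as nn):
#
#   class NormalizedModel(nn.Module):
#       def __init__(self, model, mean, std):
#           super().__init__()
#           self.model = model
#           self.register_buffer('mean', mean)
#           self.register_buffer('std', std)
#
#       def forward(self, x):  # x is expected in [0, 1]
#           return self.model((x - self.mean) / self.std)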

# weight = './weights/imagenet_resnet152_jpeg/Imagenetacc0.9642857142857143_20.pth'
# loaded_state_dict = torch.load(weight)
# model.load_state_dict(loaded_state_dict)

if torch.cuda.device_count() > 1:
    model = torch.nn.DataParallel(model)
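# nn.DataParallel (above) replicates the model and splits each input batch across all visible GPUs.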

optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
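# Plain training uses a single-step LR decay; adversarial training (args.adv != 0)
# uses a longer multi-step schedule with decays at epochs 60, 120 and 160.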
if args.adv == 0:
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.lr_step, gamma=args.lr_decay)
else:
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.2)

attacker = DDN(steps=args.steps, device=DEVICE)
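# DDN (Decoupled Direction and Norm, Rony et al. 2019) is a gradient-based L2 attack;
# it is presumably used in the (not shown) training loop to craft adversarial
# examples when adversarial training is enabled.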

max_loss = torch.log(torch.tensor(1000.)).item()  # for callback
best_acc = 0
best_epoch = 0

valacc_final = 0
# Example #2
train_loader = data.DataLoader(train_set,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=args.workers,
                               pin_memory=True)
test_loader = data.DataLoader(test_set,
                              batch_size=100,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)
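# num_workers sets the number of worker processes for data loading;
# pin_memory=True keeps batches in page-locked memory to speed up host-to-GPU copies.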

m = wide_resnet(num_classes=10, depth=28, widen_factor=10, dropRate=args.drop)
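# WRN-28-10 (depth 28, widening factor 10); num_classes=10 suggests a 10-class
# dataset such as CIFAR-10, and args.drop sets the dropout rate.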
model = NormalizedModel(model=m, mean=image_mean, std=image_std).to(
    DEVICE)  # keep images in the [0, 1] range
if torch.cuda.device_count() > 1:
    model = torch.nn.DataParallel(model)

optimizer = SGD(model.parameters(),
                lr=args.lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay)
if args.adv == 0:
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=args.lr_step,
                                    gamma=args.lr_decay)
else:
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=[60, 120, 160],
                                         gamma=0.2)

max_loss = torch.log(torch.tensor(10.)).item()  # for callback
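# log(10) is the cross-entropy of a uniform (chance-level) prediction over the
# 10 classes, presumably used as a reference value by the loss-monitoring callback.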
best_acc = 0
best_epoch = 0