Example #1
     # Switch the model under attack according to model_name
 elif args.model_name != '':
     model = get_model_for_defense(args.model_name).to(device)
 else:
     model = get_custom_model(args.model, args.model_path).to(device)
 # Attack task: change to your attack function here
 # Here is an attack baseline: PGD attack
 # model = nn.DataParallel(model, device_ids=[0, 1])
 model = WrappedModel(model, args.subbatch_size)
 attack = get_attacker(
     args.attacker, args.step_size, args.epsilon,
     args.perturb_steps
 )
 model.eval()
 if args.dataset == 'cifar10':
     test_loader = get_test_cifar(args.batch_size)
 elif args.dataset == 'mnist':
     test_loader = get_test_mnist(args.batch_size)
 elif args.dataset == 'rand_mnist':
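     # Keep the MNIST test images but replace their labels with those stored in rand_mnist.npy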
     test_loader = get_test_mnist(1)
     xs = [x for (x,), _ in test_loader]
     ys = numpy.load("rand_mnist.npy")
     ds = list(zip(xs, ys))
     test_loader = DataLoader(ds, args.batch_size)
 else:
     test_loader = get_test_imagenet(args.batch_size)
 if args.targeted == 'untargeted':
     # non-targeted attack
     natural_acc, robust_acc, distance = eval_model_with_attack(
         model, test_loader, attack, args.epsilon, device, args.dataset)
     print(
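
The PGD baseline mentioned in the comments above is only used through `get_attacker` as a black box; the sketch below shows what such an attack typically looks like. The `pgd_attack` name, the small random start, and the clamp to [0, 1] are assumptions for illustration, not this repository's implementation.

import torch
import torch.nn.functional as F

def pgd_attack(model, x, y, epsilon, step_size, perturb_steps):
    # Hypothetical L-infinity PGD: start near x, then repeatedly ascend the
    # cross-entropy loss and project back into the epsilon-ball around x.
    x_adv = x.detach() + 0.001 * torch.randn_like(x)
    for _ in range(perturb_steps):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), y)
        grad = torch.autograd.grad(loss, x_adv)[0]
        x_adv = x_adv.detach() + step_size * grad.sign()
        x_adv = torch.min(torch.max(x_adv, x - epsilon), x + epsilon)
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    return x_adv
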
Example #2
         y_cpu = y.detach().cpu().numpy()
         x, y = x.to(dev), y.to(dev)
         opt.zero_grad()
         logit_2 = cnn(x)
         loss = F.cross_entropy(logit_2, y)
         loss.backward()
         losses.append(loss.item())
         accs.append(
             (logit_2.detach().cpu().numpy().argmax(-1) == y_cpu).mean())
         prog.set_description("Epoch: %d, Loss: %.4f, Acc: %.4f" %
                              (epoch, numpy.mean(losses), numpy.mean(accs)))
         opt.step()
     torch.save(cnn, "exp_randperm_cifar_wrn28.pt")
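 # Next, train only the final fully connected layer (cnn.fc) with Adam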
 opt = torch.optim.Adam(cnn.fc.parameters(), eps=1e-2)
 dl = DataLoader(ds, 100, True)
 test_dl = get_test_cifar(100)
 for epoch in range(100):
     prog = tqdm.tqdm(dl)
     accs = []
     losses = []
     for x, y in prog:
         y_cpu = y.detach().cpu().numpy()
         x, y = x.to(dev), y.to(dev)
         opt.zero_grad()
         logit_2 = cnn(x)
         loss = F.cross_entropy(logit_2, y)
         loss.backward()
         losses.append(loss.item())
         accs.append(
             (logit_2.detach().cpu().numpy().argmax(-1) == y_cpu).mean())
         prog.set_description("Epoch: %d, Loss: %.4f, Acc: %.4f" %
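
The loop above only tracks training loss and accuracy; a small evaluation pass over `test_dl` could look like the sketch below. The `evaluate` helper is an assumption, not part of the snippet, and could be called once per epoch to report clean test accuracy alongside the training metrics.

import numpy
import torch

@torch.no_grad()
def evaluate(cnn, test_dl, dev):
    # Hypothetical clean-accuracy pass over the CIFAR-10 test loader.
    cnn.eval()
    accs = []
    for x, y in test_dl:
        preds = cnn(x.to(dev)).argmax(-1).cpu().numpy()
        accs.append((preds == y.numpy()).mean())
    cnn.train()
    return float(numpy.mean(accs))
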
Example #3
# import vgg

plotlib.style.use('seaborn')
plotlib.rcParams['ps.useafm'] = True
fsize = 24
tsize = 28
parameters = {
    'axes.labelsize': tsize,
    'axes.titlesize': tsize,
    'xtick.labelsize': fsize,
    'ytick.labelsize': fsize,
    'legend.fontsize': fsize
}
plotlib.rcParams.update(parameters)

test_loader = get_test_cifar(1)
model = get_model_for_attack('model2')
'''model = vgg.vgg13_bn()
ensemble = torch.load('vgg13bn_regm2.dat', map_location=torch.device('cpu'))
model.load_state_dict(ensemble)'''
model.eval()
for x, y in test_loader:

    def func(cx):
        logits = model(cx)
        # labels = torch.eye(10).to(y.device)[y]
        return torch.nn.functional.cross_entropy(logits, y)

    def safe_jac(cx):
        jac = torch.autograd.functional.jacobian(func, cx)
        jac = torch.randn_like(jac) * 1e-7 + jac
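
The listing is cut off inside `safe_jac`, which jitters the Jacobian with tiny Gaussian noise so a sign-based step never sees exactly-zero gradients. Assuming the helper finishes with `return jac`, a hedged sketch of how the jittered gradient might then be used inside the loop, one FGSM-style step per test image, is:

    # Assumed continuation (not the original code): one FGSM-style step using
    # the jittered gradient; the epsilon budget here is a guess.
    epsilon = 8.0 / 255.0
    grad = safe_jac(x)
    x_adv = torch.clamp(x + epsilon * grad.sign(), 0.0, 1.0)
    with torch.no_grad():
        print('clean:', model(x).argmax(-1).item(),
              'adv:', model(x_adv).argmax(-1).item(),
              'label:', y.item())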