def generate_large_eps_adversarial_examples(net, batch_generator, eps, nb_iter, save_dir):
    attack = IPGD(eps, eps / 4.0, nb_iter)
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    pbar = tqdm(enumerate(batch_generator))
    net.eval()
    for i, sample in pbar:
        data = sample['data'].cuda()
        label = sample['label'].cuda()
        imgs = attack.attack(net, data, label)
        # Convert the perturbed batch back to uint8 images for saving.
        imgs = imgs.detach().cpu().numpy() * 255
        imgs = imgs.astype(np.uint8)
        for j, img in enumerate(imgs):
            index = j + i * data.size(0)
            save_path = os.path.join(save_dir, '{}.png'.format(index))
            img = np.transpose(img, (1, 2, 0))  # CHW -> HWC
            imwrite(save_path, img)
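# NOTE: IPGD itself is not defined in this file. From its call sites
# (IPGD(eps, sigma, nb_iter, ...), .attack(net, data, label),
# .get_batch_accuracy(net, data, label)) it appears to be an iterative PGD
# attack. A minimal l_inf sketch under those assumptions follows; it is not
# the repo's actual implementation, and the l2 variant used later is omitted.
import torch
import numpy as np


class IPGD(object):
    """Iterative PGD sketch matching the interface assumed above."""

    def __init__(self, eps, sigma, nb_iter, norm=np.inf, DEVICE=torch.device('cuda')):
        self.eps = eps          # maximum l_inf perturbation
        self.sigma = sigma      # per-step size
        self.nb_iter = nb_iter
        self.norm = norm
        self.DEVICE = DEVICE
        self.criterion = torch.nn.CrossEntropyLoss()

    def attack(self, net, data, label):
        adv = data.clone().detach()
        for _ in range(self.nb_iter):
            adv.requires_grad_(True)
            loss = self.criterion(net(adv), label)
            grad, = torch.autograd.grad(loss, adv)
            with torch.no_grad():
                adv = adv + self.sigma * grad.sign()                        # ascent step
                adv = data + torch.clamp(adv - data, -self.eps, self.eps)   # project to eps-ball
                adv = torch.clamp(adv, 0.0, 1.0)                            # keep valid pixel range
        return adv.detach()

    def get_batch_accuracy(self, net, data, label):
        # Accuracy of the network on the adversarially perturbed batch.
        adv = self.attack(net, data, label)
        pred = net(adv).argmax(dim=1)
        return (pred == label).float().mean().item()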
def evalGivenEps(net, batch_generator, eps, nb_iter):
    defense_accs = AvgMeter()
    net.eval()
    attack = IPGD(eps, eps / 2.0, nb_iter)
    pbar = tqdm(batch_generator)
    for data, label in pbar:
        data = data.cuda()
        label = label.cuda()
        defense_accs.update(attack.get_batch_accuracy(net, data, label))
        pbar.set_description('Evaluating Robustness')
    return defense_accs.mean
def evalRoboustness(net, batch_generator):
    defense_accs = AvgMeter()
    epsilons = [4, 8, 12, 16, 20, 24]
    nb_iters = [40, 80, 120]
    Attacks = []
    for e in epsilons:
        e = e / 255.0
        for nb in nb_iters:
            # e / 2.0, not e // 2: floor division on a float below 1 would zero out the step size.
            Attacks.append(IPGD(e, e / 2.0, nb))
    net.eval()
    pbar = tqdm(batch_generator)
    for data, label in pbar:
        data = data.cuda()
        label = label.cuda()
        # Sample 4 of the 18 (eps, nb_iter) attack combinations per batch;
        # high=len(Attacks) so the last attack is reachable (high=17 skipped index 17).
        choices = np.random.randint(low=0, high=len(Attacks), size=4)
        for c in choices:
            defense_accs.update(Attacks[c].get_batch_accuracy(net, data, label))
        pbar.set_description('Evaluating Robustness')
    return defense_accs.mean
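# AvgMeter is only used here through .update(value) and .mean; a minimal
# running-average sketch matching that assumed interface (the repo's own
# class may differ):
class AvgMeter(object):
    """Running average over scalar updates."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n

    @property
    def mean(self):
        return self.sum / max(self.count, 1)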
def train(epoch_num, model, train_loader, optimizer, scheduler, criterion, writer, adv_ratio=0):
    correct = 0
    total = 0
    train_loss = 0.
    scheduler.step()  # step the LR schedule once per epoch
    for j, data in enumerate(tqdm(train_loader)):
        images, labels = data  # images: (N_batch, 3, 32, 32), labels: (N_batch,)
        if CUDA:
            images, labels = images.cuda(), labels.cuda()
        # With probability adv_ratio, replace the clean batch with adversarial examples.
        if torch.rand(1).item() < adv_ratio:
            images = IPGD(model, images, labels, criterion=criterion, CUDA=CUDA)
        pred = model(images)
        optimizer.zero_grad()
        loss = criterion(pred, labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, predicted = torch.max(pred.data, 1)  # dim=1; predicted holds the argmax class indices
        correct += predicted.eq(labels.data).sum().item()
        total += len(labels)
    acc = correct / total
    print('\nepoch:', epoch_num, 'train loss: %.3f' % (train_loss / (j + 1)) + ' accuracy:', acc)
    # Log to tensorboardX.
    if writer:
        writer.add_scalar('train loss', train_loss / (j + 1), epoch_num * len(train_loader) + j)
        writer.add_scalar('train acc', acc, epoch_num * len(train_loader) + j)
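# NOTE: in this training loop IPGD is invoked as a plain function,
# IPGD(model, images, labels, criterion=..., CUDA=...), unlike the class-style
# attack used earlier; presumably the two live in different modules. A sketch
# of the functional form, with assumed defaults (eps, sigma, nb_iter are
# guesses, not the repo's values):
def IPGD(model, images, labels, criterion, CUDA=True,
         eps=8 / 255.0, sigma=2 / 255.0, nb_iter=20):
    """Functional PGD matching the call sites in train() and the MNIST loop."""
    adv = images.clone().detach()
    for _ in range(nb_iter):
        adv.requires_grad_(True)
        loss = criterion(model(adv), labels)
        grad, = torch.autograd.grad(loss, adv)
        with torch.no_grad():
            adv = adv + sigma * grad.sign()
            adv = images + torch.clamp(adv - images, -eps, eps)
            adv = torch.clamp(adv, 0.0, 1.0)
    return adv.detach()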
def evalRoboustness(net, batch_generator):
    defense_accs = AvgMeter()
    epsilons = [4, 8, 12, 16, 20, 24]
    nb_iters = [40, 80, 120]
    Attacks = []
    for e in epsilons:
        for nb in nb_iters:
            Attacks.append(IPGD(e, e // 2, nb))
    net.eval()
    pbar = tqdm(batch_generator)
    for mn_batch in pbar:
        data = torch.tensor(mn_batch['data'], dtype=torch.float32).cuda()
        label = torch.tensor(mn_batch['label'], dtype=torch.int64).cuda()
        # high=len(Attacks) so all 18 attack combinations are reachable.
        choices = np.random.randint(low=0, high=len(Attacks), size=4)
        for c in choices:
            defense_accs.update(Attacks[c].get_batch_accuracy(net, data, label))
        pbar.set_description('Evaluating Robustness')
    return defense_accs.mean
# ----------
for epoch in range(opt.n_epochs):
    for i, (imgs, labels) in enumerate(dataloader):
        # Adversarial ground truths
        valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)
        fake = Variable(Tensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)
        # Configure input
        real_imgs = Variable(imgs.type(Tensor))
        if cuda:
            labels = labels.cuda()
        # TODO: replace the DCGAN training inputs with adversarial examples
        adv_exps = IPGD(model, real_imgs, labels, criterion=criterion, CUDA=cuda)
        # -----------------
        #  Train Generator
        # -----------------
        optimizer_G.zero_grad()
        # Sample noise as generator input
        z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
        # Generate a batch of images
        gen_imgs = generator(z)
        # Loss measures generator's ability to fool the discriminator
        g_loss = adversarial_loss(discriminator(gen_imgs), valid)
parser.add_argument('--iter', default=120, type=int,
                    help='number of iterations used to generate adversarial examples with IPGD')
parser.add_argument('--eps', default=40, type=int,
                    help='maximum bound of adversarial perturbations (in 0-255 pixel units)')
parser.add_argument('-d', type=int, default=0)
args = parser.parse_args()

DEVICE = torch.device('cuda:{}'.format(args.d))
save_dir = os.path.join('../adv_exps/imgs/', args.resume.split('/')[-2])
if not os.path.exists(save_dir):
    os.mkdir(save_dir)
ds_val = create_test_dataset(32)
net = cifar_resnet18(3)
print('loading at {}'.format(args.resume))
checkpoint = torch.load(args.resume)
net.load_state_dict(checkpoint['state_dict'])
net.to(DEVICE)
print(save_dir)
PgdAttack = IPGD(eps=args.eps / 255.0, sigma=1 / 255.0, nb_iter=args.iter, norm=np.inf)
generate_large_adv(net, ds_val, PgdAttack, DEVICE, save_dir)
def get_result(net, dl, DEVICE, net_name='', dl_name='raw'):
    PgdAttack = IPGD(eps=8 / 255.0, sigma=2 / 255.0, nb_iter=40, norm=np.inf, DEVICE=DEVICE)
    save_path = os.path.join('../Maps/', dl_name, net_name)
    if not os.path.exists(save_path):
        os.makedirs(save_path)  # the path is nested, so create intermediate directories too
    net.eval()
    # ImageNet normalization constants, shaped (1, 3, 1, 1) for broadcasting.
    mean = torch.tensor(np.array([0.485, 0.456, 0.406]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
    std = torch.tensor(np.array([0.229, 0.224, 0.225]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
    mean = mean.to(DEVICE)
    std = std.to(DEVICE)
    for i, (batch_img, batch_label) in enumerate(dl):
        batch_img = batch_img.to(DEVICE)
        batch_label = batch_label.to(DEVICE)
        batch_img = PgdAttack.attack(net, batch_img, batch_label).detach()
        if i > 5:  # only process the first few batches
            break
        for j in range(int(batch_img.size(0))):
            img = batch_img[j]
            label = batch_label[j]
            # Compute and save the SmoothGrad saliency map for this adversarial image.
            grad_map = GetSmoothGrad(net, img, label, DEVICE, stdev_spread=0.10)
            clip_and_save_single_img(grad_map, i * batch_img.size(0) + j, save_dir=save_path)
            # Un-normalize the adversarial image back to [0, 255] uint8.
            simg = img * std + mean
            simg = torch.clamp(simg, 0, 1)
            simg = simg.detach().cpu().numpy() * 255.0
            simg = simg[0]  # broadcasting with the (1, 3, 1, 1) stats leaves a leading batch dim
            simg = np.transpose(simg, (1, 2, 0)).astype(np.uint8)
            # Reload the saved saliency map and build a (gray map | color map | image) strip.
            grad = io.imread(os.path.join(save_path, '{}-smooth.png'.format(i * batch_img.size(0) + j)),
                             as_gray=False)
            gray_grad = np.mean(grad, axis=-1, keepdims=True)
            gray_grad = gray_grad.astype(np.uint8)
            gray_grad = np.repeat(gray_grad, 3, axis=2)
            pair_img = np.concatenate((gray_grad, grad, simg), axis=1)
            io.imsave(os.path.join(save_path, '{}-pair.png'.format(i * batch_img.size(0) + j)), pair_img)
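# GetSmoothGrad is not defined in this section. Its name and the call above
# suggest SmoothGrad (Smilkov et al., 2017): average input gradients over
# Gaussian-noised copies of the image. A sketch under that assumption; the
# signature is copied from the call site, the body and n_samples are guesses.
def GetSmoothGrad(net, img, label, DEVICE, stdev_spread=0.10, n_samples=25):
    """Average the input gradient over n_samples noisy copies of img."""
    stdev = stdev_spread * (img.max() - img.min()).item()
    total_grad = torch.zeros_like(img)
    for _ in range(n_samples):
        noisy = (img + torch.randn_like(img) * stdev).unsqueeze(0).detach().requires_grad_(True)
        logits = net(noisy)
        score = logits[0, label]                       # logit of the target class
        grad, = torch.autograd.grad(score, noisy)
        total_grad += grad[0]
    # Returned as a numpy array, assuming clip_and_save_single_img expects one.
    return (total_grad / n_samples).detach().cpu().numpy()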
torch.backends.cudnn.benchmark = True

ds_train = Dataset(dataset_name='train')
ds_train.load()
ds_train = EpochDataset(ds_train)
ds_val = Dataset(dataset_name='val')
ds_val.load()
ds_val = EpochDataset(ds_val)

net = cifar_resnet18()
net.cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(net.parameters(), lr=get_learing_rate(0), momentum=0.9,
                      weight_decay=args.weight_decay)
PgdAttack = IPGD(eps=args.eps, sigma=args.eps / 2, nb_iter=args.iter, norm=np.inf)

best_prec = 0.0
if args.resume:
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        check_point = torch.load(args.resume)
        args.start_epoch = check_point['epoch']
        net.load_state_dict(check_point['state_dict'])
        best_prec = check_point['best_prec']
        print('Model loaded from {} with best_prec {}'.format(args.resume, best_prec))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))
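# get_learing_rate (sic) is referenced above but not defined in this section.
# A plausible step-decay sketch; the base rate and decay epochs are assumptions,
# not the repo's actual schedule.
def get_learing_rate(epoch, base_lr=0.1):
    """Step-decay learning-rate schedule (assumed milestones)."""
    lr = base_lr
    for milestone in (60, 90):  # assumed decay epochs
        if epoch >= milestone:
            lr *= 0.1
    return lr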
net = models.resnet18(pretrained=True)
#net.avgpool = nn.AdaptiveAvgPool2d(1)
#net.fc.out_features = 200
net.fc = nn.Linear(512, 257)  # replace the final layer for 257 output classes
net.to(DEVICE)
criterion = nn.CrossEntropyLoss().to(DEVICE)
optimizer = optim.SGD(net.parameters(), lr=get_learing_rate(0), momentum=0.9,
                      weight_decay=args.weight_decay)
args.eps = args.eps / 255.0  # convert from pixel units to the [0, 1] image range
PgdAttack = IPGD(eps=args.eps, sigma=args.eps / 2.0, nb_iter=args.iter, norm=np.inf, DEVICE=DEVICE)

best_prec = 0.0
if args.resume:
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        check_point = torch.load(args.resume)
        args.start_epoch = check_point['epoch']
        net.load_state_dict(check_point['state_dict'])
        best_prec = check_point['best_prec']
        print('Model loaded from {} with best_prec {}'.format(args.resume, best_prec))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))
        param_group['lr'] = lr

torch.backends.cudnn.benchmark = True

ds_train = create_train_dataset(args.batch_size)
ds_val = create_test_dataset(args.batch_size)
net = models.resnet18(pretrained=True)
#net.avgpool = nn.AdaptiveAvgPool2d(1)
net.fc = nn.Linear(512, 257)
net.to(DEVICE)
criterion = nn.CrossEntropyLoss().to(DEVICE)
optimizer = optim.SGD(net.parameters(), lr=get_learing_rate(0), momentum=0.9,
                      weight_decay=args.weight_decay)
#args.eps = args.eps / 255.0
# l2 attack: eps stays in pixel units, the step size is scaled to the [0, 1] range.
PgdAttack = IPGD(eps=args.eps, sigma=args.eps / (2.0 * 255), nb_iter=args.iter, norm=2, DEVICE=DEVICE)

best_prec = 0.0
if args.resume:
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        check_point = torch.load(args.resume)
        args.start_epoch = check_point['epoch']
        net.load_state_dict(check_point['state_dict'])
        best_prec = check_point['best_prec']
        print('Model loaded from {} with best_prec {}'.format(args.resume, best_prec))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))
else:  # train
    optimizer = torch.optim.Adam(model.parameters())
    for i in range(args.train_epoch):
        correct = 0
        total = 0
        train_loss = 0.
        for j, data in enumerate(tqdm(train_loader)):
            images, labels = data
            if CUDA:
                images, labels = images.cuda(), labels.cuda()
            if torch.rand(1).item() < args.adv_ratio:
                images = IPGD(model, images, labels, criterion=criterion, CUDA=CUDA)
            images = images.reshape((-1, 784))  # flatten 28x28 MNIST images for the MLP
            pred = model(images)
            optimizer.zero_grad()
            loss = criterion(pred, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, predicted = torch.max(pred.data, 1)
            correct += predicted.eq(labels.data).sum().item()
            total += len(labels)
        acc = correct / total
        print('\nepoch:', i, 'train loss:', train_loss / (j + 1), 'accuracy:', acc)
for i in range(args.train_epoch):
    train(epoch_num=i, model=model, train_loader=train_loader, optimizer=optimizer,
          scheduler=scheduler, criterion=criterion, writer=writer, adv_ratio=args.adv_ratio)

# save state_dict
if args.save:
    torch.save(model.state_dict(), args.save_path)

# test or attack
correct = 0
total = 0
for j, data in enumerate(tqdm(test_loader)):
    images, labels = data
    if CUDA:
        images, labels = images.cuda(), labels.cuda()
    if args.attack:  # attack
        if args.attack == 'FGSM':
            pred = model(FGSM(model, images, labels, criterion=criterion, CUDA=CUDA))
        elif args.attack == 'IPGD':
            pred = model(IPGD(model, images, labels, criterion=criterion, CUDA=CUDA))
    else:  # test
        pred = model(images)
    _, predicted = torch.max(pred.data, 1)
    correct += predicted.eq(labels.data).sum().item()
    total += len(labels)
acc = correct / total
print('correct:', correct, 'total:', total, 'accuracy:', acc)
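# FGSM is referenced above but not defined in this section. A single-step
# sketch matching the call FGSM(model, images, labels, criterion=..., CUDA=...);
# eps=0.1 is an assumed default, not necessarily the repo's value.
def FGSM(model, images, labels, criterion, CUDA=True, eps=0.1):
    """One gradient-sign step on the input (CUDA kept for signature compatibility)."""
    images = images.clone().detach().requires_grad_(True)
    loss = criterion(model(images), labels)
    grad, = torch.autograd.grad(loss, images)
    adv = images + eps * grad.sign()        # single FGSM step
    return torch.clamp(adv, 0.0, 1.0).detach()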