def run():
    """Train AdvGAN against a pretrained MNIST target classifier.

    Loads the frozen target model, builds the MNIST training dataloader,
    and runs the AdvGAN attack training loop for a fixed number of epochs.
    """
    # Required on Windows when this function is used as a multiprocessing
    # entry point (no-op elsewhere).
    torch.multiprocessing.freeze_support()

    use_cuda = True
    image_nc = 1        # MNIST images are single-channel
    epochs = 60
    batch_size = 128
    BOX_MIN = 0         # valid pixel range for generated adversarial images
    BOX_MAX = 1

    # Define what device we are using
    print("CUDA Available: ", torch.cuda.is_available())
    device = torch.device(
        "cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")

    pretrained_model = "./MNIST_target_model.pth"
    targeted_model = MNIST_target_net().to(device)
    # Fix: pass map_location so a checkpoint saved from a GPU run still
    # loads on a CPU-only machine (original call had no map_location).
    targeted_model.load_state_dict(
        torch.load(pretrained_model, map_location=device))
    targeted_model.eval()  # target stays frozen; only the GAN is trained
    model_num_labels = 10

    # MNIST train dataset and dataloader declaration
    mnist_dataset = torchvision.datasets.MNIST(
        './dataset', train=True, transform=transforms.ToTensor(),
        download=True)
    dataloader = DataLoader(mnist_dataset, batch_size=batch_size,
                            shuffle=True, num_workers=1)

    advGAN = AdvGAN_Attack(device, targeted_model, model_num_labels,
                           image_nc, BOX_MIN, BOX_MAX)
    advGAN.train(dataloader, epochs)
# Make sure the checkpoint/output directory exists before training starts.
if not os.path.exists(models_path):
    os.makedirs(models_path)

# Multi-GPU wrap of the frozen target classifier; only the GAN is trained.
targeted_model = targeted_model.cuda()
targeted_model = nn.DataParallel(targeted_model)
targeted_model.eval()
model_num_labels = 101

cfg = get_opts(data_name)

# Train dataset and dataloader declaration (dataset selected by data_name).
if data_name == 'cifar10':
    # Fix: in the original call the closing paren of os.path.join was
    # misplaced, so train=/transform=/download= were passed to
    # os.path.join instead of CIFAR10 (a TypeError at runtime).
    trainset = torchvision.datasets.CIFAR10(
        root=os.path.join(args.dir, 'data/cifar10'),
        train=True,
        transform=cfg['transform_train'],
        download=True)
elif data_name == 'imagenette':
    trainset = ImageFolder(os.path.join(args.dir, 'imagenette2/train'),
                           transform=cfg['transform_train'])
elif data_name == 'caltech101':
    trainset = ImageFolder(os.path.join(args.dir, 'Caltech101/train'),
                           transform=cfg['transform_train'])

train_loader = DataLoader(
    trainset,
    batch_size=4,
    shuffle=True,
    num_workers=4,
    pin_memory=True
)

advGAN = AdvGAN_Attack(device, targeted_model, model_num_labels,
                       image_nc, BOX_MIN, BOX_MAX, models_path)
advGAN.train(train_loader, epochs)
# Create the output directories for checkpoints and per-model artifacts.
if not os.path.exists(models_path):
    os.makedirs(models_path)
if not os.path.exists(models_path + model_name):
    os.makedirs(models_path + model_name)

# Define what device we are using
print("CUDA Available: ",torch.cuda.is_available())
device = torch.device("cuda" if (
    use_cuda and torch.cuda.is_available()) else "cpu")

# Load the frozen MNIST target classifier; map_location keeps loading
# working on CPU-only machines even for GPU-saved checkpoints.
pretrained_model = "./MNIST_target_model.pth"
targeted_model = MNIST_target_net().to(device)
targeted_model.load_state_dict(
    torch.load(pretrained_model, map_location=device))
targeted_model.eval()
model_num_labels = 10

# MNIST train dataset and dataloader declaration
mnist_dataset = torchvision.datasets.MNIST('./dataset', train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
dataloader = DataLoader(mnist_dataset, batch_size=batch_size,
                        shuffle=True, num_workers=1)

# AdvGAN attack with PGD parameters (eps, pgd_iter), output/checkpoint
# paths, a TensorBoard-style writer, and separate learning rates for the
# encoder (E_lr) and defensive generator (defG_lr).
advGAN = AdvGAN_Attack(device, targeted_model, model_num_labels, image_nc,
                       BOX_MIN, BOX_MAX, eps, pgd_iter, models_path,
                       out_path, model_name, writer, args.E_lr,
                       args.defG_lr)
advGAN.train(dataloader, args.epochs)
# The target classifier stays frozen during the attack; only the GAN trains.
target_model.eval()
model_num_labels = 10

# ImageNet-style channel normalization plus light augmentation
# (horizontal flip + padded 32x32 crop) for the CIFAR-10 training stream.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, 4),
    transforms.ToTensor(),
    normalize,
])

# CIFAR-10 training set feeding the AdvGAN training loop.
cifar_train = datasets.CIFAR10('../cifar-10-batches-py', train=True,
                               download=True, transform=transform)
dataloader = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)

advGAN = AdvGAN_Attack(device, target_model, model_num_labels,
                       image_nc, BOX_MIN, BOX_MAX, args)
advGAN.train(dataloader, args.epoch)
# Tail of a call whose opening line is above this chunk (presumably the
# target-model training/loading routine) — only the keyword arguments and
# closing paren are visible here.
epochs=EPOCHS_TARGET_MODEL,
    train_dataloader=train_dataloader,
    test_dataloader=test_dataloader,
    dataset_size=test_set_size)
print('TARGET LOADED!')

# train AdvGAN
print('\nTRAINING ADVGAN...')
# Attack hyperparameters (loss weights alpha/beta/gamma, CW kappa/c,
# per-iteration D/G step counts, optional relativistic GAN loss) all come
# from module-level constants defined outside this chunk.
advGAN = AdvGAN_Attack(device, target_model, n_labels, n_channels,
                       target=TARGET, lr=LR, l_inf_bound=l_inf_bound,
                       alpha=ALPHA, beta=BETA, gamma=GAMMA, kappa=KAPPA,
                       c=C, n_steps_D=N_STEPS_D, n_steps_G=N_STEPS_G,
                       is_relativistic=IS_RELATIVISTIC)
advGAN.train(train_dataloader, EPOCHS)

# load the trained AdvGAN
print('\nLOADING TRAINED ADVGAN!')
# Generator checkpoint path is keyed by the final epoch number.
adv_GAN_path = './checkpoints/AdvGAN/G_epoch_{}.pth'.format(EPOCHS)
adv_GAN = models.Generator(n_channels, n_channels, TARGET).to(device)
# NOTE(review): no map_location here — loading a GPU-saved checkpoint on a
# CPU-only machine would fail; confirm the intended environment.
adv_GAN.load_state_dict(torch.load(adv_GAN_path))
adv_GAN.eval()
def main():
    """Transfer-train single models or an ensemble on a cats/dogs-style
    ImageFolder dataset, then (ensemble branch) train AdvGAN against the
    ensemble and visualize adversarial examples.

    Behavior is selected by FLAGS.train_single / FLAGS.train_ensemble.
    """
    device = torch.device("cuda:0" if FLAGS.cuda else "cpu")
    print('Loading data...\n')
    train_transform, _ = _transforms_catsdogs(FLAGS)
    train_data = dset.ImageFolder(root=FLAGS.data_dir,
                                  transform=train_transform)
    assert train_data
    # Random train/validation split of one ImageFolder dataset, controlled
    # by FLAGS.data_split (fraction used for training).
    num_train = len(train_data)
    indices = list(range(num_train))
    random.shuffle(indices)
    split = int(np.floor(FLAGS.data_split * num_train))
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=FLAGS.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(
            indices[:split]),
        num_workers=2)
    valid_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=FLAGS.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(
            indices[split:num_train]),
        num_workers=2)

    if FLAGS.train_single:
        # --- Branch 1: transfer-train one torchvision model ------------
        print('Transfer training model {}...\n'.format(FLAGS.model))
        model = torch.hub.load('pytorch/vision', FLAGS.model,
                               pretrained=True)
        # Freeze the pretrained backbone; transfer_init swaps the head and
        # returns the parameters that remain trainable.
        for param in model.parameters():
            param.requires_grad = False
        model, param_to_train = transfer_init(model, FLAGS.model,
                                              FLAGS.classes)
        model.to(device)
        optimizer = torch.optim.SGD(param_to_train, FLAGS.lr,
                                    momentum=0.9, weight_decay=5e-4)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=5, gamma=0.1)
        criterion = nn.CrossEntropyLoss()
        # Train
        best_acc = 0.0
        for epoch in range(25):
            model.train()
            # NOTE(review): scheduler.step() before the epoch's
            # optimizer.step() calls is the pre-PyTorch-1.1 ordering;
            # modern torch warns and shifts the LR schedule by one epoch.
            scheduler.step()
            print('Epoch {}, lr: {}'.format(epoch, scheduler.get_lr()[0]))
            # data_prefetcher yields (None, None) at end-of-epoch, hence
            # the while-not-None loop instead of a for loop.
            prefetcher = data_prefetcher(train_loader)
            data, target = prefetcher.next()
            batch_idx = 0
            while data is not None:
                optimizer.zero_grad()
                output = model(data)
                pred = output.max(1, keepdim=True)[1]
                loss = criterion(output, target)
                loss.backward()
                optimizer.step()
                correct = pred.eq(target.view_as(pred)).sum().item()
                if batch_idx % FLAGS.log_interval == 0:
                    print('[{}/{}]\tloss: {:.4f}\tbatch accuracy: {:.4f}%'.
                          format(batch_idx * FLAGS.batch_size, num_train,
                                 loss.item(), 100 * correct / data.size(0)))
                data, target = prefetcher.next()
                batch_idx += 1
            # Eval
            model.eval()
            test_loss = 0
            test_correct = 0
            with torch.no_grad():
                valid_prefetcher = data_prefetcher(valid_loader)
                data, target = valid_prefetcher.next()
                while data is not None:
                    output = model(data)
                    test_loss += criterion(output, target).item()
                    pred = output.max(1, keepdim=True)[1]
                    test_correct += pred.eq(
                        target.view_as(pred)).sum().item()
                    data, target = valid_prefetcher.next()
            test_loss /= len(valid_loader)
            # Validation-set size is (1 - data_split) * num_train, so this
            # converts the raw correct count into a percentage.
            test_correct = 100 * test_correct / (1 - FLAGS.data_split) / num_train
            print('Eval loss: {:.4f}, accuracy: {:.4f}'.format(
                test_loss, test_correct))
            # Keep only the best-accuracy checkpoint (whole-model save).
            if (test_correct > best_acc):
                best_acc = test_correct
                torch.save(
                    model,
                    os.path.join(FLAGS.model_dir,
                                 '{}.pth'.format(FLAGS.model)))
        print('Best test accuracy for model {}: {:.4f}'.format(
            FLAGS.model, best_acc))

    elif FLAGS.train_ensemble:
        # --- Branch 2: train a voting ensemble, then attack it ----------
        print('Loading model...\n')
        model_names = [
            'mobilenet_v2', 'resnet18', 'densenet121', 'googlenet',
            'resnext50_32x4d'
        ]
        model = ModelEnsemble(model_names, FLAGS.classes, FLAGS.model_dir)
        model.to(device)
        # Only the ensemble's vote layer is optimized; the member models
        # are used as-is.
        optimizer = torch.optim.SGD(model.vote_layer.parameters(),
                                    FLAGS.lr, momentum=0.9,
                                    weight_decay=5e-4)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=1, gamma=0.1)
        criterion = nn.CrossEntropyLoss()
        # Train
        print('Training ensemble model...\n')
        # model = torch.load(os.path.join(FLAGS.model_dir, 'ensemble.pth'))
        for epoch in range(2):
            model.train()
            # NOTE(review): same pre-1.1 scheduler.step() ordering as above.
            scheduler.step()
            print('Epoch {}, lr: {}'.format(epoch, scheduler.get_lr()[0]))
            prefetcher = data_prefetcher(train_loader)
            data, target = prefetcher.next()
            batch_idx = 0
            while data is not None:
                optimizer.zero_grad()
                output = model(data)
                pred = output.max(1, keepdim=True)[1]
                loss = criterion(output, target)
                loss.backward()
                optimizer.step()
                correct = pred.eq(target.view_as(pred)).sum().item()
                if batch_idx % FLAGS.log_interval == 0:
                    print('[{}/{}]\tloss: {:.4f}\tbatch accuracy: {:.4f}%'.
                          format(batch_idx * FLAGS.batch_size, num_train,
                                 loss.item(), 100 * correct / data.size(0)))
                data, target = prefetcher.next()
                batch_idx += 1
        # Eval
        # NOTE(review): unlike the single-model branch, this eval loop is
        # not wrapped in torch.no_grad() — gradients are tracked needlessly.
        model.eval()
        test_loss = 0
        test_correct = 0
        valid_prefetcher = data_prefetcher(valid_loader)
        data, target = valid_prefetcher.next()
        while data is not None:
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.max(1, keepdim=True)[1]
            test_correct += pred.eq(target.view_as(pred)).sum().item()
            data, target = valid_prefetcher.next()
        test_loss /= len(valid_loader)
        print('Eval loss: {:.4f}, accuracy: {:.4f}'.format(
            test_loss,
            100 * test_correct / (1 - FLAGS.data_split) / num_train))
        torch.save(model, os.path.join(FLAGS.model_dir, 'ensemble.pth'))

        # Adversarial attack
        print('Training GAN for adversarial attack...\n')
        # Smaller batch size (16) for GAN training; same training subset.
        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=16,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(
                indices[:split]),
            num_workers=2)
        model.eval()
        # Pixel box constraints [0, 1] for the generated perturbed images.
        advGAN = AdvGAN_Attack(device, model, FLAGS.classes,
                               FLAGS.channels, 0, 1, FLAGS.model_dir)
        # advGAN.netG = torch.load(os.path.join(FLAGS.model_dir, 'netG_epoch_{}.pth'.format(FLAGS.pretrained_epoch)))
        advGAN.train(train_loader, FLAGS.epochs)

        print('Attacking ensemble model...\n')
        test_loss = 0
        test_correct = 0
        adv_examples = []  # up to 64 (orig_pred, adv_pred, image) triples
        # enough = False
        with torch.no_grad():
            valid_prefetcher = data_prefetcher(valid_loader)
            data, target = valid_prefetcher.next()
            while data is not None:
                # for i in range(64):
                #     adv_ex = data[i].squeeze().detach().cpu().numpy()
                #     adv_examples.append((0, 0, adv_ex))
                # break
                output = model(data)
                init_pred = output.max(1, keepdim=True)[1]
                init_pred = init_pred.view_as(target)
                # Generate adversarial versions of the clean batch and
                # re-classify them with the ensemble.
                perturbed_data = advGAN.adv_example(data)
                output = model(perturbed_data)
                test_loss += criterion(output, target).item()
                final_pred = output.max(1, keepdim=True)[1]
                final_pred = final_pred.view_as(target)
                test_correct += final_pred.eq(target).sum().item()
                if len(adv_examples) < 64 and not (final_pred == target).all():
                    # Select samples where the attack flipped the outcome
                    # relative to the clean prediction.
                    # NOTE(review): this rebinds `indices`, shadowing the
                    # dataset-split index list from the top of main();
                    # harmless here only because the split list is not
                    # used again afterwards.
                    indices = torch.ne(final_pred.ne(target),
                                       init_pred.ne(target)).nonzero()
                    for i in range(indices.shape[0]):
                        adv_ex = perturbed_data[
                            indices[i]].squeeze().detach().cpu().numpy()
                        adv_examples.append(
                            (init_pred[indices[i]].item(),
                             final_pred[indices[i]].item(), adv_ex))
                        # adv_ex = perturbed_data[indices[i]].squeeze()
                        # adv_examples.append(adv_ex)
                        if (len(adv_examples) >= 64):
                            # enough = True
                            break
                # if enough:
                #     break
                data, target = valid_prefetcher.next()
        test_loss /= len(valid_loader)
        print('Eval loss: {:.4f}, accuracy: {:.4f}'.format(
            test_loss,
            100 * test_correct / (1 - FLAGS.data_split) / num_train))

        # show 64 results
        # NOTE(review): the `if True` keeps the matplotlib grid path live;
        # the else branch (vutils.save_image) is dead code and would fail
        # anyway because adv_examples holds tuples, not tensors.
        if True:
            cnt = 0
            plt.figure(figsize=(8, 10))
            for i in range(8):
                for j in range(8):
                    cnt += 1
                    plt.subplot(8, 8, cnt)
                    plt.xticks([], [])
                    plt.yticks([], [])
                    orig, adv, ex = adv_examples[i * 8 + j]
                    # CHW -> HWC for imshow.
                    ex = np.transpose(ex, (1, 2, 0))
                    plt.title("{} -> {}".format(orig, adv))
                    plt.imshow(ex)
            plt.tight_layout()
            plt.show()
        else:
            viz_sample = torch.stack(adv_examples, dim=0)
            print(viz_sample.shape)
            vutils.save_image(viz_sample, 'adv_examples.png', nrow=8,
                              normalize=True)
# Checkpoints for the two MNIST target classifiers attacked jointly.
pretrained_model1 = "./MNIST_target_model.pth"
pretrained_model2 = "./MNIST_target_modelA.pth"

targeted_model1 = MNIST_target_net().to(device)
targeted_model2 = MNIST_target_netA().to(device)

# Load both checkpoints and freeze the targets; only the GAN is trained.
for net, ckpt in ((targeted_model1, pretrained_model1),
                  (targeted_model2, pretrained_model2)):
    net.load_state_dict(torch.load(ckpt))
    net.eval()

model_num_labels = 10

# MNIST training stream feeding the AdvGAN loop.
mnist_dataset = torchvision.datasets.MNIST(
    './dataset', train=True, transform=transforms.ToTensor(),
    download=True)
dataloader = DataLoader(mnist_dataset, batch_size=batch_size,
                        shuffle=True, num_workers=1)

# Time the full AdvGAN training run against both targets.
start_time = time.time()
advGAN = AdvGAN_Attack(device, targeted_model1, targeted_model2,
                       model_num_labels, image_nc, BOX_MIN, BOX_MAX)
advGAN.train(dataloader, epochs)
end_time = time.time()
print("advGAN training time is {}".format(end_time - start_time))
device = torch.device("cuda" if (
    use_cuda and torch.cuda.is_available()) else "cpu")

pretrained_model = "./target_model.pth"
# initialize_model() builds the backbone and reports its expected input
# resolution for the transform pipeline below.
target_modeled, input_size = target_net(model_name, num_classes,
                                        feature_extract,
                                        use_pretrained).initialize_model()
targeted_model = target_modeled.to(
    device)  # the module itself must be moved; to(device) inside forward is not enough
# Fix: pass map_location so a checkpoint saved from a GPU run still loads
# on a CPU-only machine (original call had no map_location).
targeted_model.load_state_dict(
    torch.load(pretrained_model, map_location=device))
targeted_model.eval()  # target stays frozen; only the GAN is trained

# train dataset and dataloader declaration
train_data_transform = transforms.Compose([
    transforms.RandomResizedCrop(input_size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
# download=False: the CIFAR-10 archive is expected to exist in ./dataset.
dataset = torchvision.datasets.CIFAR10('./dataset', train=True,
                                       transform=train_data_transform,
                                       download=False)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                        num_workers=1)

advGAN = AdvGAN_Attack(device, targeted_model, num_classes,
                       BOX_MIN, BOX_MAX)
advGAN.train(dataloader, epochs)