def eval(models_config, attacks_config, log_file, cuda_id):
    model_list = load_model(models_config)
    attack_list = load_attacks(attacks_config)
    device = torch.device("cuda:{}".format(cuda_id))
    for model_ in model_list:
        datasetname, modelfile, model = model_[0], model_[1], model_[2]
        dataset = load_dataset(datasetname)
        with open(log_file, 'a+') as logf:
            logf.write('model: {}\n'.format(modelfile))
        print('model: {}'.format(modelfile))
        model.to(device)
        model.eval()

        loader = DataLoader(dataset, batch_size=16, shuffle=True)
        err, _ = training.epoch(loader, model, device=device, use_tqdm=True)
        with open(log_file, 'a+') as logf:
            logf.write('standard acc: {}\n'.format(1 - err))
        print('standard acc: {}'.format(1 - err))

        for attack_ in attack_list:
            with open(log_file, 'a+') as logf:
                logf.write(attack_['name'] + '\n')
                jstr = json.dumps(attack_['args'])
                logf.write(jstr + '\n')
            print(attack_['name'])
            print(jstr)

            if attack_['foolbox']:
                loader = DataLoader(dataset, batch_size=1, shuffle=True)
                fmodel = foolbox.models.PyTorchModel(model, bounds=(0, 1),
                                                     num_classes=10,
                                                     preprocessing=(0, 1),
                                                     device=device)
                attack = attack_['method'](fmodel,
                                           distance=foolbox.distances.Linfinity)
                err, _ = attack_foolbox.epoch_foolbox(loader, attack,
                                                      use_tqdm=True,
                                                      **attack_['args'])
                with open(log_file, 'a+') as logf:
                    logf.write('acc: {}\n'.format(1 - err))
                print('acc: {}'.format(1 - err))
            else:
                loader = DataLoader(dataset, batch_size=16, shuffle=True)
                attack = attack_['method']
                err, _ = training.epoch_adversarial(loader, model, attack,
                                                    device=device,
                                                    use_tqdm=True,
                                                    **attack_['args'])
                with open(log_file, 'a+') as logf:
                    logf.write('acc: {}\n'.format(1 - err))
                print('acc: {}'.format(1 - err))
        model.to('cpu')
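# Hedged sketch (not from the original source): the structures eval() above
# expects load_model / load_attacks to return, inferred from how their elements
# are indexed. The entry values, paths, and attack parameters are illustrative
# only, and attack.pgd_linf_untargeted is assumed to be importable here.
#
# model_list: each entry is (dataset_name, model_file, nn.Module)
# attack_list: each entry is a dict with keys 'name', 'method', 'args', 'foolbox'
_example_attack_entry = {
    'name': 'pgd_linf_untargeted',
    'method': attack.pgd_linf_untargeted,    # non-foolbox path in eval()
    'args': {'epsilon': 0.3, 'alpha': 0.05, 'num_iter': 20, 'randomize': True},
    'foolbox': False,
}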
def eval(config, cuda_id):
    source_models = load_model(config['source_models'])
    target_models = load_model(config['target_models'])
    dataset = load_dataset(config['dataset'])
    logfilename = config['logfilename']
    device = torch.device("cuda:{}".format(cuda_id))

    if config['dataset'] == 'MNIST':
        eps = 0.3
        alpha = 0.05
    elif config['dataset'] == 'CIFAR':
        eps = 0.031
        alpha = 0.007
    elif config['dataset'] == 'SVHN':
        eps = 0.047
        alpha = 0.01
    else:
        raise NotImplementedError

    test_loader = DataLoader(dataset, batch_size=25, shuffle=True)
    for (tname, target) in target_models:
        target.to(device)
        target.eval()
        err, loss = training.epoch(test_loader, target, device=device,
                                   use_tqdm=True)
        print('target model: {}'.format(tname))
        print('std acc: {}'.format(1 - err))
        with open(logfilename, 'a+') as logf:
            logf.write('target model: {}\n'.format(tname))
            logf.write('std acc: {}\n'.format(1 - err))

        for (sname, source) in source_models:
            source.to(device)
            source.eval()
            source_err, err1, err2 = training.epoch_transfer_attack(
                test_loader, source, target,
                attack=atk.pgd_linf_untargeted, device=device,
                n_test=5000, use_tqdm=True,
                epsilon=eps, alpha=alpha, num_iter=20, randomize=True)
            print('source model: {}'.format(sname))
            print('adv acc', 1 - err1)
            with open(logfilename, 'a+') as logf:
                logf.write('source model: {}\n'.format(sname))
                logf.write('adv acc: {}\n'.format(1 - err1))
            source.to('cpu')
        target.to('cpu')
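# Hedged sketch (assumption, not from the original source): a minimal config for
# the transfer-attack eval() above, showing only the keys it reads. The values of
# 'source_models' / 'target_models' are whatever load_model() expects; the paths
# here are hypothetical.
_example_transfer_config = {
    'dataset': 'MNIST',                                      # selects eps=0.3, alpha=0.05 above
    'source_models': 'configs/example_mnist_sources.json',   # hypothetical path
    'target_models': 'configs/example_mnist_targets.json',   # hypothetical path
    'logfilename': 'logs/example_transfer.log',
}
# eval(_example_transfer_config, cuda_id=0)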
                         download=True, transform=transform_test)
test_loader = DataLoader(cifar_test, batch_size=400, shuffle=True)

## natural
print("------------nature-----------")
device = torch.device('cuda:0')
model = resnet.ResNet18().to(device)
print("model loading --------Relu")
model.load_state_dict(torch.load('models/resnet18_cifar_80epochs.pth'))
eps = 0.031
model.eval()
test_err, test_loss = training.epoch(test_loader, model, device=device,
                                     use_tqdm=True)
print("test", test_err)
adv_err, adv_loss = training.epoch_adversarial(
    test_loader, model, attack=attack.pgd_linf_untargeted, device=device,
    num_iter=20, use_tqdm=True, epsilon=eps, randomize=True, alpha=0.003)
print("PGD:", adv_err)
adv_err, adv_loss = training.epoch_adversarial(test_loader,
def train(config, cuda_id):
    mnist_train = datasets.MNIST("./data", train=True, download=True,
                                 transform=transforms.ToTensor())
    mnist_test = datasets.MNIST("./data", train=False, download=True,
                                transform=transforms.ToTensor())
    train_loader = DataLoader(mnist_train,
                              batch_size=config['train_batch_size'],
                              shuffle=True)
    test_loader = DataLoader(mnist_test,
                             batch_size=config['test_batch_size'],
                             shuffle=True)
    eps = config['eps']
    alpha = config['alpha']
    device = torch.device('cuda:{}'.format(cuda_id))

    name = config['model']['name']
    if name == 'DNN':
        model = mnist_model.DNN(hidden_size=config['model']['hidden_size'])
    elif name == 'spDNN':
        model = mnist_model.SparseDNN(hidden_size=config['model']['hidden_size'],
                                      sp=config['model']['sp'], bias=True)
    elif name == 'CNN':
        model = mnist_model.MNIST_CNN(num_channels=config['model']['channels'],
                                      hidden_size=config['model']['hidden_size'])
    elif name == 'spCNN':
        model = mnist_model.SparseMNIST_CNN(sp1=config['model']['sp1'],
                                            sp2=config['model']['sp2'],
                                            func='vol',
                                            num_channels=config['model']['channels'],
                                            hidden_size=config['model']['hidden_size'])
    else:
        raise ValueError
    model.to(device)

    opt = optim.SGD(model.parameters(), lr=config['lr'],
                    momentum=config['momentum'])

    logfilename = config['logfilename']
    with open(logfilename, 'a+') as logf:
        jstr = json.dumps(config)
        logf.write(jstr + '\n')

    starttime = time.time()
    for i in range(config['epoch']):
        if config['adv_train']:
            train_err, train_loss = training.epoch_adversarial(
                train_loader, model,
                attack=attack.pgd_linf_untargeted_mostlikely, device=device,
                opt=opt, num_iter=20, use_tqdm=False, epsilon=eps,
                randomize=True, alpha=alpha)
        else:
            train_err, train_loss = training.epoch(train_loader, model, opt,
                                                   device=device, use_tqdm=False)
        test_err, test_loss = training.epoch(test_loader, model,
                                             device=device, use_tqdm=False)
        adv_err1, adv_loss1 = training.epoch_adversarial(
            test_loader, model, attack=attack.pgd_linf_untargeted,
            device=device, num_iter=20, use_tqdm=False, epsilon=eps,
            randomize=True, alpha=alpha, n_test=config['n_test_adv'])
        adv_err2, adv_loss2 = training.epoch_adversarial(
            test_loader, model, attack=attack.pgd_linf_untargeted2,
            device=device, num_iter=20, use_tqdm=False, epsilon=eps,
            randomize=True, alpha=alpha, n_test=config['n_test_adv'])
        adv_err_ml, adv_loss_ml = training.epoch_adversarial(
            test_loader, model, attack=attack.pgd_linf_untargeted_mostlikely,
            device=device, num_iter=20, use_tqdm=False, epsilon=eps,
            randomize=True, alpha=alpha, n_test=config['n_test_adv'])

        print('epoch: {}'.format(i))
        print('train err: {}, test err: {}, adv1 err: {}, adv2 err: {}, adv_ml err: {}'
              .format(train_err, test_err, adv_err1, adv_err2, adv_err_ml))
        time_e = (time.time() - starttime) / 60
        time_r = (config['epoch'] - (i + 1)) * time_e / (i + 1)
        print('time elapse: {}, time remaining: {}'.format(time_e, time_r))
        with open(logfilename, "a+") as logf:
            logf.write('epoch: {}\n'.format(i))
            logf.write('train err: {}, test err: {}, adv1 err: {}, adv2 err: {}, '
                       'adv_ml err: {}, time_e: {}min\n'.format(
                           train_err, test_err, adv_err1, adv_err2,
                           adv_err_ml, time_e))
        torch.save(model.state_dict(), config["savename"])

    if 'finetune' in config:
        activation_list = activation.append_activation_list(model, 1000)
        opt = optim.SGD(model.parameters(), lr=config['finetune']['lr'],
                        momentum=config['finetune']['momentum'])
        sp = config['model']['sp1']
        for i in range(config['finetune']['epoch']):
            sp = sp - config['finetune']['sp_step']
            for l in activation_list:
                l.sr = sp
            if config['adv_train']:
                train_err, train_loss = training.epoch_adversarial(
                    train_loader, model,
                    attack=attack.pgd_linf_untargeted_mostlikely, device=device,
                    opt=opt, num_iter=20, use_tqdm=False, epsilon=eps,
                    randomize=True, alpha=alpha)
            else:
                train_err, train_loss = training.epoch(train_loader, model, opt,
                                                       device=device,
                                                       use_tqdm=False)
            test_err, test_loss = training.epoch(test_loader, model,
                                                 device=device, use_tqdm=False)
            adv_err1, adv_loss1 = training.epoch_adversarial(
                test_loader, model, attack=attack.pgd_linf_untargeted,
                device=device, num_iter=20, use_tqdm=False, epsilon=eps,
                randomize=True, alpha=alpha, n_test=config['n_test_adv'])
            adv_err2, adv_loss2 = training.epoch_adversarial(
                test_loader, model, attack=attack.pgd_linf_untargeted2,
                device=device, num_iter=20, use_tqdm=False, epsilon=eps,
                randomize=True, alpha=alpha, n_test=config['n_test_adv'])
            adv_err_ml, adv_loss_ml = training.epoch_adversarial(
                test_loader, model,
                attack=attack.pgd_linf_untargeted_mostlikely,
                device=device, num_iter=20, use_tqdm=False, epsilon=eps,
                randomize=True, alpha=alpha, n_test=config['n_test_adv'])

            print('epoch: {}'.format(i))
            print('current sp: {}'.format(sp))
            print('train err: {}, test err: {}, adv1 err: {}, adv2 err: {}, adv_ml err: {}'
                  .format(train_err, test_err, adv_err1, adv_err2, adv_err_ml))
            time_e = (time.time() - starttime) / 60
            time_r = (config['finetune']['epoch'] - (i + 1)) * time_e / (i + 1)
            print('time elapse: {}, time remaining: {}'.format(time_e, time_r))
            with open(logfilename, "a+") as logf:
                logf.write('epoch: {}\n'.format(i))
                logf.write('current sp: {}\n'.format(sp))
                logf.write('train err: {}, test err: {}, adv1 err: {}, adv2 err: {}, '
                           'adv_ml err: {}, time_e: {}min\n'.format(
                               train_err, test_err, adv_err1, adv_err2,
                               adv_err_ml, time_e))
            torch.save(model.state_dict(),
                       config["finetune"]["savepath"] + "_sp{}.pth".format(sp))
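# Hedged sketch (assumption, not taken from the repo's config files): a minimal
# MNIST config covering every key the train() function above reads. The eps/alpha
# values match the MNIST settings used elsewhere in this repo; everything else
# (paths, sizes, sparsity levels) is illustrative only.
_example_mnist_config = {
    'model': {'name': 'spCNN', 'channels': 32, 'hidden_size': 1024,
              'sp1': 0.5, 'sp2': 0.5},
    'train_batch_size': 100,
    'test_batch_size': 100,
    'eps': 0.3,
    'alpha': 0.05,
    'lr': 0.01,
    'momentum': 0.9,
    'epoch': 20,
    'adv_train': True,
    'n_test_adv': 1000,
    'logfilename': 'logs/example_mnist.log',
    'savename': 'models/example_mnist.pth',
    # Optional: progressively lower the activation sparsity after training.
    'finetune': {'lr': 0.001, 'momentum': 0.9, 'epoch': 5,
                 'sp_step': 0.05, 'savepath': 'models/example_mnist_finetune'},
}
# train(_example_mnist_config, cuda_id=0)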
def train(config, cuda_id):
    norm_mean = 0
    norm_var = 1
    transform_train = transforms.Compose([
        # transforms.RandomCrop(32, padding=4),
        # transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((norm_mean, norm_mean, norm_mean),
                             (norm_var, norm_var, norm_var)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((norm_mean, norm_mean, norm_mean),
                             (norm_var, norm_var, norm_var)),
    ])
    svhn_train = datasets.SVHN("./data", split='train', download=True,
                               transform=transform_train)
    svhn_test = datasets.SVHN("./data", split='test', download=True,
                              transform=transform_test)
    train_loader = DataLoader(svhn_train,
                              batch_size=config['train_batch_size'],
                              shuffle=True)
    test_loader = DataLoader(svhn_test,
                             batch_size=config['test_batch_size'],
                             shuffle=True)
    eps = config['eps']
    alpha = config['alpha']
    device = torch.device('cuda:{}'.format(cuda_id))

    model = load_model(config['model'])
    model.to(device)
    opt = optim.SGD(model.parameters(), lr=config['lr'],
                    momentum=config['momentum'])

    logfilename = config['logfilename']
    with open(logfilename, 'a+') as logf:
        jstr = json.dumps(config)
        logf.write(jstr + '\n')

    starttime = time.time()
    if 'finetune' in config:
        activation_list = activation.append_activation_list(model, 10000)
        sp = config['finetune']['init_sp']

    for i in range(config['epoch']):
        if 'finetune' in config:
            if i >= config['finetune']['start_epoch']:
                if i % config['finetune']['adjust_epoch'] == 0:
                    sp = sp - config['finetune']['sp_step']
                    sp = round(sp, 5)
                    for l in activation_list:
                        l.sr = sp
        if i == config['epoch1']:
            for param_group in opt.param_groups:
                param_group['lr'] = config['epoch1_lr']
        if i == config['epoch2']:
            for param_group in opt.param_groups:
                param_group['lr'] = config['epoch2_lr']

        if 'adv_train' in config:
            if config['adv_train']['attack'] == 'untarg1':
                train_err, train_loss = training.epoch_adversarial(
                    train_loader, model, attack=attack.pgd_linf_untargeted,
                    device=device, opt=opt, num_iter=20, use_tqdm=False,
                    epsilon=eps, randomize=True, alpha=alpha)
            elif config['adv_train']['attack'] == 'untarg2':
                train_err, train_loss = training.epoch_adversarial(
                    train_loader, model, attack=attack.pgd_linf_untargeted2,
                    device=device, opt=opt, num_iter=20, use_tqdm=False,
                    epsilon=eps, randomize=True, alpha=alpha)
            elif config['adv_train']['attack'] == 'ml':
                train_err, train_loss = training.epoch_adversarial(
                    train_loader, model,
                    attack=attack.pgd_linf_untargeted_mostlikely,
                    device=device, opt=opt, num_iter=20, use_tqdm=False,
                    epsilon=eps, randomize=True, alpha=alpha)
            elif config['adv_train']['attack'] == 'trade':
                train_err, train_loss = training.epoch_trade(
                    train_loader, model, opt=opt, device=device,
                    step_size=alpha, epsilon=eps, perturb_steps=10, beta=6)
            else:
                raise NotImplementedError
        else:
            train_err, train_loss = training.epoch(train_loader, model, opt,
                                                   device=device, use_tqdm=False)

        test_err, test_loss = training.epoch(test_loader, model,
                                             device=device, use_tqdm=False)
        adv_errs = []
        if 'untarg1' in config['test_attack']:
            adv_err, adv_loss = training.epoch_adversarial(
                test_loader, model, attack=attack.pgd_linf_untargeted,
                device=device, num_iter=20, use_tqdm=False, epsilon=eps,
                randomize=True, alpha=alpha, n_test=config['n_test_adv'])
            adv_errs.append(adv_err)
        if 'untarg2' in config['test_attack']:
            adv_err, adv_loss = training.epoch_adversarial(
                test_loader, model, attack=attack.pgd_linf_untargeted2,
                device=device, num_iter=20, use_tqdm=False, epsilon=eps,
                randomize=True, alpha=alpha, n_test=config['n_test_adv'])
            adv_errs.append(adv_err)
        if 'ml' in config['test_attack']:
            adv_err, adv_loss = training.epoch_adversarial(
                test_loader, model,
                attack=attack.pgd_linf_untargeted_mostlikely,
                device=device, num_iter=20, use_tqdm=False, epsilon=eps,
                randomize=True, alpha=alpha, n_test=config['n_test_adv'])
            adv_errs.append(adv_err)

        print('epoch: {}'.format(i))
        print('train err: {:.5f}, test err: {:.5f}'.format(train_err, test_err))
        for adv_err in adv_errs:
            print('adv err: {:.5f}'.format(adv_err))
        time_e = (time.time() - starttime) / 60
        time_r = (config['epoch'] - (i + 1)) * time_e / (i + 1)
        print('time elapse: {:.5f} min, time remaining: {:.5f} min'.format(
            time_e, time_r))
        with open(logfilename, "a+") as logf:
            logf.write('epoch: {}\n'.format(i))
            logf.write('train err: {:.5f}, test err: {:.5f}\n'.format(
                train_err, test_err))
            for adv_err in adv_errs:
                logf.write('adv err: {:.5f}\n'.format(adv_err))
            logf.write('time elapse: {:.5f} min\n'.format(time_e))
        torch.save(model.state_dict(), config["savename"])

        if 'finetune' in config:
            print('current sp: {}'.format(sp))
            with open(logfilename, "a+") as logf:
                logf.write('current sp: {}\n'.format(sp))
            torch.save(model.state_dict(),
                       config["finetune"]["savepath"] + "_sp{}.pth".format(sp))
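# Hedged sketch (assumption, not taken from the repo's config files): a minimal
# SVHN config covering the keys the train() function above reads. The eps/alpha
# values follow the SVHN settings used in the transfer-attack script; the
# remaining values, and the model spec passed to load_model(), are illustrative.
_example_svhn_config = {
    'model': 'resnet18',                     # whatever load_model() expects here
    'train_batch_size': 128,
    'test_batch_size': 128,
    'eps': 0.047,
    'alpha': 0.01,
    'lr': 0.01,
    'momentum': 0.9,
    'epoch': 80,
    'epoch1': 40, 'epoch1_lr': 0.001,        # learning-rate drop points
    'epoch2': 60, 'epoch2_lr': 0.0001,
    'adv_train': {'attack': 'untarg1'},      # or 'untarg2' / 'ml' / 'trade'; omit for natural training
    'test_attack': ['untarg1', 'untarg2', 'ml'],
    'n_test_adv': 1000,
    'logfilename': 'logs/example_svhn.log',
    'savename': 'models/example_svhn.pth',
    # Optional: gradually lower activation sparsity starting at start_epoch.
    'finetune': {'init_sp': 0.9, 'start_epoch': 40, 'adjust_epoch': 5,
                 'sp_step': 0.05, 'savepath': 'models/example_svhn_finetune'},
}
# train(_example_svhn_config, cuda_id=0)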
            num_iter=args.iters, epsilon=eps, randomize=True, alpha=2 / 255)
    elif args.which_AT == 'trades':
        train_err, train_loss = training.epoch_trade(train_loader, model,
                                                     opt=opt, device=device,
                                                     num_iter=args.iters,
                                                     epsilon=eps,
                                                     alpha=2 / 255, beta=6.0)
    elif args.which_AT == 'nat':
        train_err, train_loss = training.epoch(train_loader, model, opt,
                                               device=device)
    else:
        raise ValueError('AT name ERROR!')
else:
    model.load_state_dict(
        torch.load('models/resnet18_cifar' + name + '.pth',
                   map_location=device))
    train_err, train_loss = training.epoch(train_loader, model, device=device)

times = str(datetime.timedelta(seconds=time.time() - start_time))
print('Tr time:', times)
start_time = time.time()