        self.wrap = wrap
        self.subbatch = subbatch

    def forward(self, x):
        # Run the wrapped model on sub-batches to bound peak GPU memory,
        # then concatenate the outputs back into one batch.
        subbatches = torch.split(x, self.subbatch)
        return torch.cat([self.wrap(sb) for sb in subbatches])


if __name__ == '__main__':
    args = parse_args()
    print_attack_main_args(args)
    device = torch.device(args.device)
    if args.model_name.startswith('model'):
        # Switch the model under attack according to model_name.
        model = get_model_for_attack(args.model_name).to(device)
    elif args.model_name != '':
        model = get_model_for_defense(args.model_name).to(device)
    else:
        model = get_custom_model(args.model, args.model_path).to(device)
    # Attack task: change to your attack function here.
    # Here is an attack baseline: the PGD attack.
    # model = nn.DataParallel(model, device_ids=[0, 1])
    model = WrappedModel(model, args.subbatch_size)
    attack = get_attacker(
        args.attacker, args.step_size, args.epsilon, args.perturb_steps
    )
    model.eval()
    if args.dataset == 'cifar10':
    parser.add_argument('--perturb_steps', type=int, default=20,
                        help='iterations for PGD attack (default: PGD-20)')
    parser.add_argument('--model_name', type=str, default="")
    parser.add_argument('--model_path', type=str,
                        default="./models/weights/model-wideres-pgdHE-wide10.pt")
    parser.add_argument('--gpu_id', type=str, default="0")
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    # Select which GPU(s) to use on a multi-GPU machine.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    gpu_num = max(len(args.gpu_id.split(',')), 1)
    device = torch.device('cuda')
    if args.model_name != "":
        # Switch the model under attack according to model_name.
        model = get_model_for_attack(args.model_name).to(device)
        model = nn.DataParallel(model, device_ids=list(range(gpu_num)))
    else:
        # Defense task: change to your model here.
        model = WideResNet()
        model.load_state_dict(torch.load('models/weights/wideres34-10-pgdHE.pt'))
        model = nn.DataParallel(model, device_ids=list(range(gpu_num)))
    # Attack task: change to your attack function here.
    # Here is an attack baseline: the PGD attack.
    attack = PGDAttack(args.step_size, args.epsilon, args.perturb_steps)
    model.eval()
    test_loader = get_test_cifar(args.batch_size)
    natural_acc, robust_acc, distance = eval_model_with_attack(
        model, test_loader, attack, args.epsilon, device
    )
    print(
        f"Natural Acc: {natural_acc:.5f}, "
        f"Robust acc: {robust_acc:.5f}, "
        f"distance: {distance:.5f}"
    )
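# PGDAttack itself is defined elsewhere in the repo; for reference, the sketch
# below shows what an L-infinity PGD baseline with the same constructor
# arguments (step_size, epsilon, perturb_steps) typically looks like. The class
# name SimplePGD and the perturb(model, x, y) interface are assumptions for
# illustration, not the repo's actual API.
class SimplePGD:
    def __init__(self, step_size, epsilon, perturb_steps):
        self.step_size = step_size
        self.epsilon = epsilon
        self.perturb_steps = perturb_steps

    def perturb(self, model, x, y):
        # Random start inside the epsilon-ball, kept in the valid image range.
        x_adv = x + torch.empty_like(x).uniform_(-self.epsilon, self.epsilon)
        x_adv = torch.clamp(x_adv, 0.0, 1.0).detach()
        for _ in range(self.perturb_steps):
            x_adv.requires_grad_(True)
            loss = nn.functional.cross_entropy(model(x_adv), y)
            grad = torch.autograd.grad(loss, x_adv)[0]
            # Ascend the loss along the gradient sign, then project back
            # into the epsilon-ball around the clean input.
            x_adv = x_adv.detach() + self.step_size * grad.sign()
            x_adv = torch.min(torch.max(x_adv, x - self.epsilon), x + self.epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
        return x_adv.detach()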
def run_model(model, test_loader, device, eps):
    model.eval()
    abstract_model = None
    acc = []
    with trange(10000) as progbar, torch.no_grad():
        for x, y in test_loader:
            x, y = x.to(device), y.to(device)
            if abstract_model is None:
                abstract_model = process_model(model, x)
            # Wrap the batch in a zonotope around the input.
            # NOTE: the radius is hard-coded to 1e-7 here; the eps argument is unused.
            abstract_input = diffai.ai.HybridZonotope(
                x, torch.ones_like(x) * 1e-7, None
            ).checkSizes()
            abstract_logits = abstract_model(None, abstract_input)
            # Debug print: concrete predictions at the zonotope center vs. labels.
            print(abstract_logits.head.argmax(-1) == y)
            # An example counts as provably robust if every point in the input
            # region is classified as the true label.
            acc.append(abstract_logits.isSafe(y).float().mean().item())
            progbar.set_description("Provable Acc: %.5f" % numpy.mean(acc))
            progbar.update(x.shape[0])


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=40)
    parser.add_argument('--model_name', type=str, default='model6')
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--epsilon', type=float, default=8 / 255)
    ns = parser.parse_args()
    model = get_model_for_attack(ns.model_name).to(ns.device)
    test_loader = get_test_cifar(ns.batch_size)
    run_model(model, test_loader, ns.device, ns.epsilon)
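# For intuition: diffai's HybridZonotope propagates a small region around each
# input through the network, and isSafe(y) certifies that the true class wins
# everywhere in that region. The self-contained sketch below (plain PyTorch,
# independent of diffai; all names are illustrative) performs the same kind of
# check with simple interval bounds for a single linear layer.
def linear_interval_bounds(layer, lower, upper):
    # Propagate elementwise bounds [lower, upper] through y = W x + b:
    # the center maps through the layer, and |W| maps the radius.
    center = (upper + lower) / 2
    radius = (upper - lower) / 2
    out_center = layer(center)
    out_radius = radius @ layer.weight.abs().t()
    return out_center - out_radius, out_center + out_radius


def toy_certification_demo(eps=1e-7):
    layer = torch.nn.Linear(3 * 32 * 32, 10)
    x = torch.rand(1, 3 * 32 * 32)
    lo, hi = linear_interval_bounds(layer, x - eps, x + eps)
    y = layer(x).argmax(-1).item()
    # Certified iff the worst-case logit of the predicted class still beats
    # the best-case logit of every other class.
    others = torch.cat([hi[0, :y], hi[0, y + 1:]])
    return bool((lo[0, y] > others).all())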
plotlib.style.use('seaborn')
plotlib.rcParams['ps.useafm'] = True
fsize = 24
tsize = 28
parameters = {
    'axes.labelsize': tsize,
    'axes.titlesize': tsize,
    'xtick.labelsize': fsize,
    'ytick.labelsize': fsize,
    'legend.fontsize': fsize,
}
plotlib.rcParams.update(parameters)

test_loader = get_test_cifar(1)
model = get_model_for_attack('model2')
# model = vgg.vgg13_bn()
# ensemble = torch.load('vgg13bn_regm2.dat', map_location=torch.device('cpu'))
# model.load_state_dict(ensemble)
model.eval()
for x, y in test_loader:
    def func(cx):
        # Scalar loss of the model on this example, as a function of the input.
        logits = model(cx)
        # labels = torch.eye(10).to(y.device)[y]
        return torch.nn.functional.cross_entropy(logits, y)

    def safe_jac(cx):
        # Gradient of the loss w.r.t. the input, with a tiny Gaussian jitter
        # added so it is never exactly zero.
        jac = torch.autograd.functional.jacobian(func, cx)
        jac = torch.randn_like(jac) * 1e-7 + jac
        return jac
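    # Hypothetical illustration (not part of the original script): safe_jac
    # returns the input gradient, so a single FGSM-style signed step can be
    # taken from it. The eps_demo value and the *_demo names are assumptions
    # for this sketch only, assuming images live in [0, 1].
    eps_demo = 8 / 255
    grad_demo = safe_jac(x)
    x_adv_demo = torch.clamp(x + eps_demo * grad_demo.sign(), 0.0, 1.0)
    print('loss clean: %.4f, after one signed step: %.4f'
          % (func(x).item(), func(x_adv_demo).item()))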