def __init__(self, server, device, args, adv_client_idx=0, adv_branch_idx=0):
    self.server = server
    server.set_client_dataset()
    self.device = device
    self.args = args
    self.adv_client_idx = adv_client_idx
    self.adv_branch_idx = adv_branch_idx

    self.save_dir = osp.join(self.args.save_dir, f"{type(self).__name__}")
    if not os.path.exists(self.save_dir):
        os.makedirs(self.save_dir, exist_ok=True)

    # set adv client model
    params = self.set_attack_params()
    ensemble_info = self.prepare_advclient_model()
    self.prepare_server_model(ensemble_info)

    # test clean accuracy
    # clean_acc = self.check_accuracy(
    #     self.advclient_model, self.server.test_global, self.device
    # )
    # logging.info(f"clean accuracy: {clean_acc * 100:.1f} %")
    # st()

    self.attack_fn = LinfPGD(**params)
    # self.attack_fn = L2CarliniWagnerAttack(**params)
    self.attack()
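# A minimal sketch of what set_attack_params might return for the LinfPGD
# constructor above; the concrete values are assumptions, not taken from this
# codebase. Foolbox's LinfPGD takes its step schedule at construction time,
# while epsilon is supplied later when the attack is called.
def set_attack_params(self):
    return dict(
        steps=40,            # number of PGD iterations (assumed)
        rel_stepsize=0.033,  # step size relative to epsilon (assumed)
        random_start=True,   # random init inside the eps-ball (assumed)
    )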
# for img, ax in zip(images_arr, axes):
#     ax.imshow(np.squeeze(img), cmap="gray")
#     ax.axis("off")
# plt.tight_layout()
# fig.savefig('beforePGDattack.jpg', bbox_inches='tight', dpi=150)
# plt.show()
###########################################################
# attacks1 = [
#     FGSM(),
#     LinfPGD(),
#     LinfDeepFoolAttack(),
# ]

# apply the PGD attack
attack = LinfPGD()
epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]

t0 = time.process_time()
_, advsPGD, success = attack(fmodel, images, labels, epsilons=epsilons)
t1 = time.process_time()
attacktimePGD = t1 - t0
# print("done with attack")
# print(success)

# tally how many samples each epsilon successfully attacked
listsuccess = []
for anepval in success:
    # print(anepval)
    totalsuccess = 0
    for abool in anepval:
        abool1 = np.array(abool)
        # print(abool1)
        totalsuccess += int(abool1)   # assumed completion of the truncated tally
    listsuccess.append(totalsuccess)  # assumed completion: per-epsilon success counts
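# Equivalent vectorized tally, assuming `success` keeps Foolbox's usual shape
# of (len(epsilons), batch_size): the per-epsilon robust accuracy follows
# directly, matching the expression used in the examples further below.
robust_accuracy_PGD = 1 - success.float32().mean(axis=-1)
print(f"LinfPGD attack time: {attacktimePGD:.2f} s")
print("LinfPGD robust accuracy per epsilon:", robust_accuracy_PGD.numpy())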
def main() -> None:
    # instantiate a model (could also be a TensorFlow or JAX model)
    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
    clean_acc = accuracy(fmodel, images, labels)
    print(f"clean accuracy: {clean_acc * 100:.1f} %")

    # apply the attack
    attack = LinfPGD()
    epsilons = [0.0, 0.0002, 0.0005, 0.0008, 0.001, 0.0015, 0.002, 0.003, 0.01, 0.1, 0.3, 0.5, 1.0]
    raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)

    # calculate and report the robust accuracy (the accuracy of the model when
    # it is attacked)
    robust_accuracy = 1 - success.float32().mean(axis=-1)
    print("robust accuracy for perturbations with")
    for eps, acc in zip(epsilons, robust_accuracy):
        print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")

    # we can also manually check this
    # we will use the clipped advs instead of the raw advs, otherwise
    # we would need to check if the perturbation sizes are actually
    # within the specified epsilon bound
    print()
    print("we can also manually check this:")
    print()
    print("robust accuracy for perturbations with")
    for eps, advs_ in zip(epsilons, clipped_advs):
        acc2 = accuracy(fmodel, advs_, labels)
        print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
        print("    perturbation sizes:")
        perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2, 3)).numpy()
        print("    ", str(perturbation_sizes).replace("\n", "\n" + "    "))
        if acc2 == 0:
            break
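# If this example is run as a standalone script, it needs roughly the header
# below (the imports match the standard Foolbox 3.x PyTorch example that this
# function follows) plus the usual entry-point guard:
#
#     import torchvision.models as models
#     import eagerpy as ep
#     from foolbox import PyTorchModel, accuracy, samples
#     from foolbox.attacks import LinfPGD
#
if __name__ == "__main__":
    main()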
def foolbox_attack(filter=None, filter_preserve='low', free_parm='eps', plot_num=None):
    # get model.
    model = get_model()
    model = nn.DataParallel(model).to(device)
    model = model.eval()

    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    if plot_num:
        free_parm = ''
        val_loader = get_val_loader(plot_num)
    else:
        # Load images.
        val_loader = get_val_loader(args.attack_batch_size)

    if 'eps' in free_parm:
        epsilons = [0.001, 0.003, 0.005, 0.008, 0.01, 0.1]
    else:
        epsilons = [0.01]
    if 'step' in free_parm:
        steps = [1, 5, 10, 30, 40, 50]
    else:
        steps = [args.iteration]

    for step in steps:
        # Adversarial attack.
        if args.attack_type == 'LinfPGD':
            attack = LinfPGD(steps=step)
        elif args.attack_type == 'FGSM':
            attack = FGSM()

        clean_acc = 0.0
        for i, data in enumerate(val_loader, 0):
            # Samples (attack_batch_size * attack_epochs) images for adversarial attack.
            if i >= args.attack_epochs:
                break

            images, labels = data[0].to(device), data[1].to(device)
            if step == steps[0]:
                # accumulate for attack epochs.
                clean_acc += get_acc(fmodel, images, labels) / args.attack_epochs

            _images, _labels = ep.astensors(images, labels)
            raw_advs, clipped_advs, success = attack(fmodel, _images, _labels, epsilons=epsilons)

            if plot_num:
                grad = torch.from_numpy(raw_advs[0].numpy()).to(device) - images
                grad = grad.clone().detach_()
                return grad

            if filter:
                robust_accuracy = torch.empty(len(epsilons))
                for eps_id in range(len(epsilons)):
                    grad = torch.from_numpy(raw_advs[eps_id].numpy()).to(device) - images
                    grad = grad.clone().detach_()
                    freq = dct.dct_2d(grad)
                    if filter_preserve == 'low':
                        mask = torch.zeros(freq.size()).to(device)
                        mask[:, :, :filter, :filter] = 1
                    elif filter_preserve == 'high':
                        mask = torch.zeros(freq.size()).to(device)
                        mask[:, :, filter:, filter:] = 1
                    masked_freq = torch.mul(freq, mask)
                    new_grad = dct.idct_2d(masked_freq)
                    x_adv = torch.clamp(images + new_grad, 0, 1).detach_()
                    robust_accuracy[eps_id] = get_acc(fmodel, x_adv, labels)
            else:
                robust_accuracy = 1 - success.float32().mean(axis=-1)

            if i == 0:
                robust_acc = robust_accuracy / args.attack_epochs
            else:
                robust_acc += robust_accuracy / args.attack_epochs

        if step == steps[0]:
            print("sample size is : ", args.attack_batch_size * args.attack_epochs)
            print(f"clean accuracy: {clean_acc * 100:.1f} %")

        print(f"Model {args.model} robust accuracy for {args.attack_type} perturbations with")
        for eps, acc in zip(epsilons, robust_acc):
            print(f"  Step {step}, Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
        print('  -------------------')
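# Hypothetical sketch of the get_acc helper used above (its definition is not
# part of this snippet). foolbox.accuracy wraps its inputs with ep.astensors
# internally, so raw PyTorch tensors can be passed straight through and the
# helper is assumed to be a thin wrapper returning batch top-1 accuracy.
def get_acc(fmodel, images, labels):
    return accuracy(fmodel, images, labels)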
def main() -> None:
    # instantiate a model (could also be a TensorFlow or JAX model)
    #model = models.resnet18(pretrained=True).eval()
    #model = torch.load('/data1/zyh/copycat/Framework/cifar_model.pth')
    model = AlexNet()
    path = "./cifar_net.pth"
    #path = '/data1/zyh/copycat/Framework/cifar_model.pth'
    #model.load_state_dict(torch.load('/data1/zyh/copycat/Framework/cifar_model.pth'))
    #pretrained_dict = {k: v for k, v in model_pretrained.items() if k in model_dict}
    #model_dict.update(pretrained_dict)
    #model.load_state_dict(state_dict)
    model.load_state_dict(torch.load(path), strict=True)
    model.eval()
    print(type(model))

    #preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    preprocessing = dict(mean=[0.5] * 3, std=[0.5] * 3, axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    #test_dataset = torchvision.datasets.CIFAR10(root='~/.torch/',
    #                                            train=True,
    #                                            #transform=transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()]),
    #                                            transform=transforms.Compose([transforms.ToTensor()]),
    #                                            download=True)
    #test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
    #                                          batch_size=128,  # number of samples read per batch
    #                                          shuffle=False)   # whether to shuffle the samples when reading
    #
    ## create an iterator
    #data_iter = iter(test_loader)
    #images, labels = next(data_iter)  # once iteration starts, the queue and worker threads begin reading data
    #images, labels = data_iter.next()
    #images = images.to(device)
    #labels = labels.to(device)
    #im = images
    #images = im.resize(100, 3, 128, 128)
    images, labels = ep.astensors(*samples(fmodel, dataset="cifar10", batchsize=16))
    #images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
    #print(images.shape)
    clean_acc = accuracy(fmodel, images, labels)
    print(f"clean accuracy: {clean_acc * 100:.1f} %")

    # apply the attack
    attack = LinfPGD()
    '''epsilons = [0.0, 0.0002, 0.0005, 0.0008, 0.001, 0.0015, 0.002, 0.003, 0.01, 0.1, 0.3, 0.5, 1.0]'''
    epsilons = [0.0005, 0.001, 0.002, 0.01, 0.1]
    raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)
    print(type(raw_advs))
    print("atest")

    # calculate and report the robust accuracy (the accuracy of the model when
    # it is attacked)
    robust_accuracy = 1 - success.float32().mean(axis=-1)
    print("robust accuracy for perturbations with")
    for eps, acc in zip(epsilons, robust_accuracy):
        print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")

    # we can also manually check this
    # we will use the clipped advs instead of the raw advs, otherwise
    # we would need to check if the perturbation sizes are actually
    # within the specified epsilon bound
    print()
    print("we can also manually check this:")
    print()
    print("robust accuracy for perturbations with")
    for eps, advs_ in zip(epsilons, clipped_advs):
        acc2 = accuracy(fmodel, advs_, labels)
        print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
        print("    perturbation sizes:")
        perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2, 3)).numpy()
        print("    ", str(perturbation_sizes).replace("\n", "\n" + "    "))
        if acc2 == 0:
            break

    fig = plt.gcf()
    os.makedirs("./image/", exist_ok=True)
    for i in range(len(raw_advs)):
        img_v = raw_advs[i].raw
        torchvision.utils.save_image(img_v, './image/' + str(i) + '.png')
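# A defensive variant of the checkpoint load in main() above (an assumption,
# not part of the original script): torch.load accepts map_location, which
# keeps the script usable on CPU-only machines when cifar_net.pth was saved
# from a GPU run.
#
#     state_dict = torch.load(path, map_location="cpu")
#     model.load_state_dict(state_dict, strict=True)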
def main() -> None:
    # instantiate a model (could also be a TensorFlow or JAX model)
    #model = models.resnet18(pretrained=True).eval()
    #model = torch.load('/data1/zyh/copycat/Framework/cifar_model.pth')
    model = AlexNet()
    path = "./cifar_net.pth"
    #path = '/data1/zyh/copycat/Framework/cifar_model.pth'
    #model.load_state_dict(torch.load('/data1/zyh/copycat/Framework/cifar_model.pth'))
    #pretrained_dict = {k: v for k, v in model_pretrained.items() if k in model_dict}
    #model_dict.update(pretrained_dict)
    #model.load_state_dict(state_dict)
    model.load_state_dict(torch.load(path), strict=True)
    model = model.to(device)
    model.eval()
    print(type(model))

    #preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    preprocessing = dict(mean=[0.5] * 3, std=[0.5] * 3, axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    test_dataset = torchvision.datasets.CIFAR10(
        root='~/.torch/',
        train=False,
        #transform=transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()]),
        transform=transforms.Compose([transforms.ToTensor()]),
        download=True)
    # test_dataset.data = test_dataset.data[:128 * 5]
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=128,  # number of samples read per batch
        shuffle=False)   # whether to shuffle the samples when reading

    ## create an iterator
    #data_iter = iter(test_loader)
    #images, labels = next(data_iter)  # once iteration starts, the queue and worker threads begin reading data
    #images, labels = data_iter.next()
    #im = images
    #images = im.resize(100, 3, 128, 128)

    with torch.no_grad():
        all_clean_acc_foolbox = []

        # native predict
        predict_func(test_loader, model)

        for ii, (imgs, lbls) in tqdm.tqdm(enumerate(test_loader), total=len(test_loader)):
            imgs = imgs.to(device)
            lbls = lbls.to(device)
            images, labels = ep.astensors(imgs, lbls)

            # calc with foolbox
            pred_lbl_foolbox = fmodel(images)
            clean_acc_one = accuracy(fmodel, imgs, lbls)
            all_clean_acc_foolbox.append(clean_acc_one)

        clean_acc = sum(all_clean_acc_foolbox) / len(all_clean_acc_foolbox)
    print(f"clean accuracy: {clean_acc * 100:.1f} %")

    # apply the attack
    attack = LinfPGD()
    '''epsilons = [0.0, 0.0002, 0.0005, 0.0008, 0.001, 0.0015, 0.002, 0.003, 0.01, 0.1, 0.3, 0.5, 1.0]'''
    epsilons = [0.0005, 0.001, 0.002, 0.01, 0.1]

    def attack_one_batch(fmodel, images, labels, iter=0, verbose=True):
        images, labels = ep.astensors(images, labels)
        raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)
        if verbose:
            print("===" * 8, iter, "===" * 8)
        if verbose:
            robust_accuracy = 1 - success.float32().mean(axis=-1)
            print("robust accuracy for perturbations with")
            for eps, acc in zip(epsilons, robust_accuracy):
                print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
        if verbose:
            fig = plt.gcf()
            os.makedirs("./image/", exist_ok=True)
            for i in range(len(raw_advs)):
                img_v = raw_advs[i].raw
                torchvision.utils.save_image(
                    img_v, f'./image/{str(iter).zfill(4)}_{str(i).zfill(3)}_.png')
        return [x.raw for x in raw_advs]

    # print("====" * 8, "start attack", "====" * 8)
    collection_adv = []
    collection_gt = []
    for ii, (imgs, lbls) in tqdm.tqdm(enumerate(test_loader), total=len(test_loader)):
        imgs = imgs.to(device)
        lbls = lbls.to(device)
        # images, labels = ep.astensors(images, labels)
        adv_ret = attack_one_batch(fmodel=fmodel, images=imgs, labels=lbls, iter=ii, verbose=True)
        collection_adv.append(torch.stack(adv_ret))
        collection_gt.append(lbls.cpu())

    print("====" * 8, "start evaluation", "====" * 8)
    with torch.no_grad():
        # each entry of collection_adv has shape (len(epsilons), batch, C, H, W),
        # so concatenating along dim=1 yields one tensor per epsilon over the full set
        adv_total_dataset = torch.cat(collection_adv, dim=1)
        lbl_total_dataset = torch.cat(collection_gt).to(device)
        # print(adv_total_dataset.mean(dim=(1, 2, 3, 4)), "the mean of each eps")
        for (eps, ep_adv_dataset) in zip(epsilons, adv_total_dataset):
            # print("eps:", eps, "===>" * 8)
            # print(ep_adv_dataset.mean(), "each...")
            advs_ = ep_adv_dataset.to(device)
            acc2 = accuracy(fmodel, advs_, lbl_total_dataset)
            print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
            dataset = torch.utils.data.TensorDataset(ep_adv_dataset, lbl_total_dataset)
            dl = torch.utils.data.DataLoader(dataset, batch_size=128)
            predict_func(dl, model)
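# Hypothetical sketch of the predict_func helper referenced above (its real
# definition is not part of this snippet): a plain PyTorch evaluation loop
# that prints top-1 accuracy for a dataloader. It assumes the module-level
# `device` used elsewhere in this script.
def predict_func(loader, model):
    correct, total = 0, 0
    with torch.no_grad():
        for imgs, lbls in loader:
            imgs, lbls = imgs.to(device), lbls.to(device)
            preds = model(imgs).argmax(dim=1)
            correct += (preds == lbls).sum().item()
            total += lbls.numel()
    print(f"native top-1 accuracy: {correct / total * 100:.1f} %")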