def get_noised_result(adv_imgs, ori_imgs, perturbation_ratio=0.25, noise_level=10):
    """Re-scale the adversarial perturbation, add uniform pixel noise, and
    count how many targeted images each noised query still retrieves.

    NOTE(review): relies on module-level globals — i_max, j_max, model1,
    code, multi_label, j_index_matrix, test_true_label_y, i_index_set —
    that must be defined before this is called.
    """
    # imgs_test = np.load('save_for_load/imgs_test.npy')
    # target_img_mat = np.load('save_for_load/target_imgs.npy')
    # Uniform integer noise in [-noise_level, noise_level], in pixel units.
    pixel_noise = np.random.randint(-noise_level, noise_level + 1,
                                    adv_imgs.shape).astype(float)
    # Keep only `perturbation_ratio` of each adversarial perturbation,
    # then add the (0..1-scaled) noise on top.
    scaled_batch = np.stack([
        ori_imgs[k] + (adv_imgs[k] - ori_imgs[k]) * perturbation_ratio
        for k in range(i_max)
    ])
    noised_batch = scaled_batch + pixel_noise / 255
    queries = Variable(torch.Tensor(noised_batch)).cuda()
    per_class_counts = get_img_num_by_class_from_img_batch(
        queries, model1, code, multi_label, threshold=5, batch_size=16)
    # Targeted-label matrix: one row per source image, one column per target.
    label_targeted = np.zeros([i_max, j_max])
    for row in range(i_max):
        picked = j_index_matrix[int(
            test_true_label_y[i_index_set[row]])].astype(int)
        label_targeted[row] = np.array(
            [multi_label[picked[col]] for col in range(j_max)])
    # i_max*j_max matrix of targeted-retrieval counts per input image.
    return get_targeted_from_all_class(per_class_counts, label_targeted)
def get_noised_result(model1, adv_imgs, ori_imgs, perturbation_ratio=0.25, noise_level=10, is_orthogonal=False, noise_distribution='uniform'):
    """Scale the adversarial perturbation, add random noise, and count targeted retrievals.

    Args:
        model1: white-box hashing model used for retrieval.
        adv_imgs, ori_imgs: adversarial and original image batches (same shape).
        perturbation_ratio: fraction of the adversarial perturbation kept.
        noise_level: max noise magnitude, in [0, 255] pixel units.
        is_orthogonal: if True, use noise projected orthogonally to the perturbation.
        noise_distribution: 'uniform' or 'Gaussian' (non-orthogonal case only).

    Returns:
        i_max x j_max matrix of targeted-retrieval counts
        (output of get_targeted_from_all_class).

    Raises:
        ValueError: if noise_distribution is unrecognised (non-orthogonal case).

    NOTE(review): this shadows the earlier get_noised_result defined above it
    in this module; also relies on module-level globals (i_max, j_max, code,
    multi_label, j_index_matrix, test_true_label_y, i_index_set).
    """
    # directly copied from myExpGetAdvVulnerable.py
    # imgs_test = np.load('save_for_load/imgs_test.npy')
    # target_img_mat = np.load('save_for_load/target_imgs.npy')
    if not is_orthogonal:
        if noise_distribution == 'uniform':
            random_noise = np.random.randint(-noise_level, noise_level + 1,
                                             adv_imgs.shape).astype(float)
        elif noise_distribution == 'Gaussian':
            # using 3-sigma to define the max value of the noise
            random_noise = np.random.normal(0, noise_level / 3,
                                            size=adv_imgs.shape).astype(float)
            random_noise = np.clip(random_noise, -noise_level, noise_level)
        else:
            # FIX: previously an unknown distribution fell through without
            # assigning `random_noise`, raising a NameError at the print below.
            raise ValueError("unknown noise_distribution: %r" % (noise_distribution,))
    else:
        if noise_level == 0:
            # Degenerate case: randint(0, 1, ...) yields all-zero noise.
            random_noise = np.random.randint(-noise_level, noise_level + 1,
                                             adv_imgs.shape).astype(float)
        # Get the orthogonal projection of random noise and amplify it to designated number
        else:
            random_noise = get_random_noise_orthogonal(adv_imgs, ori_imgs,
                                                       noise_distribution)
            random_noise = np.clip(random_noise, -noise_level * 3, noise_level * 3)
            # NOTE(review): dividing by .max() (not np.abs(...).max()) flips
            # signs if the max is negative, and the following clip is a no-op
            # (bounds equal the current max) — confirm intent before changing.
            random_noise /= random_noise.max()
            random_noise = np.clip(random_noise, -random_noise.max(), random_noise.max())
            random_noise *= noise_level
    print("Random Noise Real Range:[%f, %f]" % (random_noise.min(), random_noise.max()))
    # Shrink each perturbation to `perturbation_ratio`, then add 0..1-scaled noise.
    adv_imgs_noised = np.stack(
        [(adv_imgs[i] - ori_imgs[i]) * perturbation_ratio + ori_imgs[i]
         for i in range(i_max)]) + random_noise / 255
    # adv_imgs_noised = adv_imgs
    X = Variable(torch.Tensor(adv_imgs_noised)).cuda()
    noised_img_num_result = get_img_num_by_class_from_img_batch(
        X, model1, code, multi_label, threshold=5, batch_size=16)
    # Build the targeted-label matrix: one row per source image.
    label_targeted = np.zeros([i_max, j_max])
    for i in range(i_max):
        j_index_set = j_index_matrix[int(
            test_true_label_y[i_index_set[i]])].astype(int)
        label_targeted_i = np.array(
            [multi_label[j_index_set[j]] for j in range(j_max)])
        label_targeted[i] = label_targeted_i
    # retrieval_result is a i_max*j_max matrix, which contains the number of
    # targeted imgs of each input images.
    noised_adv_white_retrieval_result = get_targeted_from_all_class(
        noised_img_num_result, label_targeted)
    return noised_adv_white_retrieval_result
def main_func():
    """Load the white-box net, compute each target image's targeted-retrieval
    count on the white-box model, and save the i_max x j_max result matrix.

    NOTE(review): relies on module-level globals (adv_method, step, linf,
    i_max, j_max, dis_method, net1, net2, job_dataset, ...) — confirm they
    are defined before calling.
    """
    from publicFunctions import load_net_inputs, load_net_params, load_dset_params
    # Path of the precomputed adversarial images for this attack configuration.
    npy_name = '/%s_imgs_step%1.1f_linf%d_%dx%d_%s.npy' % (adv_method, step, linf, i_max, j_max, dis_method)
    npy_path = 'save_for_load/' + net1 + npy_name
    path_white_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy'%(net1)
    path_black_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy'%(net2)
    dset_test, dset_database = load_dset_params(job_dataset)
    model1, snapshot_path, query_path, database_path = load_net_params(net1)
    # Database npz: arr_1 = hash codes, arr_2 = labels (arr_0 unused here).
    tmp = np.load(database_path)
    _, code, multi_label = tmp['arr_0'], tmp['arr_1'], tmp['arr_2']
    test_dis_white, test_dis_black = get_test_dis(path_white_test_dis_npy, path_black_test_dis_npy)
    test_true_id_x, test_true_label_y = choose_index_by_dis_method(dis_method, test_dis_white, test_dis_black, max_dis=18, min_dis=12)
    id_size = test_true_id_x.shape[0]
    print('id size:',id_size)
    # Evenly sub-sample i_max source indices from the valid id range.
    i_index_set = np.arange(0, id_size, id_size / (i_max))[:i_max]
    inputs_ori_tensor = torch.stack([dset_test[test_true_id_x[i_index_set[i]]][0] for i in range(i_max)])
    j_index_matrix = get_unique_index(code, multi_label, j_max)
    adv_imgs = np.load(npy_path)
    ori_imgs = inputs_ori_tensor.cpu().numpy()
    # imgs_test = np.load('save_for_load/imgs_test.npy')
    # target_img_mat = np.load('save_for_load/target_imgs.npy')
    target_img_mat = get_target_imgs(j_index_matrix, test_true_label_y, i_index_set, dset_database)
    from myRetrieval import get_img_num_by_class_from_img_batch, get_targeted_from_all_class
    inputs_targets = Variable(torch.Tensor(target_img_mat).cuda(), requires_grad=True)
    # Per-class retrieval counts for every target image on the white-box model.
    img_num_by_class_target = get_img_num_by_class_from_img_batch(inputs_targets, model1, code, multi_label, threshold=5, batch_size=16)
    target_targetedNum_mat = np.zeros([i_max, j_max])
    for i in range(i_max):
        j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
        label_targeted = np.array([multi_label[j_index_set[j]] for j in range(j_max)])
        # Keep only the counts for each row's own targeted classes.
        img_num_target_targeted = get_targeted_from_all_class(img_num_by_class_target[i], label_targeted)
        target_targetedNum_mat[i] = img_num_target_targeted
    target_targeted_retrieval_num_path = './save_for_load/%s/target_targetedRetrievalNum_%s_%s.npy' % (net1, adv_method, dis_method)
    np.save(target_targeted_retrieval_num_path, target_targetedNum_mat)
    # Dead code kept below as a triple-quoted block (exploratory snippet).
    '''
    for i in range(i_max):
        for j in range(j_max):
            j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
            target_img = target_img_mat[i, j]
            #target_result = get_target_retrival_result(model1, )
    i, j = 3, 9
    ori_img = ori_imgs[i]
    adv_img = adv_imgs[i,j]
    j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
    label_targeted = np.array([database_label[j_index_set[j]] for j in range(j_max)])
    target_label = label_targeted[j]
    perturbation_ratio_bound = estimate_subspace_size(adv_img, ori_img, model1, target_label, code, database_label)
    '''
    return
def func_single_RRN(model, adv_img, source_img_index, radius_candidates_array, candidates_size=8, N=10):
    """Search for the smallest noise radius at which ALL noised copies of
    `adv_img` still retrieve more than N targeted images.

    Args:
        model: hashing model used for retrieval.
        adv_img: single adversarial image (array).
        source_img_index: row index into the global targeted-label matrix.
        radius_candidates_array: candidate noise radii, tried in order.
        candidates_size: number of noised copies drawn per radius.
        N: minimal retrieval count for a noised query to still count as adversarial.

    Returns:
        The first radius at which every noised candidate passes, or 0 if
        none of the candidate radii succeeds.

    NOTE(review): relies on module-level globals (i_max, j_max, code,
    multi_label, j_index_matrix, test_true_label_y, i_index_set).
    """
    # NOTE: This file is not debugged. To be debugged when the computational resource is available
    radius_array_size = len(radius_candidates_array)
    # Targeted-label matrix: one row per source image, one column per target.
    label_targeted = np.zeros([i_max, j_max])
    for i in range(i_max):
        j_index_set = j_index_matrix[int(
            test_true_label_y[i_index_set[i]])].astype(int)
        label_targeted_i = np.array(
            [multi_label[j_index_set[j]] for j in range(j_max)])
        label_targeted[i] = label_targeted_i
    for i_radius in range(radius_array_size):
        noise_level = radius_candidates_array[i_radius]
        # Noise batch of shape (candidates_size, *adv_img.shape).
        random_noise = np.random.randint(
            -noise_level, noise_level + 1,
            np.concatenate((candidates_size, adv_img.shape), axis=None)).astype(float)
        adv_imgs_noised = adv_img + random_noise / 255
        X = Variable(torch.Tensor(adv_imgs_noised)).cuda()
        noised_img_num_result = get_img_num_by_class_from_img_batch(
            X, model, code, multi_label, threshold=5, batch_size=16)
        # FIX: dropped the dead `label_noised = np.zeros([candidates_size])`
        # that was immediately overwritten by the line below.
        label_noised = label_targeted[source_img_index][:candidates_size]
        noised_adv_white_retrieval_result = get_targeted_from_all_class(
            noised_img_num_result, label_noised)
        index_noised_gtN = noised_adv_white_retrieval_result > N
        print("Pass Number SUM:", index_noised_gtN.sum())
        if index_noised_gtN.sum() == candidates_size:
            # Every noised copy still retrieves > N targeted images.
            print("Stop at:", radius_candidates_array[i_radius])
            return radius_candidates_array[i_radius]
        else:
            print("Continue at:", radius_candidates_array[i_radius])
    # FIX: the original checked `if i_radius == radius_array_size:` inside the
    # loop, which can never be true (range stops at radius_array_size - 1), so
    # an exhausted search silently returned None. Return the default instead.
    return 0  # default radius
def func_eval_adv_imgs(adv_imgs, model, code, test_true_label_y):
    """Evaluate adversarial images: per-(source, target) targeted-retrieval counts.

    NOTE(review): relies on module-level globals — multi_label2, threshold,
    i_max, j_max, j_index_matrix, i_index_set.
    """
    adv_batch = Variable(torch.Tensor(adv_imgs).cuda())
    # Per-class retrieval counts for the whole adversarial batch.
    per_class_counts = get_img_num_by_class_from_img_batch(
        adv_batch, model, code, multi_label2,
        threshold=threshold, batch_size=8)
    # Targeted-label matrix: one row per source image.
    targeted_labels = np.zeros([i_max, j_max])
    for row in range(i_max):
        picked = j_index_matrix[int(
            test_true_label_y[i_index_set[row]])].astype(int)
        targeted_labels[row] = np.array(
            [multi_label2[picked[col]] for col in range(j_max)])
    # Reduce the per-class counts to each row's targeted classes only.
    return get_targeted_from_all_class(per_class_counts, targeted_labels)
def get_adv_black_retrieval_result(net1, net2, adv_method, step, linf, i_max, j_max, dis_method, job_dataset='', threshold=5, batch_size=8, allowLoad=True):
    """Compute (or load from cache) the black-box targeted-retrieval result
    for the precomputed adversarial images of the (net1 -> net2) transfer.

    Returns an i_max x j_max matrix of targeted-retrieval counts; the result
    is cached as an .npy file keyed by the attack configuration.
    """
    # save/load and return the black box retrieval result for specific adv_imgs
    # the adv_imgs is loaded in this function
    path_blackTargetedNum_folder = 'save_for_load/distanceADVRetrieval/%s'%(adv_method)
    path_blackTargetedNum = path_blackTargetedNum_folder + '/targetedNum_white_%s_black_%s_step%1.1f_linf%d_%s.npy' % (
        net1, net2, step, linf, dis_method)
    if not os.path.exists(path_blackTargetedNum_folder):
        os.makedirs(path_blackTargetedNum_folder)
    # Cache hit: return the previously saved matrix.
    if os.path.exists(path_blackTargetedNum) and allowLoad:
        adv_black_retrieval_result = np.load(path_blackTargetedNum)
        print('load path_blackTargetedNum in:', path_blackTargetedNum)
        return adv_black_retrieval_result
    adv_black_retrieval_result = np.zeros([i_max, j_max])
    # Path of the precomputed adversarial images for this configuration.
    npy_name = '/%s_imgs_step%1.1f_linf%d_%dx%d_%s.npy' % (adv_method, step, linf, i_max, j_max, dis_method)
    npy_path = 'save_for_load/' + net1 + npy_name
    # Legacy loading path kept as dead code for reference.
    '''
    path_white_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy' % (net1)
    path_black_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy' % (net2)
    dset_test, dset_database = load_dset_params(job_dataset)
    model1, snapshot_path, query_path, database_path = load_net_params(net1)
    model2, snapshot_path2, query_path2, database_path2 = load_net_params(net2)
    tmp = np.load(database_path)
    _, code, multi_label = tmp['arr_0'], tmp['arr_1'], tmp['arr_2']
    tmp2 = np.load(database_path2)
    _, code2, multi_label2 = tmp2['arr_0'], tmp2['arr_1'], tmp2['arr_2']
    test_dis_white, test_dis_black = get_test_dis(path_white_test_dis_npy, path_black_test_dis_npy)
    test_true_id_x, test_true_label_y = choose_index_by_dis_method(dis_method, test_dis_white, test_dis_black, max_dis=18, min_dis=12)
    id_size = test_true_id_x.shape[0]
    print('id size:', id_size)
    i_index_set = np.arange(0, id_size, id_size / (i_max))[:i_max]
    inputs_ori_tensor = torch.stack([dset_test[test_true_id_x[i_index_set[i]]][0] for i in range(i_max)])
    j_index_matrix = get_unique_index(code, multi_label, j_max)
    '''
    hash_bit = 48
    from publicFunctions import NetworkSettings
    from myExpForPapers_nag import EXPSettings
    network_settings1 = NetworkSettings(job_dataset, hash_bit, net1, snapshot_iter=iters_list[net1], batch_size=16)
    network_settings2 = NetworkSettings(job_dataset, hash_bit, net2, snapshot_iter=iters_list[net2], batch_size=16)
    exp_settings = EXPSettings(net1, net2, dis_method, i_max, j_max, step=step, linf=linf)
    # Black-box model (net2); codes/labels come from both networks' databases.
    model2 = network_settings2.get_model()
    _, code, multi_label = network_settings1.get_out_code_label(part='database')
    _, code_test, multi_label_test = network_settings1.get_out_code_label(part='test')
    _, code2, multi_label2 = network_settings2.get_out_code_label(part='database')
    _, code_test2, _ = network_settings2.get_out_code_label(part='test')
    dset_loaders = network_settings1.get_dset_loaders()
    i_index_set, j_index_matrix = exp_settings.cal_index_set_matrix_white(code_test, code, multi_label)
    test_true_label_y = exp_settings.test_true_label_y
    dset_database = dset_loaders['database'].dataset
    print('load adv_imgs from:', npy_path)
    adv_imgs = np.load(npy_path)
    inputs_adv = Variable(torch.Tensor(adv_imgs).cuda())
    # Per-class retrieval counts of the adversarial batch on the BLACK box.
    black_img_num_result = get_img_num_by_class_from_img_batch(inputs_adv, model2, code2, multi_label2,
                                                               threshold=threshold, batch_size=batch_size)
    label_targeted = np.zeros([i_max, j_max])
    # Older per-row reduction kept as dead code for reference.
    '''
    for i in range(i_max):
        #j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
        #label_targeted = np.array([multi_label[j_index_set[j]] for j in range(j_max)])
        j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
        label_targeted_i = np.array([multi_label[j_index_set[j]] for j in range(j_max)])
        label_targeted[i] = label_targeted_i
        img_num_black_targeted = get_targeted_from_all_class(black_img_num_result[i], label_targeted_i)
        #print(i, label_targeted_i)
        adv_black_retrieval_result[i] = img_num_black_targeted
    #adv_black_retrieval_result = get_targeted_from_all_class(black_img_num_result, label_targeted)
    '''
    # Build the targeted-label matrix (white-box labels) and reduce in one call.
    for i in range(i_max):
        j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
        label_targeted_i = np.array([multi_label[j_index_set[j]] for j in range(j_max)])
        label_targeted[i] = label_targeted_i
    adv_black_retrieval_result = get_targeted_from_all_class(black_img_num_result, label_targeted)
    # Cache the result for subsequent calls.
    np.save(path_blackTargetedNum, adv_black_retrieval_result)
    print('save blackTargetedNum file to:', path_blackTargetedNum)
    return adv_black_retrieval_result
def get_target_targetedRetrievalNum(net1, net2, adv_method, step, linf, i_max, j_max, dis_method, job_dataset='', allowLoad=True):
    """Compute (or load from cache) the targeted-retrieval counts of the
    ORIGINAL target images on the white-box model (net1).

    Returns an i_max x j_max matrix; independent of the attack method.
    """
    # returns the targeted retrieval number of original targets imgs.
    # The result has no relation with the adv method
    target_targeted_retrieval_num_folder_path = './save_for_load/%s/'%(net1)
    target_targeted_retrieval_num_path = target_targeted_retrieval_num_folder_path+'/target_targetedRetrievalNum_%s.npy' % (
        dis_method)
    if not os.path.exists(target_targeted_retrieval_num_folder_path):
        os.makedirs(target_targeted_retrieval_num_folder_path)
    # Cache hit: return the previously saved matrix.
    if os.path.exists(target_targeted_retrieval_num_path) and allowLoad:
        target_targetedNum_mat = np.load(target_targeted_retrieval_num_path)
        print('load target_targeted_retrieval_num_path in:', target_targeted_retrieval_num_path)
        return target_targetedNum_mat
    else:
        #npy_name = '/%s_imgs_step%1.1f_linf%d_%dx%d_%s.npy' % (adv_method, step, linf, i_max, j_max, dis_method)
        #npy_path = 'save_for_load/' + net1 + npy_name
        # Legacy loading path kept as dead code for reference.
        '''
        path_white_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy'%(net1)
        path_black_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy'%(net2)
        dset_test, dset_database = load_dset_params(job_dataset)
        model1, snapshot_path, query_path, database_path = load_net_params(net1)
        tmp = np.load(database_path)
        _, code, multi_label = tmp['arr_0'], tmp['arr_1'], tmp['arr_2']
        test_dis_white, test_dis_black = get_test_dis(path_white_test_dis_npy, path_black_test_dis_npy)
        test_true_id_x, test_true_label_y = choose_index_by_dis_method(dis_method, test_dis_white, test_dis_black, max_dis=18, min_dis=12)
        id_size = test_true_id_x.shape[0]
        print('id size:',id_size)
        i_index_set = np.arange(0, id_size, id_size / (i_max))[:i_max]
        #inputs_ori_tensor = torch.stack([dset_test[test_true_id_x[i_index_set[i]]][0] for i in range(i_max)])
        j_index_matrix = get_unique_index(code, multi_label, j_max)
        '''
        hash_bit = 48
        from publicFunctions import NetworkSettings
        from myExpForPapers_nag import EXPSettings
        network_settings1 = NetworkSettings(job_dataset, hash_bit, net1, snapshot_iter=iters_list[net1], batch_size=16)
        network_settings2 = NetworkSettings(job_dataset, hash_bit, net2, snapshot_iter=iters_list[net2], batch_size=16)
        exp_settings = EXPSettings(net1, net2, dis_method, i_max, j_max, step=step, linf=linf)
        # White-box model (net1) is the one queried here.
        model1 = network_settings1.get_model()
        _, code, multi_label = network_settings1.get_out_code_label(part='database')
        _, code_test, multi_label_test = network_settings1.get_out_code_label(part='test')
        _, code2, multi_label2 = network_settings2.get_out_code_label(part='database')
        _, code_test2, _ = network_settings2.get_out_code_label(part='test')
        dset_loaders = network_settings1.get_dset_loaders()
        i_index_set, j_index_matrix = exp_settings.cal_index_set_matrix_white(code_test, code, multi_label)
        test_true_label_y = exp_settings.test_true_label_y
        dset_database = dset_loaders['database'].dataset
        # Stack the original target images for the chosen (source, target) grid.
        target_img_mat = get_target_imgs(j_index_matrix, test_true_label_y, i_index_set, dset_database)
        from myRetrieval import get_img_num_by_class_from_img_batch, get_targeted_from_all_class
        inputs_targets = Variable(torch.Tensor(target_img_mat).cuda(), requires_grad=True)
        # Per-class retrieval counts of every target image on the white box.
        img_num_by_class_target = get_img_num_by_class_from_img_batch(inputs_targets, model1, code, multi_label,
                                                                      threshold=5, batch_size=16)
        target_targetedNum_mat = np.zeros([i_max, j_max])
        for i in range(i_max):
            j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
            label_targeted = np.array([multi_label[j_index_set[j]] for j in range(j_max)])
            # Keep only the counts for each row's own targeted classes.
            img_num_target_targeted = get_targeted_from_all_class(img_num_by_class_target[i], label_targeted)
            #print(i, label_targeted)
            target_targetedNum_mat[i] = img_num_target_targeted
        # Cache the result for subsequent calls.
        np.save(target_targeted_retrieval_num_path, target_targetedNum_mat)
        print('save blackTargetedNum(target_targetedNum_mat) to: %s'%(target_targeted_retrieval_num_path))
        return target_targetedNum_mat
# NOTE(review): fragment — this code clearly belongs inside a function/script
# whose beginning is not visible in this chunk (it uses names like
# path_ori_img, hash_retri_sys, targeted, tmp_exp_path defined elsewhere),
# and it is TRUNCATED mid-call at the end. Kept byte-identical.
np.save(path_ori_img, ori_img)
inputs_test = torch.tensor(ori_img).cuda().float()
from myRetrieval import get_img_num_by_class_from_img_batch, get_targeted_from_all_class
# Wrap the retrieval system as a threshold classifier for black-box labeling.
wrapModel_black = WrapSimpleRetriSysClassifierThreshold(
    hash_retri_sys, aux_labels=aux_labels, threshold=retrieval_threshold)
ori_labels = wrapModel_black(inputs_test).argmax(-1)
# Untargeted: keep the original labels; targeted: shift to the next class
# (mod 100 — presumably a 100-class dataset; verify against caller).
labels_target = ori_labels.cpu().numpy() if not targeted else (
    ori_labels.cpu().numpy() + 1) % 100
black_img_num_result = get_img_num_by_class_from_img_batch(inputs_test, model, code, multi_label, threshold=5, batch_size=32)
adv_black_retrieval_result = get_targeted_from_all_class(
    black_img_num_result, labels_target).astype(int)
# Only attack queries whose current retrieval count makes the attack meaningful.
index_valid = adv_black_retrieval_result >= 10 if not targeted else adv_black_retrieval_result < 10
#n_queries, x_adv = attackFlow.square_attack_preset_args(inputs_test[index_valid], labels_target=labels_target[index_valid], niters=200, eps=0.031, p_init=0.031, targeted=targeted)
path_x_adv = tmp_exp_path + 'x_adv_%s_%f_targeted_%s.npy' % (
    net, pert_level, str(targeted))
if os.path.exists(path_x_adv):
    # Reuse cached adversarial examples when available.
    x_adv = np.load(path_x_adv)
else:
    n_queries, x_adv = attackFlow.square_attack_preset_args(
        inputs_test[index_valid],
        labels_target=labels_target[index_valid],
        niters=200,
        eps=pert_level,
        p_init=pert_level,
        targeted=targeted,
index_close_white_class = np.argmin(query_avg_dis_white, axis=1) # also the label query_avg_dis_white_closest = np.array([ query_avg_dis_white[i, index_close_white_class[i]] for i in range(ad_sample_size) ]) # see if the closet class samples are included in returns. # If none returns, called it 'safe'. # We choose K(ad_size) safe AD imgs with the smallest 'query_avg_dis_white_closest' # index_closest_safe is the index of size 32 in range(ad_sample_size) to select the ideal data img_num_by_class = get_query_result_num_by_class(query_code, code, multi_label, threshold=5) img_num_target_targeted = get_targeted_from_all_class( img_num_by_class, index_close_white_class) index_safe_AD = img_num_target_targeted == 0 index_safe_AD_by_position = np.arange(ad_sample_size)[index_safe_AD] index_close_white_class_safe = index_close_white_class[index_safe_AD] query_avg_dis_white_closet_safe = query_avg_dis_white_closest[ index_safe_AD] index_closest_safe = index_safe_AD_by_position[np.argsort( query_avg_dis_white_closet_safe, ad_size)[:ad_size]] #index_closest_safe_AD = [index_closest_safe] # get the inputs_AD and label_target inputs_AD = inputs_AD_sample[index_closest_safe] label_target = index_close_white_class[index_closest_safe]
def func_enable_retrieval():
    """Per-source-image retrieval analysis on white and black box models:
    prints diagnostics and fills the module-level result matrices
    (whiteHammingMatrix, blackTargetedNumMatrix, oriBlackCountMatrix,
    whiteMatrix, blackMatrix, distanceMatrix, targetCountMatrix, ...),
    optionally saving them to .npy files.

    NOTE(review): indentation below reconstructed from a collapsed source —
    the nesting of the trailing save/print sections should be confirmed
    against the original file. Relies entirely on module-level globals.
    """
    # this is a function segment
    for i in range(i_max):
        print('id:%d' % (i))
        i_index = int(test_true_id_x[i_index_set[i]])
        inputs_ori = Variable(inputs_ori_tensor.cuda())[i].unsqueeze(0)
        inputs_adv = Variable(torch.Tensor(adv_imgs).cuda())[i].unsqueeze(0)
        j_index_set = j_index_matrix[int(
            test_true_label_y[i_index_set[i]])].astype(int)
        label_targeted = np.array(
            [multi_label[j_index_set[j]] for j in range(j_max)])
        label2_targeted = np.array(
            [multi_label2[j_index_set[j]] for j in range(j_max)])
        if not bSaveBlackTargetedNum:
            X = np.stack(
                [dset_database[j_index_set[j]][0] for j in range(j_max)])
            inputs_target = Variable(torch.Tensor(X).cuda(), requires_grad=True)
            # get the target's retrieval result for each class on White and Black
            img_num_by_class_target = get_img_num_by_class_from_img_batch(
                inputs_target, model1, code, multi_label, threshold=threshold, batch_size=16)
            img_num_by_class_target_black = get_img_num_by_class_from_img_batch(
                inputs_target, model2, code2, multi_label2, threshold=threshold, batch_size=8)
        # get the adv's retrieval result for each class on White and Black
        img_num_by_class_adv = get_img_num_by_class_from_img_batch(
            inputs_adv, model1, code, multi_label, threshold=threshold, batch_size=16)
        img_num_by_class_adv_black = get_img_num_by_class_from_img_batch(
            inputs_adv, model2, code2, multi_label2, threshold=threshold, batch_size=8)
        if not bSaveBlackTargetedNum:
            # get the ori's retrieval result for each class on Black
            img_num_by_class_ori_black = get_img_num_by_class_from_img_batch(
                inputs_ori, model2, code2, multi_label2, threshold=threshold, batch_size=8)
            # get the target's retrieval result for targeted class only on White and Black
            img_num_target_targeted = get_targeted_from_all_class(
                img_num_by_class_target, np.expand_dims(label_targeted, 0))
            img_num_target_black_targeted = get_targeted_from_all_class(
                img_num_by_class_target_black, np.expand_dims(label2_targeted, 0))
        # get the adv's retrieval result for targeted class only on White and Black
        img_num_adv_targeted = get_targeted_from_all_class(
            img_num_by_class_adv, np.expand_dims(label_targeted, 0))
        print(img_num_by_class_adv_black.shape)
        img_num_adv_black_targeted = get_targeted_from_all_class(
            img_num_by_class_adv_black, np.expand_dims(label2_targeted, 0))
        if not bSaveBlackTargetedNum:
            # get the ori's retrieval result for targeted class only on Black
            img_num_by_class_ori_black_targeted = get_targeted_from_all_class(
                img_num_by_class_ori_black, np.expand_dims(label2_targeted, 0))
        # GUIDE:
        # Compare img_num_adv_black_targeted with img_num_target_targeted,
        # if one item in img_num_target_targeted is high enough, ignore it.
        # if we found one item has a great difference, we succeed.
        if not bSaveBlackTargetedNum:
            print(adv_method + ":")
            print("WhiteBox(%d imgs overall):" % (1 * j_max))
            print("", img_num_adv_targeted.sum(),
                  (img_num_adv_targeted > 0).sum())
            print("BlackBox(%d imgs overall):" % (1 * j_max))
            print("", img_num_adv_black_targeted.sum(),
                  (img_num_adv_black_targeted > 0).sum())
            code_adv_black = np.sign(
                model_np_batch(model2, inputs_adv, batch_size=8))
            code_ori_black = code_test2[i_index]
            code_targeted_black = code2[j_index_set]
        #
        code_ori_white = code_test[i_index]
        code_targeted_white = code[j_index_set]
        # L0 norm of code difference == Hamming distance between hash codes.
        whiteHammingMatrix[i] = np.transpose(
            np.linalg.norm(code_ori_white - code_targeted_white, ord=0, axis=-1))
        blackTargetedNumMatrix[i] = img_num_adv_black_targeted
        if not bSaveBlackTargetedNum:
            code_diff_adv_target = np.transpose(
                np.linalg.norm(code_adv_black - code_targeted_black, ord=0, axis=-1))
            code_diff_ori_adv = np.linalg.norm(
                np.swapaxes(code_adv_black, 0, 1) - code_ori_black, ord=0, axis=-1)
            code_diff_ori_target = np.array([
                np.transpose(
                    np.linalg.norm(code_ori_black - code_targeted_black[j], ord=0, axis=-1))
                for j in range(j_max)
            ])
            print(code_diff_adv_target.mean())
            print(code_diff_ori_adv.mean())
            print(code_diff_ori_target.mean())
            succeed_index = np.where(img_num_adv_black_targeted > 0)
            print(img_num_adv_black_targeted[
                img_num_adv_black_targeted > 0].astype(int))
            print(succeed_index[0], '\n', succeed_index[1])
            oriBlackCountMatrix[i] = img_num_by_class_ori_black_targeted[0]
            whiteMatrix[i][0], whiteMatrix[i][1] = img_num_adv_targeted.sum(
            ), (img_num_adv_targeted > 0).sum()
            blackMatrix[i][0], blackMatrix[i][
                1] = img_num_adv_black_targeted.sum(), (
                    img_num_adv_black_targeted > 0).sum()
            distanceMatrix[i, 0], distanceMatrix[i, 1], distanceMatrix[i, 2] = \
                code_diff_adv_target.mean(), code_diff_ori_adv.mean(), code_diff_ori_target.mean()
            targetCountMatrix[i] = img_num_adv_black_targeted[
                img_num_adv_black_targeted > 0].astype(int)
            succeedIndexXMatrix[i], succeedIndexYMatrix[i] = succeed_index[
                0], succeed_index[1]
    # Summary printout (skipped when only saving the black targeted counts).
    if not bSaveBlackTargetedNum:
        print("dis_method:%s" % (dis_method))
        print("retrieval num of ori in blackbox:\n",
              oriBlackCountMatrix.astype(int))
        print("retrieval num and sample size of adv in whitebox:\n",
              whiteMatrix.astype(int).transpose())
        print("retrieval num and sample size of adv in blackbox:\n",
              blackMatrix.astype(int).transpose())
        print("distanceMatrix of adv in blackbox:\n",
              distanceMatrix.transpose())
        print("attack percentage(i-level, white and black):%f,%f" %
              (float((whiteMatrix[:, 1] > 0).sum()) / i_max,
               float((blackMatrix[:, 1] > 0).sum()) / i_max))
        print("attack percentage(i*j-level, white and black):%f,%f" %
              (float((whiteMatrix[:, 1]).sum()) / i_max / j_max,
               float((blackMatrix[:, 1]).sum()) / i_max / j_max))
    if bSaveWhiteHamming:
        np.save(path_whiteHamming, whiteHammingMatrix)
        print('Save white hamming distance matrix file to: %s' %
              (path_whiteHamming))
    if bSaveBlackTargetedNum:
        np.save(path_blackTargetedNum, blackTargetedNumMatrix)
        print('Save black targeted number file to: %s' %
              (path_blackTargetedNum))
# NOTE(review): fragment — begins with the trailing arguments of a call whose
# opening (presumably get_img_num_by_class_from_img_batch(...)) is missing
# from this chunk, and is TRUNCATED mid-expression at the end.
# Kept byte-identical.
    inputs_adv_cornell, model2, code2, multi_label2, threshold=5, batch_size=8)
img_num_target_targeted_white = np.zeros([i_max, j_max])
img_num_target_targeted_black = np.zeros([i_max, j_max])
img_num_target_ori = np.zeros([i_max, j_max])
for i in range(i_max):
    #i_index = int(test_true_id_x[i_index_set[i]])
    j_index_set = j_index_matrix[int(
        test_true_label_y[i_index_set[i]])].astype(int)
    label_targeted = multi_label[j_index_set]
    # Targeted counts of the Cornell adversarial batch, white and black box.
    img_num_target_targeted_white[i] = get_targeted_from_all_class(
        img_num_by_class_adv_cornell_white[i], label_targeted)
    img_num_target_targeted_black[i] = get_targeted_from_all_class(
        img_num_by_class_adv_cornell_black[i], label_targeted)
    targetCodes = code[j_index_set]
    # Baseline: retrieval counts for the original target codes themselves.
    img_num_by_class_ori_white = get_query_result_num_by_class(
        targetCodes, code, multi_label, threshold=5)
    img_num_target_ori[i] = get_targeted_from_all_class(
        img_num_by_class_ori_white, label_targeted
    )  #get_targeted_from_all_class(img_num_by_class_adv_cornell_white[i], label_targeted)
img_num_target_ori = img_num_target_ori.astype(int)
print("img_num_target_targeted_white:",
      (img_num_target_targeted_white >= 100).sum())
print("img_num_target_targeted_black:",
      (img_num_target_targeted_black >= 10).sum())
print("img_num_target_targeted_black(valid):", (
    img_num_target_targeted_black[img_num_target_targeted_white >= 100]