def imgs_to_file(adv_imgs='', pics_root_path=''):
    from publicFunctions import load_net_inputs, load_net_params, load_dset_params
    import matplotlib.pyplot as plt

    npy_name = '/%s_imgs_step%1.1f_linf%d_%dx%d_%s.npy' % (adv_method, step, linf, i_max, j_max, dis_method)
    npy_path = 'save_for_load/' + net1 + npy_name
    path_white_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy' % (net1)
    path_black_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy' % (net2)

    dset_test, dset_database = load_dset_params(job_dataset)
    model1, snapshot_path, query_path, database_path = load_net_params(net1)
    tmp = np.load(database_path)
    _, code, multi_label = tmp['arr_0'], tmp['arr_1'], tmp['arr_2']

    test_dis_white, test_dis_black = get_test_dis(path_white_test_dis_npy, path_black_test_dis_npy)
    test_true_id_x, test_true_label_y = choose_index_by_dis_method(
        dis_method, test_dis_white, test_dis_black, max_dis=18, min_dis=12)
    id_size = test_true_id_x.shape[0]
    print('id size:', id_size)
    i_index_set = np.arange(0, id_size, id_size / i_max)[:i_max]
    j_index_matrix = get_unique_index(code, multi_label, j_max)

    adv_imgs = np.load(npy_path)
    pics_root_path = './save_for_load/pics/%s_imgs_step%1.1f_linf%d_%dx%d_%s/' % (
        adv_method, step, linf, i_max, j_max, dis_method)
    if not os.path.exists(pics_root_path):
        os.makedirs(pics_root_path)

    # save each adversarial image (i: query index, j: target index) as a jpg
    for i in range(i_max):
        for j in range(j_max):
            print('i,j:', i, j)
            file_name_full = pics_root_path + '/' + 'i%s_j%s.jpg' % (str(i), str(j))
            # adv_imgs[i, j] is stored CHW; imsave expects HWC
            img_array = np.moveaxis(adv_imgs[i, j], 0, -1)
            plt.imsave(file_name_full, img_array)
    return
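# Note (illustrative): imgs_to_file() and main_func() read their configuration
# from module-level globals (adv_method, step, linf, i_max, j_max, dis_method,
# net1, net2, job_dataset) rather than from their parameters, so those globals
# must be assigned (e.g. in the __main__ block at the bottom of this file)
# before either function is called.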
def main_func():
    from publicFunctions import load_net_inputs, load_net_params, load_dset_params

    npy_name = '/%s_imgs_step%1.1f_linf%d_%dx%d_%s.npy' % (adv_method, step, linf, i_max, j_max, dis_method)
    npy_path = 'save_for_load/' + net1 + npy_name
    path_white_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy' % (net1)
    path_black_test_dis_npy = 'save_for_load/distanceADVRetrieval/test_dis_%s.npy' % (net2)

    dset_test, dset_database = load_dset_params(job_dataset)
    model1, snapshot_path, query_path, database_path = load_net_params(net1)
    tmp = np.load(database_path)
    _, code, multi_label = tmp['arr_0'], tmp['arr_1'], tmp['arr_2']

    test_dis_white, test_dis_black = get_test_dis(path_white_test_dis_npy, path_black_test_dis_npy)
    test_true_id_x, test_true_label_y = choose_index_by_dis_method(
        dis_method, test_dis_white, test_dis_black, max_dis=18, min_dis=12)
    id_size = test_true_id_x.shape[0]
    print('id size:', id_size)
    # cast to int: these values are used as dataset indices below
    i_index_set = np.arange(0, id_size, id_size / i_max)[:i_max].astype(int)
    inputs_ori_tensor = torch.stack([dset_test[test_true_id_x[i_index_set[i]]][0] for i in range(i_max)])
    j_index_matrix = get_unique_index(code, multi_label, j_max)

    adv_imgs = np.load(npy_path)
    ori_imgs = inputs_ori_tensor.cpu().numpy()
    # imgs_test = np.load('save_for_load/imgs_test.npy')
    # target_img_mat = np.load('save_for_load/target_imgs.npy')
    target_img_mat = get_target_imgs(j_index_matrix, test_true_label_y, i_index_set, dset_database)

    from myRetrieval import get_img_num_by_class_from_img_batch, get_targeted_from_all_class
    inputs_targets = Variable(torch.Tensor(target_img_mat).cuda(), requires_grad=True)
    img_num_by_class_target = get_img_num_by_class_from_img_batch(
        inputs_targets, model1, code, multi_label, threshold=5, batch_size=16)

    # for each (query i, target j), count retrieved database images carrying the targeted label
    target_targetedNum_mat = np.zeros([i_max, j_max])
    for i in range(i_max):
        j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
        label_targeted = np.array([multi_label[j_index_set[j]] for j in range(j_max)])
        img_num_target_targeted = get_targeted_from_all_class(img_num_by_class_target[i], label_targeted)
        target_targetedNum_mat[i] = img_num_target_targeted

    target_targeted_retrieval_num_path = './save_for_load/%s/target_targetedRetrievalNum_%s_%s.npy' % (
        net1, adv_method, dis_method)
    np.save(target_targeted_retrieval_num_path, target_targetedNum_mat)
    '''
    for i in range(i_max):
        for j in range(j_max):
            j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
            target_img = target_img_mat[i, j]
            #target_result = get_target_retrival_result(model1, )
    i, j = 3, 9
    ori_img = ori_imgs[i]
    adv_img = adv_imgs[i, j]
    j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
    label_targeted = np.array([database_label[j_index_set[j]] for j in range(j_max)])
    target_label = label_targeted[j]
    perturbation_ratio_bound = estimate_subspace_size(adv_img, ori_img, model1, target_label, code, database_label)
    '''
    return
def get_features_by_net(net, layer_index_value, inputs):
    layer_len = len(layer_index_value)
    features_by_layer = {}
    model, snapshot_path, query_path, database_path = load_net_params(net)
    for i in range(layer_len):
        layer_index = layer_index_value[i]
        sub_model = getConvLayerByIndex(model, layer_index, net)
        feature_out = sub_model(inputs)
        features_by_layer[i] = feature_out
    return features_by_layer
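# Minimal usage sketch (assumption, not part of the original pipeline): collect
# per-layer features for two nets on the same inputs so their activations can be
# compared. Assumes `inputs` is a CUDA tensor/Variable of images and that
# publicVariables.layer_index_value_list is importable, as elsewhere in this file.
def _example_compare_layer_features(inputs, net_a='ResNet152', net_b='ResNext101_32x4d'):
    from publicVariables import layer_index_value_list
    feats_a = get_features_by_net(net_a, layer_index_value_list[net_a][:-1], inputs)
    feats_b = get_features_by_net(net_b, layer_index_value_list[net_b][:-1], inputs)
    for k in range(min(len(feats_a), len(feats_b))):
        print(k, feats_a[k].size(), feats_b[k].size())
    return feats_a, feats_b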
def get_feature_np(sub_model, inputs):
    # sub_model is a truncated network, e.g. built with
    # getConvLayerByIndex(model, layer_index, net); run it on the inputs and
    # return its activations as a numpy array.
    feature_out = sub_model(inputs)
    feature_np = feature_out.cpu().data.numpy()
    return feature_np
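# Usage sketch (illustrative only): build the truncated sub-model first, then
# extract its activations as numpy, as main_backup() does for a single layer.
#   sub_model = getConvLayerByIndex(model1, layer_index_value_list['ResNet152'][-2], 'ResNet152')
#   feature_np = get_feature_np(sub_model, inputs)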
def get_histograms_by_net(net, layer_index_value, inputs, bins=10):
    layer_len = len(layer_index_value)
    histograms_by_layer = np.zeros([layer_len, bins])
    model, snapshot_path, query_path, database_path = load_net_params(net)
    for i in range(layer_len):
        layer_index = layer_index_value[i]
        sub_model = getConvLayerByIndex(model, layer_index, net)
        histograms_by_layer[i] = get_histogram_by_sub_model(sub_model, inputs, bins=bins)
    return histograms_by_layer
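# Minimal sketch (assumption, not called anywhere in this file): compare the
# per-layer activation histograms of two nets with the earth mover's distance,
# mirroring what main_backup() does for a single layer. Assumes the two layer
# lists are comparable position by position.
def _example_layerwise_emd(inputs, net_a='ResNet152', net_b='ResNext101_32x4d', bins=10):
    from publicVariables import layer_index_value_list
    from scipy.stats import wasserstein_distance as emd
    hist_a = get_histograms_by_net(net_a, layer_index_value_list[net_a][:-1], inputs, bins=bins)
    hist_b = get_histograms_by_net(net_b, layer_index_value_list[net_b][:-1], inputs, bins=bins)
    n_layers = min(hist_a.shape[0], hist_b.shape[0])
    # one EMD value per layer pair
    return np.array([emd(hist_a[k], hist_b[k]) for k in range(n_layers)])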
def main_backup():
    # `inputs` is expected to be defined at module level (see the __main__ block)
    net1 = 'ResNet152'
    # convIndex is the index for the sub-model conv layer, starting from 1
    from publicVariables import layer_index_value_list
    layer_index_value1 = layer_index_value_list[net1]
    model1, snapshot_path, query_path, database_path = load_net_params(net1)
    dset_test, dset_database = load_dset_params(job_dataset)
    layer_index = layer_index_value1[-2]
    sub_model1 = getConvLayerByIndex(model1, layer_index, net1)
    feature_out1 = sub_model1(inputs)
    feature_np1 = feature_out1.cpu().data.numpy()

    net2 = 'ResNext101_32x4d'
    layer_index_value2 = layer_index_value_list[net2]
    model2, snapshot_path, query_path, database_path = load_net_params(net2)
    dset_test, dset_database = load_dset_params(job_dataset)
    layer_index2 = layer_index_value2[-1]
    sub_model2 = getConvLayerByIndex(model2, layer_index2, net2)
    feature_out2 = sub_model2(inputs)
    feature_np2 = feature_out2.cpu().data.numpy()

    # histogram the activations no larger than 1 and normalize to a distribution
    his_feature1 = np.histogram(feature_np1[feature_np1 <= 1])
    his_feature2 = np.histogram(feature_np2[feature_np2 <= 1])
    his_normalized_1 = his_feature1[0].astype(float) / his_feature1[0].sum()
    his_normalized_2 = his_feature2[0].astype(float) / his_feature2[0].sum()

    from scipy.stats import wasserstein_distance as emd
    emd1_2 = emd(his_normalized_1, his_normalized_2)
    emd2_1 = emd(his_normalized_2, his_normalized_1)
    print(emd1_2, emd2_1)
if __name__ == "__main__":
    job_dataset = 'imagenet'
    job_values = ['mnist', 'cifar10', 'fashion_mnist']
    net_values = ['ResNet18', 'ResNet34', 'AlexNet']
    net = 'ResNet152'
    step = 1.0
    linf = 32
    adv_method = 'miFGSM'
    adv_method_list = ['iFGSM', 'iFGSMDI', 'iFGSMMT', 'iFGSMMTDI']

    dset_test, dset_database = load_dset_params(job_dataset)
    model, snapshot_path, query_path, database_path = load_net_params(net)
    tmp = np.load(database_path)
    output, code, multi_label = tmp['arr_0'], tmp['arr_1'], tmp['arr_2']
    tmp = np.load(query_path)
    output_test, code_test, multi_label_test = tmp['arr_0'], tmp['arr_1'], tmp['arr_2']

    # set index for the targeted image
    index = 4

    # load the ad image
    ad_datapath = '../data/ad_dataset/ads/0/'
    datapath_dir = os.listdir(ad_datapath)
    #ad_imagepath = ad_datapath + '51.jpg'
    bins = 16

    # the following segment sets the AD images as the inputs
    ad_datapath = '../data/ad_dataset/ads/0/'
    inputs1 = load_net_inputs(ad_datapath, 0)
    #inputs2 = load_net_inputs(ad_datapath, 100)
    inputs2 = load_net_inputs(ad_datapath, 0)

    # the following segment sets the ImageNet images as the inputs
    dset_test, dset_database = load_dset_params(job_dataset)

    # same-net experiment
    net1 = 'ResNet152'
    net2 = net1
    #net2 = 'ResNet152'
    model1, snapshot_path, query_path, database_path = load_net_params(net1)
    model2, snapshot_path, query_path, database_path = load_net_params(net2)
    tmp = np.load(database_path)
    output, code, multi_label = tmp['arr_0'], tmp['arr_1'], tmp['arr_2']

    from publicVariables import layer_index_value_list
    layer_index_value1 = layer_index_value_list[net1][:-1]
    layer_index_value2 = layer_index_value_list[net2][:-1]
    A = len(layer_index_value1)
    B = len(layer_index_value2)
    # block counts: ResNet152 has 3+8+36+3 residual blocks, otherwise 3+4+23+3
    len_1 = 3 + 8 + 36 + 3 if net1 == 'ResNet152' else 3 + 4 + 23 + 3
    len_2 = 3 + 8 + 36 + 3 if net2 == 'ResNet152' else 3 + 4 + 23 + 3

    # set net2 same as net1
    # same_emd_eye, same_l2_eye, same_cos_eye, diff_emd_eye, diff_l2_eye, diff_cos_eye = multi_images()