def func_adv_hamming_distance():
    """Collect hash codes of the targeted, source (original) and adversarial images
    for both the white-box model (net1/model1) and the black-box model (net2/model2),
    caching the result as an .npz file under save_for_load/. Relies on the
    module-level globals of this script (adv_method, i_max, j_max, code, code2, ...)."""
    path_code_tar_adv_ori = 'save_for_load/distanceADVRetrieval/%s/code_tar_adv_ori_white_%s_black_%s_%s.npz' % (
        adv_method, net1, net2, dis_method)
    if os.path.exists(path_code_tar_adv_ori):
        code_tar_adv_ori = np.load(path_code_tar_adv_ori)
        code_targeted_white, code_source_white, code_adv_white, code_targeted_black, code_source_black, code_adv_black = \
            code_tar_adv_ori['arr_0'], code_tar_adv_ori['arr_1'], code_tar_adv_ori['arr_2'], \
            code_tar_adv_ori['arr_3'], code_tar_adv_ori['arr_4'], code_tar_adv_ori['arr_5']

    else:
        print("adv_method:%s, dis_method:%s" % (adv_method, dis_method))
        hash_bit = 48  # code length of the hashing models
        # [i_max, j_max, hash_bit] codes for the targeted/adversarial images and
        # [i_max, hash_bit] codes for the source (original) images, per model
        code_targeted_white = np.zeros([i_max, j_max, hash_bit])
        code_source_white = np.zeros([i_max, hash_bit])
        code_adv_white = np.zeros([i_max, j_max, hash_bit])
        code_targeted_black = np.zeros([i_max, j_max, hash_bit])
        code_source_black = np.zeros([i_max, hash_bit])
        code_adv_black = np.zeros([i_max, j_max, hash_bit])
        for i in range(i_max):
            print("Process: %d" % i)
            # database indices of the j_max targeted images chosen for source i's true label
            j_index_set = j_index_matrix[int(
                test_true_label_y[i_index_set[i]])].astype(int)
            # test-set index of the i-th source image
            i_index = int(test_true_id_x[i_index_set[i]])

            code_targeted_white_i = np.array(
                [code[j_index_set[j]] for j in range(j_max)])
            code_targeted_white[i] = code_targeted_white_i
            code_targeted_black_i = np.array(
                [code2[j_index_set[j]] for j in range(j_max)])
            code_targeted_black[i] = code_targeted_black_i

            code_source_white[i] = code_test[i_index]
            code_source_black[i] = code_test2[i_index]

            # hash the j_max adversarial images of source i on both models, in mini-batches
            adv_inputs = Variable(torch.Tensor(adv_imgs[i])).cuda()
            batch_size = 8
            code_adv_white[i] = get_query_code_batch(img_inputs=adv_inputs,
                                                     model=model1,
                                                     batch_size=batch_size)
            code_adv_black[i] = get_query_code_batch(img_inputs=adv_inputs,
                                                     model=model2,
                                                     batch_size=batch_size)
        np.savez(path_code_tar_adv_ori, code_targeted_white, code_source_white,
                 code_adv_white, code_targeted_black, code_source_black,
                 code_adv_black)
    return code_targeted_white, code_source_white, code_adv_white, code_targeted_black, code_source_black, code_adv_black
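

# Illustrative sketch (hypothetical helper, not part of the original pipeline): the code
# arrays returned above already allow the adversarial-to-target Hamming distances to be
# computed directly, assuming the models emit sign codes in {-1, +1}.
def _hamming_dist_sketch(codes_a, codes_b):
    # for {-1, +1} codes, (hash_bit - <a, b>) / 2 equals the number of differing bits
    hash_bit = codes_a.shape[-1]
    return (hash_bit - (codes_a * codes_b).sum(axis=-1)) / 2.0
# Example (hypothetical):
#   c_tar_w, c_src_w, c_adv_w, c_tar_b, c_src_b, c_adv_b = func_adv_hamming_distance()
#   white_dist = _hamming_dist_sketch(c_adv_w, c_tar_w)  # [i_max, j_max], white-box
#   black_dist = _hamming_dist_sketch(c_adv_b, c_tar_b)  # [i_max, j_max], transfer view

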
def func_trace_adv_code():
    """Record the hash code of every adversarial image after each perturbation step
    (0..linf) on both the white-box and black-box models, caching the result as .npz."""
    path_traced_adv_codes = 'save_for_load/distanceADVRetrieval/%s/traced_adv_codes_%s_black_%s_%s.npz' % (
        adv_method, net1, net2, dis_method)
    if os.path.exists(path_traced_adv_codes):
        code_tar_adv_ori = np.load(path_traced_adv_codes)
        traced_adv_code_white, traced_adv_code_black = \
            code_tar_adv_ori['arr_0'], code_tar_adv_ori['arr_1']
    else:
        hash_bit = 48
        # one code per perturbation step: [i_max, j_max, linf + 1, hash_bit]
        traced_adv_code_white = np.zeros([i_max, j_max, linf + 1, hash_bit])
        traced_adv_code_black = np.zeros([i_max, j_max, linf + 1, hash_bit])
        for i in range(i_max):
            i_index = int(test_true_id_x[i_index_set[i]])
            j_index_set = j_index_matrix[int(
                test_true_label_y[i_index_set[i]])]
            img_t = dset_test[i_index][0]
            for j in range(j_max):
                j_index = int(j_index_set[j])
                targetCode = code[j_index]

                print("i,j:", i, j)
                traced_adv_imgs = func_get_traced_adv_img(
                    model1,
                    img_t,
                    targetCode,
                    eps=step / 255,
                    l_inf_max=linf,
                    bShowProcess=False,
                    bEarlyStop=False,
                    adv_method=adv_method)

                traced_adv_img_inputs = Variable(
                    torch.Tensor(traced_adv_imgs)).cuda()
                traced_adv_code_white[i, j] = get_query_code_batch(
                    traced_adv_img_inputs, model1)
                traced_adv_code_black[i, j] = get_query_code_batch(
                    traced_adv_img_inputs, model2)
        np.savez(path_traced_adv_codes, traced_adv_code_white,
                 traced_adv_code_black)
    return traced_adv_code_white, traced_adv_code_black
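

# Illustrative sketch (hypothetical helper, not in the original code): the traced codes
# returned above have shape [i_max, j_max, linf + 1, hash_bit], one code per perturbation
# step. Assuming {-1, +1} sign codes, the average distance-to-target curve over the
# perturbation steps could be summarized as follows.
def _traced_distance_curve_sketch(traced_codes, target_codes):
    # traced_codes: [i_max, j_max, linf + 1, hash_bit]
    # target_codes: [i_max, j_max, hash_bit], e.g. code_targeted_white from above
    hash_bit = traced_codes.shape[-1]
    inner = (traced_codes * target_codes[:, :, None, :]).sum(axis=-1)
    dist = (hash_bit - inner) / 2.0  # Hamming distance at every step
    return dist.mean(axis=(0, 1))    # averaged curve of length linf + 1

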
def get_adv_code_diff_to_targeted(net1, adv_method, step, linf, i_max, j_max, dis_method, allowLoad=True):
    """Compute (or load from cache) the Hamming distance between each adversarial code
    and its targeted code on the white-box model net1, as an [i_max, j_max] matrix."""
    path_whiteCodeDiff_folder = 'save_for_load/distanceADVRetrieval/%s/' % (adv_method)
    path_whiteCodeDiff = path_whiteCodeDiff_folder + 'whiteCodeDiff_white_%s_step%1.1f_linf%d_%s.npy' % (
        net1, step, linf, dis_method)
    if not os.path.exists(path_whiteCodeDiff_folder):
        os.makedirs(path_whiteCodeDiff_folder)

    if os.path.exists(path_whiteCodeDiff) and allowLoad:
        adv_code_diff_to_targeted = np.load(path_whiteCodeDiff)
        print('load whiteCodeDiff in:', path_whiteCodeDiff)
        return adv_code_diff_to_targeted

    hash_bit = 48
    job_dataset = 'imagenet'
    from publicFunctions import NetworkSettings
    from myExpForPapers_nag import EXPSettings
    network_settings1 = NetworkSettings(job_dataset, hash_bit, net1, snapshot_iter=iters_list[net1], batch_size=16)

    exp_settings = EXPSettings(net1, '', dis_method, i_max, j_max, step=step, linf=linf)

    npy_name = '/%s_imgs_step%1.1f_linf%d_%dx%d_%s.npy' % (adv_method, step, linf, i_max, j_max, dis_method)
    npy_path = 'save_for_load/' + net1 + npy_name
    model1 = network_settings1.get_model()
    _, code, multi_label = network_settings1.get_out_code_label(part='database')
    _, code_test, multi_label_test = network_settings1.get_out_code_label(part='test')

    i_index_set, j_index_matrix = exp_settings.cal_index_set_matrix_white(code_test, code, multi_label)
    test_true_label_y = exp_settings.test_true_label_y

    adv_imgs = np.load(npy_path)

    code_targeted = np.zeros([i_max, j_max, hash_bit])
    adv_code_mat = np.zeros([i_max, j_max, hash_bit])

    adv_code_diff_to_targeted = np.zeros([i_max, j_max])

    for i in range(i_max):
        j_index_set = j_index_matrix[int(test_true_label_y[i_index_set[i]])].astype(int)
        code_targeted_i = np.array([code[j_index_set[j]] for j in range(j_max)])
        code_targeted[i] = code_targeted_i

        img_inputs = Variable(torch.Tensor(adv_imgs[i])).cuda()
        adv_code = get_query_code_batch(img_inputs, model1, batch_size=16)
        adv_code_mat[i] = adv_code
        # the L0 norm of the code difference counts differing bits, i.e. the Hamming distance for +-1 codes
        adv_code_diff_to_targeted[i] = np.linalg.norm(adv_code - code_targeted_i, ord=0, axis=1)

    np.save(path_whiteCodeDiff, adv_code_diff_to_targeted)
    return adv_code_diff_to_targeted
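

# Illustrative sketch (hypothetical helper, not part of the original experiment code):
# the [i_max, j_max] distance matrix returned above can be condensed into a targeted
# success rate by thresholding the Hamming distance; the threshold of 5 mirrors the
# retrieval radius used elsewhere in this script but is an assumption here.
def _targeted_success_rate_sketch(adv_code_diff_to_targeted, threshold=5):
    # fraction of (source, target) pairs whose adversarial code lands within
    # `threshold` bits of the targeted code
    return float((adv_code_diff_to_targeted <= threshold).mean())

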
def cal_retrievable_rate(inputs_adv, model2, code2, multi_label,
                         label_target_mat):
    """For each adversarial query, return the fraction of the targeted class in the
    black-box database (code2) that is retrieved within a Hamming radius of 5."""
    retrievable_rate = np.ones([inputs_adv.shape[0]])
    for i in range(inputs_adv.shape[0]):
        label_targeted = label_target_mat[i, 0]
        target_class_size = (multi_label == label_targeted).sum()
        adv_code_black = get_query_code_batch(inputs_adv[i], model2)
        query_result_black = get_retrieval_result_by_query_code(adv_code_black,
                                                                code2,
                                                                threshold=5)

        # flatten the per-query retrieval results into a single index list
        all_result_black_reshape = np.concatenate([
            query_result_black[j] for j in range(query_result_black.shape[0])
        ]).reshape([-1])
        # keep only the unique retrieved database items that belong to the targeted class
        query_result_black_unique = np.unique(all_result_black_reshape)
        query_result_black_unique = query_result_black_unique[
            multi_label[query_result_black_unique] == label_targeted]
        print("retrieved:%d, class size:%d, ratio:%f" %
              (query_result_black_unique.size, target_class_size,
               float(query_result_black_unique.size) / target_class_size))
        retrievable_rate[i] = float(
            query_result_black_unique.size) / target_class_size
    return retrievable_rate
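    # Usage note (hypothetical sketch, assuming the module-level objects of this script):
    #   rate = cal_retrievable_rate(inputs_adv, model2, code2, multi_label, label_target_mat)
    #   print("mean black-box retrievable rate: %f" % rate.mean())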
    _, code2, multi_label2 = network_settings2.get_out_code_label(
        part='database')

    ad_datapath = '../data/ad_dataset/ads/0/'
    ad_sample_size = 256
    ad_size = 32  # number of ad images used to generate the NAG adversarial examples
    target_size = 16

    bVisualize = False
    targetType = exp_target_type  # 'multi' or 'single'
    inputs_AD_sample = load_net_inputs(ad_datapath,
                                       0,
                                       batch_size=ad_sample_size)

    # get white closest class
    query_code = get_query_code_batch(inputs_AD_sample, model1, batch_size=8)
    query_avg_dis_white = get_query_avg_dis(query_code, code, multi_label)
    if targetType == 'single':
        index_close_white_class = np.argmin(query_avg_dis_white,
                                            axis=1)  # also the label
        query_avg_dis_white_closest = np.array([
            query_avg_dis_white[i, index_close_white_class[i]]
            for i in range(ad_sample_size)
        ])

        # Check whether samples of the closest class are included in the returned results;
        # if none are returned, the AD image is called 'safe'.
        # We choose K (ad_size) safe AD imgs with the smallest 'query_avg_dis_white_closest'.
        # index_closest_safe indexes the chosen ad_size images within range(ad_sample_size).
        img_num_by_class = get_query_result_num_by_class(query_code,