Example #1
def read_n_preprocess(image_path):
    """
    Helper funciton.
    """
    return preprocess_image_batch([image_path],
                                  img_size=(256, 256),
                                  crop_size=(224, 224))
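A brief usage sketch for this helper; the image path and the classifier handle f below are placeholders, not part of the original example:

import numpy as np

# Hypothetical call; 'some_image.jpg' is a placeholder path.
image = read_n_preprocess('some_image.jpg')  # a batch of one preprocessed 224x224 image
# label = np.argmax(f(image), axis=1)        # forward pass through an assumed classifier f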
Example #2
                target += 1

        else:
            print('>> Found a pre-computed universal perturbation! Retrieving it from', file_perturbation)
            v = np.load(file_perturbation)

        print('>> Testing the targeted universal perturbation on an image')

        # Test the perturbation on the image
        labels = open(os.path.join('data', 'labels.txt'),
                      'r').read().split('\n')

        image_original = preprocess_image_batch([path_test_image],
                                                img_size=(256, 256),
                                                crop_size=(224, 224),
                                                color_mode="rgb")
        str_label_original = img2str(f=f, img=image_original)

        # Clip the perturbation to make sure images fit in uint8

        image_perturbed = avg_add_clip_pert(image_original, v)
        label_perturbed = np.argmax(f(image_perturbed), axis=1).flatten()
        str_label_perturbed = img2str(f=f, img=image_perturbed)

        # Additionally, load the pre-computed perturbation
        pre_file_perturbation = os.path.join('data',
                                             'precomputing_perturbations',
                                             npy_perturbations)
        pre_v = np.load(pre_file_perturbation)
        # import matplotlib
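The avg_add_clip_pert call above clips the added perturbation so that the perturbed image still fits in uint8 once the channel means are restored. A minimal sketch of such a helper, assuming mean-subtracted, channels-last inputs; the channel means and the undo_image_avg definition are assumptions here (Example #3 uses an undo_image_avg in the same role):

import numpy as np

# Assumed ImageNet channel means subtracted by preprocess_image_batch (an assumption).
IMG_MEAN = np.array([103.939, 116.779, 123.68])


def undo_image_avg(img):
    """Sketch: map a mean-subtracted image back to [0, 255] pixel space."""
    return img + IMG_MEAN


def avg_add_clip_pert(image, v):
    """Sketch: add perturbation v to a mean-subtracted image, clipping so the
    result remains a valid uint8 image once the means are added back."""
    clipped_v = (np.clip(undo_image_avg(image + v), 0, 255)
                 - np.clip(undo_image_avg(image), 0, 255))
    return image + clipped_v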
Example #3
def pick_btlnk_label(sess, path_imagenet, pert, how_many, input_tensor,
                     bottleneck_tensor, output_tensor, softmax_tensor,
                     eval_type, T):
    """
	Pick out valid sample which can be perturbed successfully with the pert.
	***Design for CROSS universal perturbation test***.
	Returns:
	res: Array int containing normal label part and adversarial label part
	bottleneck_lists: Array float64 containing normal bottleneck part 
		and adversarial bottleneck part(前一半adversarial部分,后一半为original部分).
	"""
    half_how_many = int(np.ceil(how_many / 2))
    print('>> PICK PERT: need to pick out %d valid samples...' % half_how_many)

    bottleneck_lists = np.zeros((how_many, BOTTLENECK_TENSOR_SIZE),
                                dtype=np.float64)
    if eval_type == 'top':
        res = np.zeros((how_many, ), dtype=np.int64)
    elif eval_type == 'jsd' or eval_type == 'cos':
        res = np.zeros((how_many, OUTPUT_TENSOR_SIZE), dtype=np.float64)

    already_get = 0  # number of adversarial samples successfully collected so far
    path_test_set = os.path.join(path_imagenet, 'test')
    filenames = [x[2] for x in os.walk(path_test_set)][0]
    total_num = len(filenames)
    num_of_batch = int(np.ceil(total_num / FLAGS.batch_size))

    for i in range(num_of_batch):
        start = i * FLAGS.batch_size
        end = min((i + 1) * FLAGS.batch_size, total_num)
        image_batch = preprocess_image_batch(path_test_set,
                                             filenames[start:end], (256, 256),
                                             (224, 224))
        clipped_v = np.clip(undo_image_avg(image_batch + pert), 0,
                            255) - np.clip(undo_image_avg(image_batch), 0, 255)
        image_perturbed_batch = image_batch + clipped_v

        # Compute the bottleneck values and output values for one batch
        orin_btlnks = run_bottleneck_on_image(sess, image_batch, input_tensor,
                                              bottleneck_tensor)
        adv_btlnks = run_bottleneck_on_image(sess, image_perturbed_batch,
                                             input_tensor, bottleneck_tensor)
        orin_logits = run_bottleneck_on_image(sess, orin_btlnks,
                                              bottleneck_tensor, output_tensor)
        adv_logits = run_bottleneck_on_image(sess, adv_btlnks,
                                             bottleneck_tensor, output_tensor)
        # Pick out the successfully perturbed samples in this batch
        orin_labels = np.argmax(orin_logits, axis=1)
        adv_labels = np.argmax(adv_logits, axis=1)
        mask = orin_labels != adv_labels
        valid_num = np.sum(mask)
        temp_already_get = already_get + valid_num

        if temp_already_get >= half_how_many:
            temp_cnt = half_how_many - already_get

            bottleneck_lists[already_get:half_how_many] = adv_btlnks[
                mask][:temp_cnt]
            bottleneck_lists[(
                already_get +
                half_how_many):how_many] = orin_btlnks[mask][:temp_cnt]
            if eval_type == 'top':
                res[already_get:half_how_many] = adv_labels[mask][:temp_cnt]
                res[(already_get +
                     half_how_many):how_many] = orin_labels[mask][:temp_cnt]
            elif eval_type == 'jsd':
                # JSD evaluation needs softmax values; the softmax layer also
                # amplifies differences between the activation distributions,
                # but the temperature parameter T is needed to avoid saturation.
                res[already_get:half_how_many] = run_bottleneck_on_image(
                    sess, (adv_logits[mask][:temp_cnt] / T), output_tensor,
                    softmax_tensor)
                res[(already_get +
                     half_how_many):how_many] = run_bottleneck_on_image(
                         sess, (orin_logits[mask][:temp_cnt] / T),
                         output_tensor, softmax_tensor)
            elif eval_type == 'cos':
                res[already_get:half_how_many] = adv_logits[mask][:temp_cnt]
                res[(already_get +
                     half_how_many):how_many] = orin_logits[mask][:temp_cnt]
            else:
                print(
                    '++ Warning!! Please choose an evaluation type: top, jsd, or cos.'
                )
            print('++ pick_btlnk_label--res shape: ', res.shape)

            break
        else:
            bottleneck_lists[already_get:temp_already_get] = adv_btlnks[mask]
            bottleneck_lists[(already_get + half_how_many):(
                temp_already_get + half_how_many)] = orin_btlnks[mask]
            if eval_type == 'top':
                res[already_get:temp_already_get] = adv_labels[mask]
                res[(already_get +
                     half_how_many):(temp_already_get +
                                     half_how_many)] = orin_labels[mask]
            elif eval_type == 'jsd':
                res[already_get:temp_already_get] = run_bottleneck_on_image(
                    sess, (adv_logits[mask] / T), output_tensor,
                    softmax_tensor)
                res[(already_get +
                     half_how_many):(temp_already_get +
                                     half_how_many)] = run_bottleneck_on_image(
                                         sess, (orin_logits[mask] / T),
                                         output_tensor, softmax_tensor)
            elif eval_type == 'cos':
                res[already_get:temp_already_get] = adv_logits[mask]
                res[(already_get +
                     half_how_many):(temp_already_get +
                                     half_how_many)] = orin_logits[mask]
            else:
                print(
                    '++ Warning!! Please choose an evaluation type: top, jsd, or cos.'
                )
        already_get = temp_already_get
    print('++ pick res shape: ', res.shape)

    return bottleneck_lists, res
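For the 'jsd' evaluation type, the returned res holds temperature-scaled softmax distributions, adversarial in the first half and original in the second. A minimal sketch of how those two halves might be compared with the Jensen-Shannon divergence; the pairing loop and the helper name jsd are assumptions, not part of the example above:

import numpy as np


def jsd(p, q, eps=1e-12):
    """Jensen-Shannon divergence between two probability vectors."""
    p = np.asarray(p, dtype=np.float64) + eps
    q = np.asarray(q, dtype=np.float64) + eps
    p, q = p / p.sum(), q / q.sum()
    m = 0.5 * (p + q)

    def kl(a, b):
        return np.sum(a * np.log(a / b))

    return 0.5 * kl(p, m) + 0.5 * kl(q, m)


# Hypothetical pairing of the two halves returned by pick_btlnk_label:
# half = res.shape[0] // 2
# scores = [jsd(res[i], res[i + half]) for i in range(half)]
# print('mean JSD between adversarial and original softmax outputs:', np.mean(scores))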