Example #1
0
def exp_over_grid_eval(model,
                       attack,
                       sess,
                       config,
                       attack_type,
                       eval_on_train=False):
    """Evaluate per-example correctness at every point of the spatial grid.

    Walks the eval (or train) split batch by batch and, for each batch, runs
    the model once per (tx, ty, rotation) grid point, recording whether each
    example was classified correctly under that transform.

    Note: ``attack`` and ``attack_type`` are accepted for interface
    compatibility with other eval entry points but are not used here.

    Args:
        model: model exposing x_input, y_input, transform, is_training and
            correct_prediction tensors.
        attack: unused.
        sess: TF session used to run the graph.
        config: experiment config; reads config.eval, config.data and
            config.attack.
        attack_type: unused.
        eval_on_train: evaluate on the training split when True.

    Returns:
        np.ndarray of shape [num_eval_examples, n_grid_points] whose entries
        are the per-example correctness values at each grid point.

    Raises:
        ValueError: if config.data.dataset_name is not recognized.
    """
    loaders = {
        "cifar-10": lambda: cifar10_input.CIFAR10Data(config.data.data_path),
        "cifar-100": lambda: cifar100_input.CIFAR100Data(
            config.data.data_path),
        "svhn": lambda: svhn_input.SVHNData(config.data.data_path),
    }
    dataset_name = config.data.dataset_name
    if dataset_name not in loaders:
        raise ValueError("Unknown dataset name.")
    data_iterator = loaders[dataset_name]()

    total = config.eval.num_eval_examples
    batch_size = config.eval.batch_size
    num_batches = int(math.ceil(total / batch_size))
    split = (data_iterator.train_data if eval_on_train
             else data_iterator.eval_data)

    results = np.zeros([total, np.prod(config.attack.grid_granularity)])
    for batch_idx in trange(num_batches):
        lo = batch_idx * batch_size
        hi = min(lo + batch_size, total)
        x_batch = split.xs[lo:hi, :]
        y_batch = split.ys[lo:hi]

        # One linspace per transform dimension; their Cartesian product is
        # the full evaluation grid.
        axes = [np.linspace(-limit, limit, num=steps)
                for limit, steps in zip(config.attack.spatial_limits,
                                        config.attack.grid_granularity)]
        for col, (tx, ty, rot) in enumerate(product(*axes)):
            # Same (tx, ty, rot) transform replicated for every example
            # in the batch.
            trans = np.tile([tx, ty, rot], (len(x_batch), 1))
            feed = {
                model.x_input: x_batch,
                model.y_input: y_batch,
                model.transform: trans,
                model.is_training: False
            }
            results[lo:hi, col] = sess.run(model.correct_prediction,
                                           feed_dict=feed)

    return results
Example #2
0
def exp_over_grid_eval_random_sample(model,
                                     sess,
                                     config,
                                     eval_on_train=False,
                                     num_eval_examples=100,
                                     seed=1):
    """Evaluate a random sample of examples over the full spatial grid.

    Draws ``num_eval_examples`` examples (with numpy's default sampling)
    from the chosen split and runs the model once per (tx, ty, rotation)
    grid point, recording per-example correctness.

    Args:
        model: model exposing x_input, y_input, transform, is_training and
            correct_prediction tensors.
        sess: TF session used to run the graph.
        config: experiment config; reads config.data and config.attack.
        eval_on_train: sample from the training split when True.
        num_eval_examples: number of examples to sample.
        seed: numpy RNG seed for reproducible sampling.

    Returns:
        np.ndarray of shape [num_eval_examples, n_grid_points] with
        per-example correctness values at each grid point.

    Raises:
        ValueError: if config.data.dataset_name is not recognized.
    """
    np.random.seed(seed)
    loaders = {
        "cifar-10": lambda: cifar10_input.CIFAR10Data(config.data.data_path),
        "cifar-100": lambda: cifar100_input.CIFAR100Data(
            config.data.data_path),
        "svhn": lambda: svhn_input.SVHNData(config.data.data_path),
    }
    dataset_name = config.data.dataset_name
    if dataset_name not in loaders:
        raise ValueError("Unknown dataset name.")
    data_iterator = loaders[dataset_name]()

    split = (data_iterator.train_data if eval_on_train
             else data_iterator.eval_data)
    picks = np.random.choice(split.n, num_eval_examples)
    x_batch = split.xs[picks, :]
    y_batch = split.ys[picks]

    # One linspace per transform dimension; their Cartesian product is the
    # full evaluation grid.
    axes = [np.linspace(-limit, limit, num=steps)
            for limit, steps in zip(config.attack.spatial_limits,
                                    config.attack.grid_granularity)]
    results = np.zeros(
        [len(x_batch), np.prod(config.attack.grid_granularity)])
    for col, (tx, ty, rot) in enumerate(product(*axes)):
        # Same transform replicated for every sampled example.
        trans = np.tile([tx, ty, rot], (len(x_batch), 1))
        feed = {
            model.x_input: x_batch,
            model.y_input: y_batch,
            model.transform: trans,
            model.is_training: False
        }
        results[:, col] = sess.run(model.correct_prediction, feed_dict=feed)

    return results
def get_correctly_classified_angles(model,
                                    sess,
                                    config,
                                    eval_on_train=False,
                                    num_eval_examples=200,
                                    seed=1):
    """Sweep rotations from -30 to 30 degrees over a random sample and group
    images by how often they are classified correctly across the sweep.

    Args:
        model: model exposing x_input, y_input, transform, is_training,
            correct_prediction and predictions tensors.
        sess: TF session used to run the graph.
        config: experiment config; reads config.data for the dataset.
        eval_on_train: sample from the training split when True.
        num_eval_examples: number of examples to sample (with numpy's
            default sampling, i.e. replacement is possible).
        seed: numpy RNG seed for reproducible sampling.

    Returns:
        Tuple (results, predictions, fooled_tuple, correct_tuple,
        some_correct_tuple). ``results`` and ``predictions`` are
        [num_eval_examples, 61] arrays over the 61 integer angles in
        [-30, 30]; each tuple is the output of get_images(...) for the
        corresponding group, or None when the group is empty.

    Raises:
        ValueError: if config.data.dataset_name is not recognized.
    """
    np.random.seed(seed)
    if config.data.dataset_name == "cifar-10":
        data_iterator = cifar10_input.CIFAR10Data(config.data.data_path)
    elif config.data.dataset_name == "cifar-100":
        data_iterator = cifar100_input.CIFAR100Data(config.data.data_path)
    elif config.data.dataset_name == "svhn":
        data_iterator = svhn_input.SVHNData(config.data.data_path)
    else:
        # Fix: previously an unknown dataset fell through and crashed later
        # with NameError on data_iterator; fail fast like the sibling
        # eval functions do.
        raise ValueError("Unknown dataset name.")

    if eval_on_train:
        n = data_iterator.train_data.n
        indices = np.random.choice(n, num_eval_examples)
        x_batch = data_iterator.train_data.xs[indices, :]
        y_batch = data_iterator.train_data.ys[indices]
    else:
        n = data_iterator.eval_data.n
        indices = np.random.choice(n, num_eval_examples)
        x_batch = data_iterator.eval_data.xs[indices, :]
        y_batch = data_iterator.eval_data.ys[indices]

    angles = np.arange(-30, 31)
    trans = np.zeros([len(x_batch), 3])
    results = np.zeros([len(x_batch), len(angles)])
    predictions = np.zeros([len(x_batch), len(angles)])
    # Single source of truth for the sweep: iterate the same `angles` array
    # that is later passed to get_images (the old code duplicated the range).
    for i, angle in enumerate(angles):
        trans[:, 2] = angle  # rotation only; tx/ty stay 0
        dict_nat = {
            model.x_input: x_batch,
            model.y_input: y_batch,
            model.transform: trans,
            model.is_training: False
        }
        results[:, i], predictions[:, i] = sess.run(
            [model.correct_prediction, model.predictions], feed_dict=dict_nat)

    # Fraction of angles each image was classified correctly at.
    cor_class_means = np.mean(results, axis=1)

    def _group_images(group_indices):
        # Fetch annotated images for a group, or None when the group is
        # empty (keeps the three call sites below identical in shape).
        if len(group_indices) == 0:
            return None
        return get_images(group_indices, config, data_iterator, model, sess,
                          x_batch, y_batch, results, predictions, angles)

    # images that fooled the model for all angles
    fooled_tuple = _group_images(np.where(cor_class_means == 0)[0])
    # images that were correctly classified for all angles
    correct_tuple = _group_images(np.where(cor_class_means == 1)[0])
    # images correctly classified for a large proportion (>=90%) of angles,
    # but not all of them
    some_correct_tuple = _group_images(
        np.where(np.logical_and(cor_class_means >= .9,
                                cor_class_means < 1))[0])

    return results, predictions, fooled_tuple, correct_tuple, some_correct_tuple
                        dataset, split, args.method, args.k, args.exp_id)))
            plt.close()

            if config.data.dataset_name == "cifar-10":
                bins = 10
                data_iterator = cifar10_input.CIFAR10Data(
                    config.data.data_path)
                label_names = data_iterator.label_names
            elif config.data.dataset_name == "cifar-100":
                bins = 100
                data_iterator = cifar100_input.CIFAR100Data(
                    config.data.data_path)
                label_names = data_iterator.label_names
            elif config.data.dataset_name == "svhn":
                bins = 10
                data_iterator = svhn_input.SVHNData(config.data.data_path)
                label_names = np.arange(bins)

            # images fooled
            if fool_tup is not None:
                imgs_fooled, y_fooled, pred_fooled, imgs_f_per, pred_f_per = fool_tup
                plot_images(imgs_fooled, y_fooled, pred_fooled, imgs_f_per,
                            pred_f_per, bins, label_names, save_plot_subdir,
                            'misclassified_all_angles', args.method,
                            args.exp_id, args.k)

            # images all correct
            if cor_tup is not None:
                imgs_correct, y_correct, pred_correct, imgs_c_per, pred_c_per = cor_tup
                plot_images(imgs_correct, y_correct, pred_correct, imgs_c_per,
                            pred_c_per, bins, label_names, save_plot_subdir,