Example #1
    def __init__(self, name, config):
        """
        :param name: dataset name
        :param config: dictionary whose keys depend on the dataset being created
         (see the branches below)
        """
        assert name in ['mnist', 'cifar10', 'cifar10aug',
                        'imagenet'], "Invalid dataset"
        self.name = name

        if self.name == 'cifar10':
            data_path = data_path_join('cifar10_data')
            self.data = cifar10_input.CIFAR10Data(data_path)
        elif self.name == 'cifar10aug':
            data_path = data_path_join('cifar10_data')
            raw_cifar = cifar10_input.CIFAR10Data(data_path)
            sess = config['sess']
            model = config['model']
            self.data = cifar10_input.AugmentedCIFAR10Data(
                raw_cifar, sess, model)
        elif self.name == 'mnist':
            self.data = input_data.read_data_sets(data_path_join('mnist_data'),
                                                  one_hot=False)
        elif self.name == 'imagenet':
            self.data = ImagenetValidData(data_dir=config['data_dir'])
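
A minimal usage sketch for the Dataset wrapper above (not part of the original excerpt; it assumes the class and its imports are in scope, and that a live `sess`/`model` exist for the augmented CIFAR-10 branch):

# Hedged usage sketch -- dataset names and config keys are taken from the constructor above
mnist_dset = Dataset('mnist', {})      # uses data_path_join('mnist_data'); no extra config keys
cifar_dset = Dataset('cifar10', {})    # uses data_path_join('cifar10_data')
# aug_dset = Dataset('cifar10aug', {'sess': sess, 'model': model})   # needs a TF session and model
# inet_dset = Dataset('imagenet', {'data_dir': '/path/to/imagenet-val'})  # path is illustrative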
Example #2
def dm(urls, data_dir):
    """Download each archive in `urls` into `data_dir` (if missing), print its SHA256, and extract it."""
    for url in urls:
        fname = data_path_join(
            data_dir,
            url.split('/')[-1].split('?')[0])  # get the name of the file

        # model download
        if not os.path.exists(fname):
            print('Downloading models')
            if sys.version_info >= (3, ):
                import urllib.request
                urllib.request.urlretrieve(url, fname)
            else:
                import urllib
                urllib.urlretrieve(url, fname)

        # computing model hash
        sha256 = hashlib.sha256()
        with open(fname, 'rb') as f:
            data = f.read()
            sha256.update(data)
        print('SHA256 hash: {}'.format(sha256.hexdigest()))

        # extracting model
        print('Extracting model')
        if fname.endswith('.tar.gz'):
            opener = tarfile.open(fname, 'r:gz')
        else:
            opener = zipfile.ZipFile(fname, 'r')

        with opener as model_zip:
            model_zip.extractall(data_path_join(data_dir))
            print('Extracted model in {}'.format(data_path_join(data_dir)))
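
A short usage sketch for dm(); it assumes the excerpt's imports (os, data_path_join) are in scope, and the URL and directory name are copied from the module-level calls shown in Example #7 below:

data_dir = 'cifar10_models'
os.makedirs(data_path_join(data_dir), exist_ok=True)
dm(['https://www.dropbox.com/s/cgzd5odqoojvxzk/natural.zip?dl=1'], data_dir)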
Example #3
def plot_keep_k_sign_exp(files):
    pgf_setup()
    create_dir(os.path.join(get_data_dir(), 'keep_k_res'))
    for i, file in enumerate(files):
        # load data
        dset = os.path.split(file)[1].split('_')[0]
        p = os.path.split(file)[1].split('_')[1]
        with open(file, 'r') as f:
            res = json.load(f)
        # process data
        step_size = 2
        xticks = [(_ix, bf(r"{0:.0f}%".format(_x * 100)))
                  for _ix, _x in enumerate(res['retain_p'])][::step_size]
        res = res[dset]
        ys_rand = [1 - _y for _y in res['random']['adv_acc']]
        ys_top = [1 - _y for _y in res['top']['adv_acc']]
        plt.clf()
        ax = plt.subplot()
        ax.plot(ys_rand, label=bf('random-k'), linestyle='--', marker='.')
        ax.plot(ys_top, label=bf('top-k'), linestyle='--', marker='*')
        if i == 0: ax.legend()  # show the legend only on the first plot
        plt.xticks(*list(zip(*xticks)))
        ax.set_ylabel(bf('misclassification rate'))
        ax.set_xlabel(bf('k percent of {} coordinates'.format(dset.upper())))
        plt.tight_layout()
        plt.savefig(
            data_path_join('keep_k_res',
                           'keep_k_sign_{}_{}.pdf'.format(dset, p)))
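
A sketch of the result-file layout that plot_keep_k_sign_exp() expects, reconstructed from the keys it reads; the filename pattern (`<dset>_<p>_*.json`) follows from how `dset` and `p` are parsed above, and all numeric values are placeholders:

# e.g. contents of a hypothetical 'mnist_linf_res.json' (values are illustrative only)
example_res = {
    "retain_p": [0.0, 0.5, 1.0],                      # fraction of sign coordinates retained
    "mnist": {
        "random": {"adv_acc": [0.98, 0.60, 0.10]},    # adversarial accuracy, random-k coordinates
        "top":    {"adv_acc": [0.98, 0.40, 0.10]}     # adversarial accuracy, top-k coordinates
    }
}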
Example #4
def main(config_file):
    """
    :param config_file:
    :return:
    """
    tf.reset_default_graph()

    with open(config_file) as f:
        config = json.load(f)

    dset = Dataset(config['dset_name'], config['dset_config'])

    model_file = get_model_file(config)

    with tf.device(config['device']):
        model = construct_model(config['dset_name'])
        attack = construct_attack(model, config, dset)

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Restore the checkpoint
        saver.restore(sess, model_file)

        # Iterate over the samples batch-by-batch
        num_eval_examples = config['num_eval_examples']
        eval_batch_size = config['eval_batch_size']
        num_batches = int(math.ceil(num_eval_examples / eval_batch_size))

        x_adv = []  # adv accumulator

        print('Iterating over {} batches'.format(num_batches))

        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, num_eval_examples)
            print('batch size: {}'.format(bend - bstart))

            x_batch, y_batch = dset.get_eval_data(bstart, bend)

            x_batch_adv = attack.perturb(x_batch, y_batch, sess)

            x_adv.append(x_batch_adv)

        print('Storing examples')
        path = data_path_join(config['store_adv_path'])
        x_adv = np.concatenate(x_adv, axis=0)
        np.save(path, x_adv)
        print('Examples stored in {}'.format(path))
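
A sketch of the configuration keys read directly by this main(); values are placeholders, and any keys consumed inside get_model_file() or construct_attack() are not enumerated here (attack_config['epsilon'] is read by the evaluation routine in Example #5):

config_sketch = {
    "dset_name": "cifar10",
    "dset_config": {},
    "device": "/gpu:0",
    "num_eval_examples": 1000,
    "eval_batch_size": 100,
    "store_adv_path": "cifar10_adv.npy",
    "attack_config": {"epsilon": 8.0}      # placeholder value
}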
Example #5
def main(config_file):
    """
    :param config_file:
    :return:
    """
    # deallocate memory if any
    tf.reset_default_graph()
    #free_gpus()

    # load configs.
    with open(config_file) as f:
        config = json.load(f)

    # load dataset
    dset = Dataset(config['dset_name'], config['dset_config'])

    with tf.device(config['device']):
        model = construct_model(config['dset_name'])

    x_adv = np.load(data_path_join(config['store_adv_path']))

    model_file = get_model_file(config)

    num_eval_examples = config['num_eval_examples']
    eval_batch_size = config['eval_batch_size']
    target_shape = (num_eval_examples, ) + get_dataset_shape(
        config['dset_name'])

    check_values(x_adv, dset.min_value, dset.max_value)
    check_shape(x_adv, target_shape)

    res = get_res(model_file,
                  x_adv,
                  config['attack_config']['epsilon'],
                  model,
                  dset,
                  num_eval_examples=num_eval_examples,
                  eval_batch_size=eval_batch_size)

    return res
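
A usage sketch for the evaluation main() above; the config filename is illustrative (it mirrors config names that appear elsewhere in this listing) and config_path_join comes from the project's helpers:

res = main(config_path_join('imagenet_sign_linf_config.json'))
# res is whatever get_res() returns: (adversarial accuracy, natural accuracy), or None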
Example #6
def main(config_file):
    np.random.seed(1)
    tf.reset_default_graph()
    config = load_config(config_file)

    # dataset
    dset_name = config['dset_name']
    dset = Dataset(dset_name, config['dset_config'])
    dset_shape = get_dataset_shape(config['dset_name'])
    dim = np.prod(dset_shape)

    # model and computational graph
    model_file = get_model_file(config)
    with tf.device(config['device']):
        model = construct_model(dset_name)
        grad = tf.gradients(model.xent, model.x_input)[0]
        flat_grad = tf.reshape(grad, [NUM_SAMPLES, -1])
        flat_sgn = tf_nsign(flat_grad)
        norm_flat_grad = tf.div(flat_grad,
                                tf.norm(flat_grad, axis=1, keepdims=True))

        # pairwise cosine similarities between the normalized gradients
        sim_mat = tf.matmul(norm_flat_grad, norm_flat_grad, transpose_b=True)
        sims = tf.gather_nd(sim_mat,
                            list(zip(*np.triu_indices(NUM_SAMPLES, k=1))))

        # pairwise Hamming distances between gradient signs:
        # for sign vectors in {-1, +1}^dim, hamming = (dim - dot product) / 2
        dist_mat = (dim - tf.matmul(flat_sgn, flat_sgn, transpose_b=True)) / 2.0
        dists = tf.gather_nd(dist_mat,
                             list(zip(*np.triu_indices(NUM_SAMPLES, k=1))))

    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(
        data_path_join("hamming_dist_exp")
    )

    epsilon = config['attack_config']['epsilon']
    num_batches = int(math.ceil(NUM_EVAL_EXAMPLES / EVAL_BATCH_SIZE))

    for _epsilon in np.linspace(epsilon/10, epsilon, 3):
        # histogram recorder
        tf.summary.histogram(
            "{}_hamming_dist_xr_sgn_grad_eps_{}_{}_samples_{}_pts".format(
                dset_name, _epsilon, NUM_SAMPLES, NUM_EVAL_EXAMPLES),
            dists
        )

        tf.summary.histogram(
            "{}_cosine_sim_xr_grad_eps_{}_{}_samples_{}_pts".format(
                dset_name, _epsilon, NUM_SAMPLES, NUM_EVAL_EXAMPLES),
            sims
        )

        summs = tf.summary.merge_all()

        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Restore the checkpoint
            saver.restore(sess, model_file)
            # Iterate over the data points one-by-one

            print('Iterating over {} batches'.format(num_batches))

            for ibatch in range(num_batches):
                bstart = ibatch * EVAL_BATCH_SIZE
                bend = min(bstart + EVAL_BATCH_SIZE, NUM_EVAL_EXAMPLES)
                print('batch size: {}'.format(bend - bstart))

                x_batch, y_batch = dset.get_eval_data(bstart, bend)

                # EVAL_BATCH_SIZE is expected to be 1 here (points are processed
                # one-by-one), so broadcasting yields NUM_SAMPLES independently
                # perturbed copies of the single point
                xr_batch = np.clip(
                    x_batch + np.random.uniform(
                        -_epsilon, _epsilon, [NUM_SAMPLES, *x_batch.shape[1:]]),
                    dset.min_value,
                    dset.max_value
                )
                yr_batch = y_batch.repeat(NUM_SAMPLES)

                summ_val = sess.run(summs, feed_dict={
                    model.x_input: xr_batch,
                    model.y_input: yr_batch
                })

                writer.add_summary(summ_val, global_step=ibatch)
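
The dist_mat expression above relies on the identity that, for sign vectors in {-1, +1}^dim, the Hamming distance equals (dim - dot product) / 2; a minimal NumPy check of that identity, independent of the TF graph:

import numpy as np

dim = 16
s1 = np.where(np.random.randn(dim) >= 0, 1.0, -1.0)   # random sign vector in {-1, +1}^dim
s2 = np.where(np.random.randn(dim) >= 0, 1.0, -1.0)
hamming = np.sum(s1 != s2)                             # number of disagreeing coordinates
assert hamming == (dim - s1.dot(s2)) / 2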
Example #7
        # extracting model
        print('Extracting model')
        if fname.endswith('.tar.gz'):
            opener = tarfile.open(fname, 'r:gz')
        else:
            opener = zipfile.ZipFile(fname, 'r')

        with opener as model_zip:
            model_zip.extractall(data_path_join(data_dir))
            print('Extracted model in {}'.format(data_path_join(data_dir)))


# cifar models
data_dir = 'cifar10_models'
os.makedirs(data_path_join(data_dir), exist_ok=True)
urls = [
    'https://www.dropbox.com/s/cgzd5odqoojvxzk/natural.zip?dl=1',
    'https://www.dropbox.com/s/g4b6ntrp8zrudbz/adv_trained.zip?dl=1',
    'https://www.dropbox.com/s/ywc0hg8lr5ba8zd/secret.zip?dl=1'
]
dm(urls, data_dir)

# mnist models
data_dir = 'mnist_models'
os.makedirs(data_path_join(data_dir), exist_ok=True)
urls = [
    'https://github.com/MadryLab/mnist_challenge_models/raw/master/natural.zip',
    'https://github.com/MadryLab/mnist_challenge_models/raw/master/secret.zip',
    'https://github.com/MadryLab/mnist_challenge_models/raw/master/adv_trained.zip'
]
Example #8
def main(config_file):
    np.random.seed(1)
    tf.reset_default_graph()

    config = load_config(config_file)

    dset_name = config['dset_name']
    dset = Dataset(dset_name, config['dset_config'])
    model_file = get_model_file(config)
    epsilon = config['attack_config']['epsilon']

    with tf.device(config['device']):
        model = construct_model(dset_name)
        abs_grad = tf.abs(tf.gradients(model.xent, model.x_input)[0])

    # histogram recorder
    # place holder for dx at x0 and x_rand
    dxo = tf.placeholder(tf.float32, shape=get_dataset_shape(dset_name))
    tf.summary.histogram("{}_part_deriv_mag_xo".format(dset_name), dxo)

    dxr = tf.placeholder(tf.float32, shape=get_dataset_shape(dset_name))
    tf.summary.histogram("{}_part_deriv_mag_xr".format(dset_name), dxr)

    writer = tf.summary.FileWriter(
        data_path_join("partial_derivative_exp")
    )
    summaries = tf.summary.merge_all()
    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Restore the checkpoint
        saver.restore(sess, model_file)
        # Iterate over the samples batch-by-batch
        eval_batch_size = config['eval_batch_size']
        num_batches = int(math.ceil(NUM_EVAL_EXAMPLES / eval_batch_size))

        #dxs = None  # grads accumulator

        print('Iterating over {} batches'.format(num_batches))

        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, NUM_EVAL_EXAMPLES)
            print('batch size: {}'.format(bend - bstart))

            x_batch, y_batch = dset.get_eval_data(bstart, bend)
            xr_batch = np.clip(x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape),
                               dset.min_value,
                               dset.max_value)
            #print(y_batch)
            dxo_batch = sess.run(abs_grad, feed_dict={
                model.x_input: x_batch,
                model.y_input: y_batch
            })

            dxr_batch = sess.run(abs_grad, feed_dict={
                model.x_input: xr_batch,
                model.y_input: y_batch
            })

            for i, step in enumerate(range(bstart, bend)):
                summ = sess.run(summaries, feed_dict={dxo: dxo_batch[i],
                                                      dxr: dxr_batch[i]})
                writer.add_summary(summ, global_step=step)
Example #9
def get_res(model_file,
            x_adv,
            epsilon,
            model,
            dset,
            num_eval_examples=10000,
            eval_batch_size=64,
            is_ignore_misclassified=True):

    saver = tf.train.Saver()

    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    total_corr = 0
    num_corr_nat = 0 if is_ignore_misclassified else num_eval_examples

    x_nat, _ = dset.get_eval_data(0, num_eval_examples)
    l_inf = np.amax(np.abs(x_nat - x_adv))

    if l_inf > epsilon + 0.0001:
        print('maximum perturbation found: {}'.format(l_inf))
        print('maximum perturbation allowed: {}'.format(epsilon))
        return

    y_pred = []  # label accumulator

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Restore the checkpoint
        saver.restore(sess, model_file)

        # Iterate over the samples batch-by-batch
        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, num_eval_examples)

            x_batch = x_adv[bstart:bend, :]
            _, y_batch = dset.get_eval_data(bstart, bend)

            cur_corr, y_pred_batch = sess.run(
                [model.num_correct, model.y_pred],
                feed_dict={
                    model.x_input: x_batch,
                    model.y_input: y_batch
                })

            # measure accuracy only on points whose natural input x_nat is correctly classified
            x_batch = x_nat[bstart:bend, :]

            if is_ignore_misclassified:
                cur_corr_nat, y_pred_batch_nat = sess.run(
                    [model.num_correct, model.y_pred],
                    feed_dict={
                        model.x_input: x_batch,
                        model.y_input: y_batch
                    })

                total_corr += sum((y_batch == y_pred_batch_nat)
                                  & (y_batch == y_pred_batch))
                num_corr_nat += cur_corr_nat
            else:
                total_corr += cur_corr

            y_pred.append(y_pred_batch)

        accuracy = total_corr / num_corr_nat
        accuracy_nat = num_corr_nat / num_eval_examples

        print('Accuracy (x_adv): {:.2f}% for {} pts'.format(
            100.0 * accuracy, num_corr_nat))
        print('Accuracy (x_nat): {:.2f}% for {} pts'.format(
            100.0 * accuracy_nat, num_eval_examples))
        y_pred = np.concatenate(y_pred, axis=0)
        np.save(data_path_join('pred.npy'), y_pred)
        print('Output saved at {}'.format(data_path_join('pred.npy')))

        return accuracy, accuracy_nat
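
A usage sketch mirroring the call in Example #5; note that get_res() returns None (after printing a warning) when the measured L-inf perturbation exceeds epsilon, and an (adversarial accuracy, natural accuracy) pair otherwise:

res = get_res(model_file, x_adv, config['attack_config']['epsilon'], model, dset,
              num_eval_examples=config['num_eval_examples'],
              eval_batch_size=config['eval_batch_size'])
if res is not None:
    accuracy, accuracy_nat = res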
Example #10
        # 'imagenet_sign_linf_config.json',
        # 'imagenet_bandit_linf_config.json',
        # 'imagenet_zosignsgd_l2_config.json',
        # 'imagenet_nes_l2_config.json',
        # 'imagenet_sign_l2_config.json',
        # 'imagenet_bandit_l2_config.json'
        # 'mnist_rand_linf_config.json',
        # 'cifar10_rand_linf_config.json',
        # 'imagenet_rand_linf_config.json',
        # 'mnist_rand_l2_config.json',
        # 'cifar10_rand_l2_config.json',
        # 'imagenet_rand_l2_config.json'
    ]

    # create/allocate the result JSON for tabulation
    data_dir = data_path_join('blackbox_attack_exp')
    create_dir(data_dir)
    res = {}

    # create a store for logging; if the store already exists, remove it
    store_name = os.path.join(data_dir, '{}_tbl.h5'.format(exp_id))
    offset = 0
    # rewrite all the results; alternatively, `offset` could be used to append to the h5 file above
    if os.path.exists(store_name):
        os.remove(store_name)

    for _cf in cfs:
        # for reproducibility
        np.random.seed(1)
        config_file = config_path_join(_cf)
        tf.reset_default_graph()
Example #11
from utils.plt_fcts import plot_keep_k_sign_exp

# EXPERIMENT GLOBAL PARAMETERS
np.random.seed(1)
config_files = ['cifar10_topk_linf_config.json',
                'cifar10_topk_l2_config.json',
                'mnist_topk_linf_config.json',
                'mnist_topk_l2_config.json',
                'imagenet_topk_linf_config.json',
                'imagenet_topk_l2_config.json'
                ]

# for efficiency, batch sizes are customized for each dataset
batch_sz = [100, 100, 200, 200, 50, 50]

_dir = data_path_join('keep_k_res')
create_dir(_dir)

num_eval_examples = 1000

for idx, _cf in enumerate(config_files):
    eval_batch_size = batch_sz[idx]
    res = {}
    print(_cf)
    config_file = config_path_join(_cf)
    dset = _cf.split('_')[0]
    p = _cf.split('_')[2]
    res[dset] = {}
    res['retain_p'] = list(np.linspace(0, 1, 11))
    with open(config_file, 'r') as f:
        config_json = json.load(f, object_pairs_hook=OrderedDict)
Example #12
        '../../data/blackbox_attack_exp/ens_imagenet_tbl_1.h5',
        '../../data/blackbox_attack_exp/ens_imagenet_tbl_2.h5',
        '../../data/blackbox_attack_exp/ens_imagenet_tbl_3.h5',
    ])

    # # plot all
    plt_from_h5tbl([
        '../../data/blackbox_attack_exp/mnist_sota_tbl.h5',
        '../../data/blackbox_attack_exp/mnist_sign_tbl.h5',
        '../../data/blackbox_attack_exp/mnist_cifar_rand_tbl.h5',
        '../../data/blackbox_attack_exp/cifar10_linf_sota_tbl.h5',
        '../../data/blackbox_attack_exp/cifar10_linf_sign_tbl.h5',
        '../../data/blackbox_attack_exp/cifar10_l2_sota_tbl.h5',
        '../../data/blackbox_attack_exp/cifar10_l2_sign_tbl.h5',
        '../../data/blackbox_attack_exp/imagenet_linf_sota_tbl.h5',
        '../../data/blackbox_attack_exp/imagenet_linf_sign_tbl.h5',
        '../../data/blackbox_attack_exp/imagenet_l2_sota_tbl.h5',
        '../../data/blackbox_attack_exp/imagenet_l2_sign_tbl.h5',
        '../../data/blackbox_attack_exp/imagenet_rand_tbl.h5'
    ])

    # # plot adv cone plots
    adv_cone_files = [
        'adv-cone_step-10_query-1000.p', 'adv-cone_step-10_query-500.p',
        'adv-cone_step-16_query-1000.p', 'adv-cone_step-16_query-500.p',
        'adv-cone_step-4_query-1000.p', 'adv-cone_step-4_query-500.p'
    ]
    for _i, _f in enumerate(adv_cone_files):
        plot_adv_cone_res(data_path_join('adv_cone_exp', _f),
                          is_legend=_i == 4)
Example #13
def main():
    """
    main routine of the experiment, results are stored in data
    :return:
    """
    # results dir setup
    _dir = data_path_join('adv_cone_exp')
    create_dir(_dir)

    # for reproducibility
    np.random.seed(1)

    # init res data structure
    res = {
        'epsilon': EPS,
        'adv-cone-orders': K,
        'sign-hunter-step': 10 / 255.,
        'num_queries': 1000
    }

    # config files
    config_files = [
        'imagenet_sign_linf_config.json', 'imagenet_sign_linf_ens_config.json'
    ]

    # config load
    for _n, _cf in zip(['nat', 'adv'], config_files):
        tf.reset_default_graph()
        config_file = config_path_join(_cf)
        with open(config_file) as f:
            config = json.load(f)

        # dset load
        dset = Dataset(config['dset_name'], config['dset_config'])
        dset_dim = np.prod(get_dataset_shape(config['dset_name']))

        # model tf load/def
        model_file = get_model_file(config)
        with tf.device(config['device']):
            model = construct_model(config['dset_name'])
            flat_est_grad = tf.placeholder(tf.float32, shape=[None, dset_dim])
            flat_grad = tf.reshape(
                tf.gradients(model.xent, model.x_input)[0], [-1, dset_dim])
            norm_flat_grad = tf.maximum(
                tf.norm(flat_grad, axis=1, keepdims=True),
                np.finfo(np.float64).eps)
            norm_flat_est_grad = tf.maximum(
                tf.norm(flat_est_grad, axis=1, keepdims=True),
                np.finfo(np.float64).eps)
            cos_sim = tf.reduce_sum(tf.multiply(
                tf.div(flat_grad, norm_flat_grad),
                tf.div(flat_est_grad, norm_flat_est_grad)),
                                    axis=1,
                                    keepdims=False)
            ham_sim = tf.reduce_mean(tf.cast(tf.math.equal(
                tf_nsign(flat_grad), tf_nsign(flat_est_grad)),
                                             dtype=tf.float32),
                                     axis=1,
                                     keepdims=False)

        # set torch default device:
        if 'gpu' in config['device'] and ch.cuda.is_available():
            ch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            ch.set_default_tensor_type('torch.FloatTensor')
        saver = tf.train.Saver()

        # init res entries: the (i, j)-th entry of each matrix should denote the
        # probability that there exist at least K[j] orthogonal vectors r_p such
        # that x + EPS[i] * r_p is misclassified
        res[_n] = {
            'grad-sign': np.zeros((len(EPS), len(K))),
            'sign-hunter': np.zeros((len(EPS), len(K)))
        }

        # main block of code
        attacker = SignAttack(**config['attack_config'],
                              lb=dset.min_value,
                              ub=dset.max_value)

        # override the attacker's configuration
        attacker.max_loss_queries = res['num_queries']
        attacker.epsilon = res['sign-hunter-step']

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True,
                gpu_options=tf.GPUOptions(
                    allow_growth=True,
                    per_process_gpu_memory_fraction=0.9))) as sess:
            # Restore the checkpoint
            saver.restore(sess, model_file)

            # Iterate over the samples batch-by-batch
            num_eval_examples = int(
                NUM_DATA_PTS / 0.7
            )  # only correctly classified points are considered, so oversample by an assumed ~70% model accuracy
            eval_batch_size = 30  # config['eval_batch_size']
            num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
            # consider only correctly classified pts
            eff_num_eval_examples = 0
            print('Iterating over {} batches'.format(num_batches))

            for ibatch in range(num_batches):
                if eff_num_eval_examples >= NUM_DATA_PTS:
                    break
                bstart = ibatch * eval_batch_size
                bend = min(bstart + eval_batch_size, num_eval_examples)
                print('batch size: {}:({},{})'.format(bend - bstart, bstart,
                                                      bend))

                x_batch, y_batch = dset.get_eval_data(bstart, bend)

                # filter misclassified pts
                is_correct = sess.run(model.correct_prediction,
                                      feed_dict={
                                          model.x_input: x_batch,
                                          model.y_input: y_batch
                                      })

                # pass only correctly classified data until NUM_DATA_PTS points have been collected
                x_batch = x_batch[is_correct, :]
                y_batch = y_batch[is_correct]

                batch_size = min(NUM_DATA_PTS - eff_num_eval_examples,
                                 sum(is_correct))
                x_batch = x_batch[:batch_size, :]
                y_batch = y_batch[:batch_size]

                eff_num_eval_examples += batch_size

                def loss_fct(xs):
                    _l = sess.run(model.y_xent,
                                  feed_dict={
                                      model.x_input: xs,
                                      model.y_input: y_batch
                                  })
                    return _l

                def early_stop_crit_fct(xs):
                    _is_correct = sess.run(model.correct_prediction,
                                           feed_dict={
                                               model.x_input: xs,
                                               model.y_input: y_batch
                                           })
                    return np.logical_not(_is_correct)

                def metric_fct(xs, flat_est_grad_vals):
                    _cos_sim_val, _ham_sim_val = sess.run(
                        [cos_sim, ham_sim],
                        feed_dict={
                            model.x_input: xs,
                            model.y_input: y_batch,
                            flat_est_grad: flat_est_grad_vals
                        })
                    return _cos_sim_val, _ham_sim_val

                # handy function for performance tracking (or for cheat attack)
                def grad_fct(xs):
                    _grad_val = sess.run(flat_grad,
                                         feed_dict={
                                             model.x_input: xs,
                                             model.y_input: y_batch
                                         })
                    return _grad_val

                attacker.run(x_batch, loss_fct, early_stop_crit_fct,
                             metric_fct)

                # get attacker adv perturb estimate:
                g_batch = attacker.get_gs().cpu().numpy()
                # compute adv cone
                update_adv_cone_metrics(x_batch, g_batch, early_stop_crit_fct,
                                        res[_n]['sign-hunter'])

                # get gradient as adv perturb estimate:
                g_batch = sign(grad_fct(x_batch))
                # compute adversarial cones
                update_adv_cone_metrics(x_batch, g_batch, early_stop_crit_fct,
                                        res[_n]['grad-sign'])
                print(attacker.summary())
                print("Adv. Cone Stats for SH:")
                print(res[_n]['sign-hunter'])
                print("Adv. Cone Stats for GS:")
                print(res[_n]['grad-sign'])

        res[_n]['sign-hunter'] /= eff_num_eval_examples
        res[_n]['grad-sign'] /= eff_num_eval_examples

    p_fname = os.path.join(
        _dir, 'adv-cone_step-{}.p'.format(res['sign-hunter-step']))
    with open(p_fname, 'wb') as f:
        pickle.dump(res, f)

    plot_adv_cone_res(p_fname)
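
A small sketch for reloading the pickled results written above; it reuses `p_fname` from main()'s scope, and the keys come from the `res` dict defined at the top of the function (EPS and K are the module-level globals this excerpt assumes):

with open(p_fname, 'rb') as f:
    res = pickle.load(f)
print(res['epsilon'], res['adv-cone-orders'])     # EPS values and cone orders K
print(res['nat']['sign-hunter'].shape)            # (len(EPS), len(K)) probability matrix
print(res['adv']['grad-sign'].shape)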
Example #14
"""
Download cifar10 data
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import tarfile
from utils.helper_fcts import data_path_join

data_dir = data_path_join('cifar10_data')
os.makedirs(data_dir, exist_ok=True)
url = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
fname = data_path_join('cifar10_data', 'cifar-10-python.tar.gz')

if not os.path.exists(fname):
    print('Downloading cifar10')
    if sys.version_info >= (3, ):
        import urllib.request

        urllib.request.urlretrieve(url, fname)
    else:
        import urllib
        urllib.urlretrieve(url, fname)

# extracting data
tar = tarfile.open(fname, "r:gz")
tar.extractall(data_dir)
tar.close()
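
Once extracted, the data can be consumed through the Dataset wrapper from Example #1; a minimal sketch, assuming that class (and its cifar10_input dependency) is importable:

dset = Dataset('cifar10', {})                   # reads data_path_join('cifar10_data')
x_eval, y_eval = dset.get_eval_data(0, 100)     # first 100 evaluation images and labels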