def main(config_file):
    """
    :param config_file:
    :return:
    """
    tf.reset_default_graph()

    with open(config_file) as f:
        config = json.load(f)

    dset = Dataset(config['dset_name'], config['dset_config'])

    model_file = get_model_file(config)

    with tf.device(config['device']):
        model = construct_model(config['dset_name'])
        attack = construct_attack(model, config, dset)

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Restore the checkpoint
        saver.restore(sess, model_file)

        # Iterate over the samples batch-by-batch
        num_eval_examples = config['num_eval_examples']
        eval_batch_size = config['eval_batch_size']
        num_batches = int(math.ceil(num_eval_examples / eval_batch_size))

        x_adv = []  # accumulator for adversarial example batches

        print('Iterating over {} batches'.format(num_batches))

        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, num_eval_examples)
            print('batch size: {}'.format(bend - bstart))

            x_batch, y_batch = dset.get_eval_data(bstart, bend)

            x_batch_adv = attack.perturb(x_batch, y_batch, sess)

            x_adv.append(x_batch_adv)

        print('Storing examples')
        path = data_path_join(config['store_adv_path'])
        x_adv = np.concatenate(x_adv, axis=0)
        np.save(path, x_adv)
        print('Examples stored in {}'.format(path))
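
# A minimal command-line entry point for main(); the argparse wiring below is
# an illustrative assumption, not part of the original module.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='Generate and store adversarial examples.')
    parser.add_argument('config_file', help='path to a JSON config file')
    args = parser.parse_args()
    main(args.config_file)
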
# Example 2
def main(config_file):
    """
    :param config_file:
    :return:
    """
    # deallocate memory if any
    tf.reset_default_graph()
    #free_gpus()

    # load the configuration
    with open(config_file) as f:
        config = json.load(f)

    # load dataset
    dset = Dataset(config['dset_name'], config['dset_config'])

    with tf.device(config['device']):
        model = construct_model(config['dset_name'])

    x_adv = np.load(data_path_join(config['store_adv_path']))

    model_file = get_model_file(config)

    num_eval_examples = config['num_eval_examples']
    eval_batch_size = config['eval_batch_size']
    target_shape = (num_eval_examples, ) + get_dataset_shape(
        config['dset_name'])

    check_values(x_adv, dset.min_value, dset.max_value)
    check_shape(x_adv, target_shape)

    res = get_res(model_file,
                  x_adv,
                  config['attack_config']['epsilon'],
                  model,
                  dset,
                  num_eval_examples=num_eval_examples,
                  eval_batch_size=eval_batch_size)

    return res
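
# check_values and check_shape are not defined in this listing; hypothetical
# minimal implementations, consistent with how they are called above:
def check_values(x, min_value, max_value):
    # sanity-check that all values lie within the dataset's valid range
    assert x.min() >= min_value and x.max() <= max_value, \
        'values outside [{}, {}]'.format(min_value, max_value)


def check_shape(x, target_shape):
    # sanity-check that the loaded array has the expected shape
    assert x.shape == target_shape, \
        'expected shape {}, got {}'.format(target_shape, x.shape)
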
def main(config_file):
    """
    Measure pairwise cosine similarity of gradients and Hamming distance of
    gradient signs at random points in an epsilon-neighborhood.

    :param config_file: path to a JSON configuration file
    """
    # for reproducibility
    np.random.seed(1)
    tf.reset_default_graph()
    config = load_config(config_file)

    # dataset
    dset_name = config['dset_name']
    dset = Dataset(dset_name, config['dset_config'])
    dset_shape = get_dataset_shape(config['dset_name'])
    dim = np.prod(dset_shape)

    # model and computational graph
    model_file = get_model_file(config)
    with tf.device(config['device']):
        model = construct_model(dset_name)
        grad = tf.gradients(model.xent, model.x_input)[0]
        flat_grad = tf.reshape(grad, [NUM_SAMPLES, -1])
        flat_sgn = tf_nsign(flat_grad)
        norm_flat_grad = tf.div(flat_grad,
                                tf.norm(flat_grad, axis=1, keepdims=True))

        # pairwise cosine similarities (upper triangle, excluding the diagonal)
        sim_mat = tf.matmul(norm_flat_grad, norm_flat_grad, transpose_b=True)
        sims = tf.gather_nd(sim_mat,
                            list(zip(*np.triu_indices(NUM_SAMPLES, k=1))))

        # pairwise Hamming distances between sign vectors: for s_i, s_j in
        # {-1, +1}^dim, the distance is (dim - <s_i, s_j>) / 2
        dist_mat = (dim - tf.matmul(flat_sgn, flat_sgn, transpose_b=True)) / 2.0
        dists = tf.gather_nd(dist_mat,
                             list(zip(*np.triu_indices(NUM_SAMPLES, k=1))))

    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(
        data_path_join("hamming_dist_exp")
    )

    epsilon = config['attack_config']['epsilon']
    num_batches = int(math.ceil(NUM_EVAL_EXAMPLES / EVAL_BATCH_SIZE))

    for _epsilon in np.linspace(epsilon/10, epsilon, 3):
        # histogram recorders for this epsilon (merge_all() below re-collects
        # every histogram op created so far)
        tf.summary.histogram(
            "{}_hamming_dist_xr_sgn_grad_eps_{}_{}_samples_{}_pts".format(dset_name, _epsilon, NUM_SAMPLES, NUM_EVAL_EXAMPLES),
            dists
        )

        tf.summary.histogram(
            "{}_cosine_sim_xr_grad_eps_{}_{}_samples_{}_pts".format(dset_name, _epsilon, NUM_SAMPLES, NUM_EVAL_EXAMPLES),
            sims
        )

        summs = tf.summary.merge_all()

        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Restore the checkpoint
            saver.restore(sess, model_file)
            # Iterate over the data points one-by-one

            print('Iterating over {} batches'.format(num_batches))

            for ibatch in range(num_batches):
                bstart = ibatch * EVAL_BATCH_SIZE
                bend = min(bstart + EVAL_BATCH_SIZE, NUM_EVAL_EXAMPLES)
                print('batch size: {}'.format(bend - bstart))

                x_batch, y_batch = dset.get_eval_data(bstart, bend)

                xr_batch = np.clip(
                    x_batch + np.random.uniform(-_epsilon, _epsilon, [NUM_SAMPLES, *x_batch.shape[1:]]),
                    dset.min_value,
                    dset.max_value
                )
                yr_batch = y_batch.repeat(NUM_SAMPLES)

                summ_val = sess.run(summs, feed_dict={
                    model.x_input: xr_batch,
                    model.y_input: yr_batch
                })

                writer.add_summary(summ_val, global_step=ibatch)
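
# dist_mat above relies on the identity: for sign vectors s_i, s_j in
# {-1, +1}^dim, the Hamming distance is (dim - <s_i, s_j>) / 2. A quick
# NumPy check of that identity (illustrative, not part of the module):
def _check_hamming_identity(dim=16):
    s1 = np.where(np.random.randn(dim) >= 0, 1.0, -1.0)  # entries in {-1, +1}
    s2 = np.where(np.random.randn(dim) >= 0, 1.0, -1.0)
    assert np.sum(s1 != s2) == (dim - s1.dot(s2)) / 2.0
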
def main(config_file):
    """
    Log histograms of partial-derivative magnitudes at clean points and at
    random points in their epsilon-neighborhood.

    :param config_file: path to a JSON configuration file
    """
    np.random.seed(1)
    tf.reset_default_graph()

    config = load_config(config_file)

    dset_name = config['dset_name']
    dset = Dataset(dset_name, config['dset_config'])
    model_file = get_model_file(config)
    epsilon = config['attack_config']['epsilon']

    with tf.device(config['device']):
        model = construct_model(dset_name)
        abs_grad = tf.abs(tf.gradients(model.xent, model.x_input)[0])

    # histogram recorders: placeholders for the partial-derivative magnitudes
    # at x0 (clean) and x_rand (random epsilon-neighbor)
    dxo = tf.placeholder(tf.float32, shape=get_dataset_shape(dset_name))
    tf.summary.histogram("{}_part_deriv_mag_xo".format(dset_name), dxo)

    dxr = tf.placeholder(tf.float32, shape=get_dataset_shape(dset_name))
    tf.summary.histogram("{}_part_deriv_mag_xr".format(dset_name), dxr)

    writer = tf.summary.FileWriter(
        data_path_join("partial_derivative_exp")
    )
    summaries = tf.summary.merge_all()
    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Restore the checkpoint
        saver.restore(sess, model_file)
        # Iterate over the samples batch-by-batch
        eval_batch_size = config['eval_batch_size']
        num_batches = int(math.ceil(NUM_EVAL_EXAMPLES / eval_batch_size))

        print('Iterating over {} batches'.format(num_batches))

        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, NUM_EVAL_EXAMPLES)
            print('batch size: {}'.format(bend - bstart))

            x_batch, y_batch = dset.get_eval_data(bstart, bend)
            xr_batch = np.clip(x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape),
                               dset.min_value,
                               dset.max_value)
            dxo_batch = sess.run(abs_grad, feed_dict={
                model.x_input: x_batch,
                model.y_input: y_batch
            })

            dxr_batch = sess.run(abs_grad, feed_dict={
                model.x_input: xr_batch,
                model.y_input: y_batch
            })

            for i, step in enumerate(range(bstart, bend)):
                summ = sess.run(summaries, feed_dict={dxo: dxo_batch[i],
                                                      dxr: dxr_batch[i]})
                writer.add_summary(summ, global_step=step)
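
# A sketch for reading the logged histograms back from the event files for
# offline analysis (assumes TF 1.x; the glob pattern is illustrative):
def _read_histograms(logdir):
    import glob
    for event_file in glob.glob(os.path.join(logdir, 'events.out.tfevents.*')):
        for event in tf.train.summary_iterator(event_file):
            for value in event.summary.value:
                if value.HasField('histo'):
                    print(event.step, value.tag, value.histo.num)
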
# Example 5 (fragment: `store_name` and `cfs` are assumed to be defined in the
# enclosing scope)
    if os.path.exists(store_name):
        os.remove(store_name)

    for _cf in cfs:
        # for reproducibility
        np.random.seed(1)
        config_file = config_path_join(_cf)
        tf.reset_default_graph()

        with open(config_file) as f:
            config = json.load(f)

        dset = Dataset(config['dset_name'], config['dset_config'])
        dset_dim = np.prod(get_dataset_shape(config['dset_name']))

        model_file = get_model_file(config)
        with tf.device(config['device']):
            model = construct_model(config['dset_name'])
            flat_est_grad = tf.placeholder(tf.float32, shape=[None, dset_dim])
            flat_grad = tf.reshape(
                tf.gradients(model.xent, model.x_input)[0], [-1, dset_dim])
            norm_flat_grad = tf.maximum(
                tf.norm(flat_grad, axis=1, keepdims=True),
                np.finfo(np.float64).eps)
            norm_flat_est_grad = tf.maximum(
                tf.norm(flat_est_grad, axis=1, keepdims=True),
                np.finfo(np.float64).eps)
            cos_sim = tf.reduce_sum(tf.multiply(
                tf.div(flat_grad, norm_flat_grad),
                tf.div(flat_est_grad, norm_flat_est_grad)),
                                    axis=1,
                                    keepdims=False)
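
# In NumPy terms, cos_sim above is the row-wise cosine similarity between the
# true gradient and an estimate, with norms clamped to machine epsilon exactly
# as in the graph (illustrative sketch):
def _np_cos_sim(g, g_hat):
    eps = np.finfo(np.float64).eps
    g_norm = np.maximum(np.linalg.norm(g, axis=1, keepdims=True), eps)
    g_hat_norm = np.maximum(np.linalg.norm(g_hat, axis=1, keepdims=True), eps)
    return np.sum((g / g_norm) * (g_hat / g_hat_norm), axis=1)
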
def main():
    """
    main routine of the experiment, results are stored in data
    :return:
    """
    # results dir setup
    _dir = data_path_join('adv_cone_exp')
    create_dir(_dir)

    # for reproducibility
    np.random.seed(1)

    # init res data structure
    res = {
        'epsilon': EPS,
        'adv-cone-orders': K,
        'sign-hunter-step': 10 / 255.,
        'num_queries': 1000
    }

    # config files
    config_files = [
        'imagenet_sign_linf_config.json', 'imagenet_sign_linf_ens_config.json'
    ]

    # load each configuration and run the experiment under its label
    for _n, _cf in zip(['nat', 'adv'], config_files):
        tf.reset_default_graph()
        config_file = config_path_join(_cf)
        with open(config_file) as f:
            config = json.load(f)

        # dset load
        dset = Dataset(config['dset_name'], config['dset_config'])
        dset_dim = np.prod(get_dataset_shape(config['dset_name']))

        # model tf load/def
        model_file = get_model_file(config)
        with tf.device(config['device']):
            model = construct_model(config['dset_name'])
            flat_est_grad = tf.placeholder(tf.float32, shape=[None, dset_dim])
            flat_grad = tf.reshape(
                tf.gradients(model.xent, model.x_input)[0], [-1, dset_dim])
            norm_flat_grad = tf.maximum(
                tf.norm(flat_grad, axis=1, keepdims=True),
                np.finfo(np.float64).eps)
            norm_flat_est_grad = tf.maximum(
                tf.norm(flat_est_grad, axis=1, keepdims=True),
                np.finfo(np.float64).eps)
            cos_sim = tf.reduce_sum(tf.multiply(
                tf.div(flat_grad, norm_flat_grad),
                tf.div(flat_est_grad, norm_flat_est_grad)),
                                    axis=1,
                                    keepdims=False)
            ham_sim = tf.reduce_mean(tf.cast(tf.math.equal(
                tf_nsign(flat_grad), tf_nsign(flat_est_grad)),
                                             dtype=tf.float32),
                                     axis=1,
                                     keepdims=False)

        # set torch default device:
        if 'gpu' in config['device'] and ch.cuda.is_available():
            ch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            ch.set_default_tensor_type('torch.FloatTensor')
        saver = tf.train.Saver()

        # init result matrices: the (i, j)th entry denotes the probability
        # that there exist at least K[j] orthogonal vectors r_p such that
        # x + EPS[i] * r_p is misclassified
        res[_n] = {
            'grad-sign': np.zeros((len(EPS), len(K))),
            'sign-hunter': np.zeros((len(EPS), len(K)))
        }

        # main block of code
        attacker = SignAttack(**config['attack_config'],
                              lb=dset.min_value,
                              ub=dset.max_value)

        # override the attacker's configuration
        attacker.max_loss_queries = res['num_queries']
        attacker.epsilon = res['sign-hunter-step']

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True,
                gpu_options=tf.GPUOptions(
                    allow_growth=True,
                    per_process_gpu_memory_fraction=0.9))) as sess:
            # Restore the checkpoint
            saver.restore(sess, model_file)

            # Iterate over the samples batch-by-batch. Only correctly
            # classified points are considered, so boost the number sampled
            # by an estimate of the model's accuracy (~0.7).
            num_eval_examples = int(NUM_DATA_PTS / 0.7)
            eval_batch_size = 30  # config['eval_batch_size']
            num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
            # consider only correctly classified pts
            eff_num_eval_examples = 0
            print('Iterating over {} batches'.format(num_batches))

            for ibatch in range(num_batches):
                if eff_num_eval_examples >= NUM_DATA_PTS:
                    break
                bstart = ibatch * eval_batch_size
                bend = min(bstart + eval_batch_size, num_eval_examples)
                print('batch size: {}:({},{})'.format(bend - bstart, bstart,
                                                      bend))

                x_batch, y_batch = dset.get_eval_data(bstart, bend)

                # filter misclassified pts
                is_correct = sess.run(model.correct_prediction,
                                      feed_dict={
                                          model.x_input: x_batch,
                                          model.y_input: y_batch
                                      })

                # keep only correctly classified points, up to NUM_DATA_PTS in total
                x_batch = x_batch[is_correct, :]
                y_batch = y_batch[is_correct]

                batch_size = min(NUM_DATA_PTS - eff_num_eval_examples,
                                 sum(is_correct))
                x_batch = x_batch[:batch_size, :]
                y_batch = y_batch[:batch_size]

                eff_num_eval_examples += batch_size

                def loss_fct(xs):
                    _l = sess.run(model.y_xent,
                                  feed_dict={
                                      model.x_input: xs,
                                      model.y_input: y_batch
                                  })
                    return _l

                def early_stop_crit_fct(xs):
                    _is_correct = sess.run(model.correct_prediction,
                                           feed_dict={
                                               model.x_input: xs,
                                               model.y_input: y_batch
                                           })
                    return np.logical_not(_is_correct)

                def metric_fct(xs, flat_est_grad_vals):
                    _cos_sim_val, _ham_sim_val = sess.run(
                        [cos_sim, ham_sim],
                        feed_dict={
                            model.x_input: xs,
                            model.y_input: y_batch,
                            flat_est_grad: flat_est_grad_vals
                        })
                    return _cos_sim_val, _ham_sim_val

                # handy function for performance tracking (or for a "cheat"
                # attack that queries the true gradient)
                def grad_fct(xs):
                    _grad_val = sess.run(flat_grad,
                                         feed_dict={
                                             model.x_input: xs,
                                             model.y_input: y_batch
                                         })
                    return _grad_val

                attacker.run(x_batch, loss_fct, early_stop_crit_fct,
                             metric_fct)

                # get attacker adv perturb estimate:
                g_batch = attacker.get_gs().cpu().numpy()
                # compute adv cone
                update_adv_cone_metrics(x_batch, g_batch, early_stop_crit_fct,
                                        res[_n]['sign-hunter'])

                # get gradient as adv perturb estimate:
                g_batch = sign(grad_fct(x_batch))
                # compute adversarial cones
                update_adv_cone_metrics(x_batch, g_batch, early_stop_crit_fct,
                                        res[_n]['grad-sign'])
                print(attacker.summary())
                print("Adv. Cone Stats for SH:")
                print(res[_n]['sign-hunter'])
                print("Adv. Cone Stats for GS:")
                print(res[_n]['grad-sign'])

        res[_n]['sign-hunter'] /= eff_num_eval_examples
        res[_n]['grad-sign'] /= eff_num_eval_examples

    p_fname = os.path.join(
        _dir, 'adv-cone_step-{}.p'.format(res['sign-hunter-step']))
    with open(p_fname, 'wb') as f:
        pickle.dump(res, f)

    plot_adv_cone_res(p_fname)
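
# A sketch for loading the pickled results back; the structure mirrors the
# `res` dict built above (rows of each matrix index EPS, columns index K):
def _load_adv_cone_results(p_fname):
    with open(p_fname, 'rb') as f:
        res = pickle.load(f)
    print(res['epsilon'], res['adv-cone-orders'])
    print(res['nat']['sign-hunter'])
    print(res['adv']['grad-sign'])
    return res
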