Example 1
 def metric_fct(xs, flat_est_grad_vals):
     # true gradient of the synthetic concave loss -||xs - x_opt||^2
     # (x_opt and the sign helper are assumed to be defined at module level)
     _grad_val = (- 2 * (xs - x_opt)).reshape(xs.shape[0], -1)
     norm_grad = np.linalg.norm(_grad_val, axis=1)
     norm_est = np.linalg.norm(flat_est_grad_vals, axis=1)
     # cosine similarity between the true and the estimated gradient
     _cos_sim_val = np.sum(_grad_val * flat_est_grad_vals, axis=1) / (norm_grad * norm_est)
     # Hamming similarity: fraction of coordinates with matching signs
     # (np.mean, not np.sum, to match the tf.reduce_mean version in main() below)
     _ham_sim_val = np.mean((sign(_grad_val) * sign(flat_est_grad_vals)) == 1., axis=1)
     return _cos_sim_val, _ham_sim_val
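For context, here is a minimal usage sketch of this metric on the toy quadratic objective, with metric_fct from above in scope. The batch shape, the zero x_opt, and using np.sign as the sign helper are illustrative assumptions, not part of the original snippet:

import numpy as np

sign = np.sign                    # assumed elementwise sign helper
x_opt = np.zeros((1, 2, 3, 3))    # assumed optimum of the toy loss -||x - x_opt||^2

xs = np.random.rand(4, 2, 3, 3)                 # a batch of 4 points
est = np.sign(np.random.randn(4, 2 * 3 * 3))    # flat sign estimates of the gradient
cos_sims, ham_sims = metric_fct(xs, est)
print(cos_sims.shape, ham_sims.shape)           # (4,) (4,)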
Example 2
 def _suggest(self, xs_t, loss_fct, metric_fct):
     # coordinate-wise variant: one sign bit of sgn_t is tested per call
     # (ch is torch; sign and lp_step are repo helpers, sketched after this example)
     _shape = list(xs_t.shape)
     dim = np.prod(_shape[1:])
     add_queries = 0
     if self.is_new_batch:
         self.xo_t = xs_t.clone()
         self.i = 0
     if self.i == 0:
         self.sgn_t = sign(ch.ones(_shape[0], dim))
         fxs_t = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon, self.p)
         bxs_t = self.xo_t
         est_deriv = (loss_fct(fxs_t.cpu().numpy()) - loss_fct(bxs_t.cpu().numpy())) / self.epsilon
         self.best_est_deriv = est_deriv
         add_queries = 2
     # flip the i-th sign bit and probe the loss at the resulting point
     self.sgn_t[:, self.i] *= -1
     fxs_t = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon, self.p)
     bxs_t = self.xo_t
     est_deriv = (loss_fct(fxs_t.cpu().numpy()) - loss_fct(bxs_t.cpu().numpy())) / self.epsilon
     # revert the flip for rows whose directional-derivative estimate got worse
     self.sgn_t[[i for i, val in enumerate(est_deriv < self.best_est_deriv) if val], self.i] *= -1.
     # keep the elementwise best estimate seen so far
     self.best_est_deriv = (est_deriv >= self.best_est_deriv) * est_deriv + (
             est_deriv < self.best_est_deriv) * self.best_est_deriv
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(self.xo_t.cpu().numpy(), self.sgn_t.cpu().numpy())
     # perform the step
     new_xs = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon, self.p)
     self.i += 1
     if self.i == dim:
         self.xo_t = new_xs.clone()
         self.i = 0
     return new_xs, np.ones(_shape[0]) + add_queries, cos_sims, ham_sims
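Both _suggest variants lean on the repo helpers sign and lp_step, which are not shown in these excerpts. A minimal sketch of plausible implementations, assuming the usual l-inf / l-2 step semantics (the actual repo versions may differ in detail):

import torch as ch

def sign(t):
    # elementwise sign, mapping 0 to +1 so outputs stay in {-1, +1} (assumption)
    return ch.sign(t) + (t == 0).float()

def lp_step(xo_t, g_t, epsilon, p):
    # step of size epsilon away from the reference point xo_t along g_t,
    # under the chosen lp geometry
    if p == 'inf':
        return xo_t + epsilon * sign(g_t)
    if p == '2':
        norm = g_t.view(g_t.shape[0], -1).norm(dim=1).view(-1, *([1] * (g_t.dim() - 1)))
        return xo_t + epsilon * g_t / ch.clamp(norm, min=1e-12)
    raise ValueError('unsupported p: {}'.format(p))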
Example 3
 def _suggest(self, xs_t, loss_fct, metric_fct):
     # divide-and-conquer variant (SignHunter-style): at level h the sign
     # vector is split into chunks of dim / 2^h bits and one chunk is
     # flipped per call
     _shape = list(xs_t.shape)
     dim = np.prod(_shape[1:])
     # additional queries at the start
     add_queries = 0
     if self.is_new_batch:
         self.xo_t = xs_t.clone()
         self.h = 0
         self.i = 0
     if self.i == 0 and self.h == 0:
         self.sgn_t = sign(ch.ones(_shape[0], dim))
         fxs_t = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon,
                         self.p)
         bxs_t = self.xo_t  # - self.epsilon * self.sgn_t.view(_shape)
         est_deriv = (loss_fct(fxs_t.cpu().numpy()) -
                      loss_fct(bxs_t.cpu().numpy())) / self.epsilon
         self.best_est_deriv = est_deriv
         add_queries = 3  # extra queries: bxs_t plus the two evaluations done in the i == 0, h == 0 case
     # flip a random chunk of dim / 2^h sign bits; the original SignHunter
     # flips the contiguous chunk [self.i * chunk_len, iend) instead, and
     # iend is kept below only to drive the level schedule
     chunk_len = np.ceil(dim / (2**self.h)).astype(int)
     iend = min(dim, (self.i + 1) * chunk_len)
     randinds = np.random.choice(dim, chunk_len, replace=False)
     self.sgn_t[:, randinds] *= -1.
     fxs_t = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon,
                     self.p)
     bxs_t = self.xo_t
     est_deriv = (loss_fct(fxs_t.cpu().numpy()) -
                  loss_fct(bxs_t.cpu().numpy())) / self.epsilon
     # revert the flip for rows whose directional-derivative estimate got worse;
     # np.flatnonzero keeps the index array integer-typed even when it is empty
     # (np.array([...]) over an empty comprehension is float64 and breaks indexing)
     switch_rows = np.flatnonzero(est_deriv < self.best_est_deriv)
     self.sgn_t[switch_rows[:, None], randinds] *= -1.
     # keep the elementwise best estimate seen so far
     self.best_est_deriv = (
         est_deriv >= self.best_est_deriv) * est_deriv + (
             est_deriv < self.best_est_deriv) * self.best_est_deriv
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(self.xo_t.cpu().numpy(),
                                     self.sgn_t.cpu().numpy())
     # perform the step
     new_xs = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon,
                      self.p)
     # update i and h for next iteration
     self.i += 1
     if self.i == 2**self.h or iend == dim:
         self.h += 1
         self.i = 0
         # if h is exhausted, set xo_t to be xs_t
         if self.h == np.ceil(np.log2(dim)).astype(int) + 1:
             self.xo_t = xs_t.clone()
             self.h = 0
             print("new change")
     return new_xs, np.ones(_shape[0]) + add_queries, cos_sims, ham_sims
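To make the _suggest contract concrete, here is a hedged sketch of the outer query loop that might drive it; the run_attack name, the is_new_batch handling, and the bookkeeping are assumptions based on how the attacker is used in main() below:

def run_attack(attacker, xs, loss_fct, early_stop_crit_fct, metric_fct, max_queries=1000):
    # repeatedly ask _suggest for new points until every example is
    # misclassified or the query budget is spent
    xs_t = ch.tensor(xs)
    num_queries = np.zeros(xs.shape[0])
    attacker.is_new_batch = True
    while num_queries.max() < max_queries:
        xs_t, q, cos_sims, ham_sims = attacker._suggest(xs_t, loss_fct, metric_fct)
        attacker.is_new_batch = False
        num_queries += q
        if early_stop_crit_fct(xs_t.cpu().numpy()).all():
            break
    return xs_t, num_queries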
Example 4
    def _suggest(self, xs_t, loss_fct, metric_fct):
        _shape = list(xs_t.shape)
        dim = np.prod(_shape[1:])
        if self.is_new_batch:
            self.xo_t = xs_t.clone()
        # random-sign baseline: draw a fresh uniform {-1, +1} vector each call
        sgn_t = sign(ch.rand(_shape[0], dim) - 0.5)

        # compute the cosine similarity
        cos_sims, ham_sims = metric_fct(self.xo_t.cpu().numpy(),
                                        sgn_t.cpu().numpy())
        # perform the step
        new_xs = lp_step(self.xo_t, sgn_t.view(_shape), self.epsilon, self.p)
        return new_xs, np.ones(_shape[0]), cos_sims, ham_sims
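As a sanity check on this baseline, a random sign vector should agree with the true gradient's sign on about half the coordinates, and its cosine similarity should shrink like 1/sqrt(dim); a quick, self-contained illustration (the dimension is arbitrary):

import numpy as np

g = np.random.randn(1, 10000)                 # stand-in for a true gradient
r = np.sign(np.random.rand(1, 10000) - 0.5)   # random {-1, +1} estimate
print(np.mean(np.sign(g) == r))               # Hamming similarity, ~0.5
print((g * r).sum() / (np.linalg.norm(g) * np.linalg.norm(r)))  # cosine, ~0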
# assumed imports (abridged): json, math, os, pickle, numpy as np,
# tensorflow as tf (1.x), torch as ch, plus the repo helpers referenced
# below (data_path_join, config_path_join, Dataset, SignAttack, ...)
def main():
    """
    Main routine of the experiment; results are stored in the data dir.
    :return: None
    """
    # results dir setup
    _dir = data_path_join('adv_cone_exp')
    create_dir(_dir)

    # for reproducibility
    np.random.seed(1)

    # init res data structure
    res = {
        'epsilon': EPS,
        'adv-cone-orders': K,
        'sign-hunter-step': 10 / 255.,
        'num_queries': 1000
    }

    # config files
    config_files = [
        'imagenet_sign_linf_config.json', 'imagenet_sign_linf_ens_config.json'
    ]

    # config load
    for _n, _cf in zip(['nat', 'adv'], config_files):
        tf.reset_default_graph()
        config_file = config_path_join(_cf)
        with open(config_file) as f:
            config = json.load(f)

        # dset load
        dset = Dataset(config['dset_name'], config['dset_config'])
        dset_dim = np.prod(get_dataset_shape(config['dset_name']))

        # model tf load/def
        model_file = get_model_file(config)
        with tf.device(config['device']):
            model = construct_model(config['dset_name'])
            flat_est_grad = tf.placeholder(tf.float32, shape=[None, dset_dim])
            flat_grad = tf.reshape(
                tf.gradients(model.xent, model.x_input)[0], [-1, dset_dim])
            norm_flat_grad = tf.maximum(
                tf.norm(flat_grad, axis=1, keepdims=True),
                np.finfo(np.float64).eps)
            norm_flat_est_grad = tf.maximum(
                tf.norm(flat_est_grad, axis=1, keepdims=True),
                np.finfo(np.float64).eps)
            cos_sim = tf.reduce_sum(tf.multiply(
                tf.div(flat_grad, norm_flat_grad),
                tf.div(flat_est_grad, norm_flat_est_grad)),
                                    axis=1,
                                    keepdims=False)
            ham_sim = tf.reduce_mean(tf.cast(tf.math.equal(
                tf_nsign(flat_grad), tf_nsign(flat_est_grad)),
                                             dtype=tf.float32),
                                     axis=1,
                                     keepdims=False)
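            # note: cos_sim and ham_sim mirror the numpy metric_fct of
            # Example 1, computed in-graph against the model's true gradient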

        # set torch default device:
        if 'gpu' in config['device'] and ch.cuda.is_available():
            ch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            ch.set_default_tensor_type('torch.FloatTensor')
        saver = tf.train.Saver()

        # init res entry: the (i, j) entry of each matrix denotes the probability
        # that there exist at least K[j] orthogonal directions r_p such that
        # x + EPS[i] * r_p is misclassified
        res[_n] = {
            'grad-sign': np.zeros((len(EPS), len(K))),
            'sign-hunter': np.zeros((len(EPS), len(K)))
        }

        # main block of code
        attacker = SignAttack(**config['attack_config'],
                              lb=dset.min_value,
                              ub=dset.max_value)

        # override the attacker's configuration
        attacker.max_loss_queries = res['num_queries']
        attacker.epsilon = res['sign-hunter-step']

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True,
                gpu_options=tf.GPUOptions(
                    allow_growth=True,
                    per_process_gpu_memory_fraction=0.9))) as sess:
            # Restore the checkpoint
            saver.restore(sess, model_file)

            # Iterate over the samples batch-by-batch
            # only correctly classified points are considered, so oversample
            # by an assumed ~70% model accuracy to end up with NUM_DATA_PTS
            num_eval_examples = int(NUM_DATA_PTS / 0.7)
            eval_batch_size = 30  # config['eval_batch_size']
            num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
            # consider only correctly classified pts
            eff_num_eval_examples = 0
            print('Iterating over {} batches'.format(num_batches))

            for ibatch in range(num_batches):
                if eff_num_eval_examples >= NUM_DATA_PTS:
                    break
                bstart = ibatch * eval_batch_size
                bend = min(bstart + eval_batch_size, num_eval_examples)
                print('batch size: {}:({},{})'.format(bend - bstart, bstart,
                                                      bend))

                x_batch, y_batch = dset.get_eval_data(bstart, bend)

                # filter misclassified pts
                is_correct = sess.run(model.correct_prediction,
                                      feed_dict={
                                          model.x_input: x_batch,
                                          model.y_input: y_batch
                                      })

                # keep only correctly classified points, up to NUM_DATA_PTS in total
                x_batch = x_batch[is_correct, :]
                y_batch = y_batch[is_correct]

                batch_size = min(NUM_DATA_PTS - eff_num_eval_examples,
                                 sum(is_correct))
                x_batch = x_batch[:batch_size, :]
                y_batch = y_batch[:batch_size]

                eff_num_eval_examples += batch_size

                def loss_fct(xs):
                    _l = sess.run(model.y_xent,
                                  feed_dict={
                                      model.x_input: xs,
                                      model.y_input: y_batch
                                  })
                    return _l

                def early_stop_crit_fct(xs):
                    _is_correct = sess.run(model.correct_prediction,
                                           feed_dict={
                                               model.x_input: xs,
                                               model.y_input: y_batch
                                           })
                    return np.logical_not(_is_correct)

                def metric_fct(xs, flat_est_grad_vals):
                    _cos_sim_val, _ham_sim_val = sess.run(
                        [cos_sim, ham_sim],
                        feed_dict={
                            model.x_input: xs,
                            model.y_input: y_batch,
                            flat_est_grad: flat_est_grad_vals
                        })
                    return _cos_sim_val, _ham_sim_val

                # handy function for performance tracking (or for a cheating
                # attack that reads the true gradient)
                def grad_fct(xs):
                    _grad_val = sess.run(flat_grad,
                                         feed_dict={
                                             model.x_input: xs,
                                             model.y_input: y_batch
                                         })
                    return _grad_val

                attacker.run(x_batch, loss_fct, early_stop_crit_fct,
                             metric_fct)

                # get attacker adv perturb estimate:
                g_batch = attacker.get_gs().cpu().numpy()
                # compute adv cone
                update_adv_cone_metrics(x_batch, g_batch, early_stop_crit_fct,
                                        res[_n]['sign-hunter'])

                # get gradient as adv perturb estimate:
                g_batch = sign(grad_fct(x_batch))
                # compute adversarial cones
                update_adv_cone_metrics(x_batch, g_batch, early_stop_crit_fct,
                                        res[_n]['grad-sign'])
                print(attacker.summary())
                print("Adv. Cone Stats for SH:")
                print(res[_n]['sign-hunter'])
                print("Adv. Cone Stats for GS:")
                print(res[_n]['grad-sign'])

        # normalize the accumulated counts into probabilities
        res[_n]['sign-hunter'] /= eff_num_eval_examples
        res[_n]['grad-sign'] /= eff_num_eval_examples

    p_fname = os.path.join(
        _dir, 'adv-cone_step-{}.p'.format(res['sign-hunter-step']))
    with open(p_fname, 'wb') as f:
        pickle.dump(res, f)

    plot_adv_cone_res(p_fname)
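Presumably the script is invoked directly; the standard entry point would be:

if __name__ == '__main__':
    main()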