Example #1
class BIMAttack(AdversarialAttack):
    def __init__(self,
                 model,
                 step_size_iter=0.05,
                 max_perturbation=0.3,
                 n_iterations=10,
                 targeted=False,
                 norm_order=np.inf,
                 rand_init=None,
                 rand_minmax=0.3,
                 clip_min=None,
                 clip_max=None,
                 sanity_checks=True):
        super().__init__(model=model, clip_min=clip_min, clip_max=clip_max)
        self._targeted = targeted
        self._step_size_iter = step_size_iter
        self._max_perturbation = max_perturbation
        self._n_iterations = n_iterations
        self._norm_order = norm_order
        self._rand_init = rand_init
        self._rand_minmax = rand_minmax
        self._sanity_checks = sanity_checks

        with self.graph.as_default():
            self._method = BasicIterativeMethod(
                self._model,
                sess=self.session,
                eps=self._max_perturbation,
                eps_iter=self._step_size_iter,
                nb_iter=self._n_iterations,
                ord=self._norm_order,
                clip_min=self._clip_min,
                clip_max=self._clip_max,
                rand_init=self._rand_init,
                sanity_checks=self._sanity_checks)

    def attack_method(self, labels):
        if labels is not None:
            if self._targeted:
                return self._method.generate(x=self._x_clean,
                                             y_target=labels,
                                             rand_minmax=self._rand_minmax)
            else:
                return self._method.generate(x=self._x_clean,
                                             y=labels,
                                             rand_minmax=self._rand_minmax)
        return self._method.generate(x=self._x_clean,
                                     rand_minmax=self._rand_minmax)
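The wrapper above simply forwards its settings to cleverhans. As a point of reference, here is a minimal standalone sketch that applies the same BIM configuration directly, without the project-specific AdversarialAttack base class; the toy linear model, the placeholder shape, and the random inputs are illustrative assumptions, not part of the original example.

import numpy as np
import tensorflow as tf
from cleverhans.attacks import BasicIterativeMethod
from cleverhans.model import CallableModelWrapper


def toy_logits(x):
    # Stand-in classifier (assumption): one dense layer over flattened
    # MNIST-shaped inputs; reuse so repeated calls share weights.
    return tf.layers.dense(tf.layers.flatten(x), 10,
                           name='toy_logits', reuse=tf.AUTO_REUSE)


sess = tf.Session()
wrapped = CallableModelWrapper(callable_fn=toy_logits, output_layer='logits')

bim = BasicIterativeMethod(wrapped, sess=sess)
x_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
x_adv = bim.generate(x_ph,
                     eps=0.3,        # max_perturbation
                     eps_iter=0.05,  # step_size_iter
                     nb_iter=10,     # n_iterations
                     ord=np.inf,     # norm_order
                     clip_min=0.0,
                     clip_max=1.0)

sess.run(tf.global_variables_initializer())
x_clean = np.random.rand(4, 28, 28, 1).astype(np.float32)
print(sess.run(x_adv, feed_dict={x_ph: x_clean}).shape)  # (4, 28, 28, 1)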
Example #2
def backtracking(sess, x, y, model, x_test, y_test, params, batch_size=128):
    tf.set_random_seed(1822)
    set_log_level(logging.DEBUG)
    from cleverhans.attacks import BasicIterativeMethod
    method = BasicIterativeMethod(model, sess=sess)
    adv_x = method.generate(x, **params)
    num_batch = (x_test.shape[0] + batch_size - 1) // batch_size  # ceil division so the final partial batch is not dropped
    adv_imgs = []
    for i in range(num_batch):
        if (i + 1) * batch_size >= x_test.shape[0]:
            adv_imgs.append(
                sess.run(adv_x,
                         feed_dict={
                             x: x_test[i * batch_size:],
                             y: y_test[i * batch_size:]
                         }))
        else:
            adv_imgs.append(
                sess.run(adv_x,
                         feed_dict={
                             x: x_test[i * batch_size:(i + 1) * batch_size],
                             y: y_test[i * batch_size:(i + 1) * batch_size]
                         }))
    adv_imgs = np.concatenate(adv_imgs, axis=0)

    return adv_imgs
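A hedged usage sketch for the helper above; `model`, `x_test` and `y_test` are assumptions standing in for whatever the original script defines, and the attack parameters are only illustrative.

import tensorflow as tf

# Assumed to exist already (not defined in the snippet above): `model`, a
# cleverhans Model wrapper, plus MNIST arrays `x_test` of shape (N, 28, 28, 1)
# and one-hot `y_test` of shape (N, 10).
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
y = tf.placeholder(tf.float32, shape=[None, 10])
bim_params = {'eps': 0.3, 'eps_iter': 0.05, 'nb_iter': 10,
              'clip_min': 0., 'clip_max': 1.}

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    adv_imgs = backtracking(sess, x, y, model, x_test, y_test,
                            bim_params, batch_size=128)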
Example #3
def main(_):
    # Images for inception classifier are normalized to be in [-1, 1] interval,
    # eps is a difference between pixels so it should be in [0, 2] interval.
    # Renormalizing epsilon from [0, 255] to [0, 2].
    eps = 2.0 * FLAGS.max_epsilon / 255.0
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001

    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default():
        # Prepare graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)

        model = InceptionModel(num_classes)

        bim = BasicIterativeMethod(model)
        x_adv = bim.generate(x_input, eps=eps, clip_min=-1., clip_max=1.)

        # Run computation
        saver = tf.train.Saver(slim.get_model_variables())
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=tf.train.Scaffold(saver=saver),
            checkpoint_filename_with_path=FLAGS.checkpoint_path,
            master=FLAGS.master)

        with tf.train.MonitoredSession(
                session_creator=session_creator) as sess:
            for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                adv_images = sess.run(x_adv, feed_dict={x_input: images})
                save_images(adv_images, filenames, FLAGS.output_dir)
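The comment at the top of main() rescales the epsilon flag from pixel units into the [-1, 1] range the Inception preprocessing uses; a quick arithmetic check with an illustrative flag value:

# Illustrative only: for max_epsilon = 16 on the 0-255 pixel scale, the
# budget on [-1, 1] inputs becomes 2 * 16 / 255, roughly 0.1255.
max_epsilon = 16.0
eps = 2.0 * max_epsilon / 255.0
assert abs(eps - 0.12549) < 1e-4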
Example #4
        def fgsm_combo():
            acc = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_par)
            print('Test accuracy on legitimate examples: %0.4f\n' % acc)

            fgsm = FastGradientMethod(model, sess=sess)
            #initialize_uninitialized_global_variables(sess)
            adv_x = fgsm.generate(x, **fgsm_params)

            preds_adv = model.get_probs(adv_x)
            acc = model_eval(sess,
                             x,
                             y,
                             preds_adv,
                             X_test,
                             Y_test,
                             args=eval_par)

            print(
                'Test accuracy on adversarial examples generated by fgsm: %0.4f\n'
                % acc)
            bim = BasicIterativeMethod(model, sess=sess)
            adv_x = bim.generate(x)
            preds_adv = model.get_probs(adv_x)

            acc = model_eval(sess,
                             x,
                             y,
                             preds_adv,
                             X_test,
                             Y_test,
                             args=eval_par)
            print(
                'Test accuracy on adversarial examples generated by IterativeMethod: %0.4f\n'
                % acc)
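The `bim.generate(x)` call above relies entirely on the attack's defaults; a sketch of the equivalent explicit call, using the default values quoted elsewhere in these examples (verify them against your installed cleverhans version):

            # Sketch (assumption): explicit form of the default call above;
            # np is numpy, as in the rest of these snippets.
            adv_x = bim.generate(x, eps=0.3, eps_iter=0.05, nb_iter=10,
                                 ord=np.inf)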
Example #5
def main(argv):
    checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)

    if checkpoint is None:
        raise ValueError("Couldn't find latest checkpoint in " +
                         FLAGS.checkpoint_dir)

    train_start = 0
    train_end = 60000
    test_start = 0
    test_end = 10000
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)

    assert Y_train.shape[1] == 10

    # NOTE: for compatibility with Madry Lab downloadable checkpoints,
    # we cannot enclose this in a scope or do anything else that would
    # change the automatic naming of the variables.
    model = MadryMNIST()

    x_input = tf.placeholder(tf.float32, shape=[None, 784])
    x_image = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    y = tf.placeholder(tf.float32, shape=[None, 10])

    if FLAGS.attack_type == 'fgsm':
        fgsm = FastGradientMethod(model)
        fgsm_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.}
        adv_x = fgsm.generate(x_image, **fgsm_params)
    elif FLAGS.attack_type == 'bim':
        bim = BasicIterativeMethod(model)
        bim_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.,
                      'nb_iter': 50,
                      'eps_iter': .01}
        adv_x = bim.generate(x_image, **bim_params)
    else:
        raise ValueError(FLAGS.attack_type)
    preds_adv = model.get_probs(adv_x)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, checkpoint)

        # Evaluate the accuracy of the MNIST model on adversarial examples
        eval_par = {'batch_size': FLAGS.batch_size}
        t1 = time.time()
        acc = model_eval(
            sess, x_image, y, preds_adv, X_test, Y_test, args=eval_par)
        t2 = time.time()
        print("Took", t2 - t1, "seconds")
        print('Test accuracy on adversarial examples: %0.4f\n' % acc)
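For a baseline to set the adversarial number against, the same `model_eval` helper can score the unperturbed test set; a sketch reusing the names from the example, as it would appear inside the session block:

        # Sketch (assumption): clean-data accuracy for comparison with the
        # adversarial accuracy printed above.
        preds_clean = model.get_probs(x_image)
        clean_acc = model_eval(
            sess, x_image, y, preds_clean, X_test, Y_test, args=eval_par)
        print('Test accuracy on clean examples: %0.4f\n' % clean_acc)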
Example #6
def main(argv):
    checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)

    if checkpoint is None:
        raise ValueError("Couldn't load checkpoint")

    train_start = 0
    train_end = 60000
    test_start = 0
    test_end = 10000
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)

    assert Y_train.shape[1] == 10

    # NOTE: for compatibility with Madry Lab downloadable checkpoints,
    # we cannot enclose this in a scope or do anything else that would
    # change the automatic naming of the variables.
    model = MadryMNIST()

    x_input = tf.placeholder(tf.float32, shape=[None, 784])
    x_image = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    y = tf.placeholder(tf.float32, shape=[None, 10])

    if FLAGS.attack_type == 'fgsm':
        fgsm = FastGradientMethod(model)
        fgsm_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.}
        adv_x = fgsm.generate(x_image, **fgsm_params)
    elif FLAGS.attack_type == 'bim':
        bim = BasicIterativeMethod(model)
        bim_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.,
                      'nb_iter': 50,
                      'eps_iter': .01}
        adv_x = bim.generate(x_image, **bim_params)
    else:
        raise ValueError(FLAGS.attack_type)
    preds_adv = model.get_probs(adv_x)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, checkpoint)

        # Evaluate the accuracy of the MNIST model on adversarial examples
        eval_par = {'batch_size': FLAGS.batch_size}
        t1 = time.time()
        acc = model_eval(
            sess, x_image, y, preds_adv, X_test, Y_test, args=eval_par)
        t2 = time.time()
        print("Took", t2 - t1, "seconds")
        print('Test accuracy on adversarial examples: %0.4f\n' % acc)
Example #7
    def _BIM(self):
        bim_attack = BasicIterativeMethod(self.wrapped_model, sess=self.sess)
        eps = 0

        if self.dataset == 'MNIST':
            for _ in range(5):
                eps = eps + 0.1
                params = {
                    'eps': eps,
                    'eps_iter': eps / 10,
                    'nb_iter': 10,
                    'y': self.y,
                    'clip_min': 0.,
                    'clip_max': 1.
                }
                adv_x = bim_attack.generate(self.x, **params)
                adv_x = tf.stop_gradient(adv_x)

                print(f'Epsilon: {eps}')
                self.out_file.write(f'Epsilon: {eps}\n')
                self.save_images(adv_x, self.save_loc + f'_e{eps}')

        if self.dataset == 'CIFAR10':
            for _ in range(10):
                eps = eps + 1
                params = {
                    'eps': eps / 255,
                    'eps_iter': eps / 255 / 10,
                    'nb_iter': 10,
                    'y': self.y,
                    'clip_min': 0.,
                    'clip_max': 1.
                }
                adv_x = bim_attack.generate(self.x, **params)
                adv_x = tf.stop_gradient(adv_x)

                print(f'Epsilon: {eps}')
                self.out_file.write(f'Epsilon: {eps}\n')
                self.save_images(adv_x, self.save_loc + f'_e{eps}')
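`generate` only builds a symbolic tensor, so concrete adversarial images still have to be fetched through the session. An illustrative fragment, where the numpy batches `x_batch` and `y_batch` are assumptions matching the `self.x` and `self.y` placeholders:

                # Illustrative fragment: materialize adv_x for one clean batch.
                adv_batch = self.sess.run(
                    adv_x, feed_dict={self.x: x_batch, self.y: y_batch})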
Example #8
def backtracking(sess, x, model, x_test, params, batch_size=128):
    from cleverhans.attacks import BasicIterativeMethod
    method = BasicIterativeMethod(model, sess=sess)

    adv_x = method.generate(x, **params)
    num_batch = x_test.shape[0] // batch_size
    adv_imgs = []
    for i in range(num_batch):
        if i + 1 == num_batch:
            x_feed = x_test[i*batch_size:]
        else:
            x_feed = x_test[i*batch_size:(i+1)*batch_size]
        adv_img = sess.run(adv_x, feed_dict={x: x_feed})
        adv_imgs.append(adv_img)

    adv_imgs = np.concatenate(adv_imgs, axis=0)
    return adv_imgs
Example #9
def evaluate_checkpoint(filename):
    if attack_method == 'BIM':
        bim = BasicIterativeMethod(model)
        bim_params = {
            'eps': 0.3,
            'clip_min': 0.,
            'clip_max': 1.,
            'nb_iter': 50,
            'eps_iter': .01
        }
        adv_x = bim.generate(x_image, **bim_params)
    elif attack_method == 'FGM':
        FGM_attack = FastGradientMethod(model)
        FGM_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.}
        adv_x = FGM_attack.generate(x_image, **FGM_params)
    elif attack_method == 'PGD':
        pgd = ProjectedGradientDescent(model)
        pgd_params = {
            'eps': 0.09,
            'clip_min': 0.,
            'clip_max': 1.,
            'nb_iter': 40,
            'eps_iter': .01
        }
        adv_x = pgd.generate(x_image, **pgd_params)
    else:
        raise ValueError(attack_method)
    preds_adv = model.get_probs(adv_x)

    with tf.Session() as sess:
        # Restore the checkpoint
        saver = tf.train.Saver(var_list=model.all_variables)
        saver.restore(sess, filename)

        eval_par = {'batch_size': batch_size}
        t1 = time.time()
        acc = model_eval(sess,
                         x_image,
                         y,
                         preds_adv,
                         X_test,
                         Y_test,
                         args=eval_par)
        t2 = time.time()
        print("Took", t2 - t1, "seconds")
        print('Test accuracy on adversarial examples: %0.4f\n' % acc)
Example #10
    def adversarial_training(epsilon=0.3,
                             eps_iter=0.05,
                             nb_iter=10,
                             order=np.inf):
        bim2 = BasicIterativeMethod(wrap_2, sess=sess)
        bim_params = {'eps': epsilon, 'eps_iter': eps_iter, 'nb_iter': nb_iter, 'ord': order}
        preds_2_adv = model_2(bim2.generate(x, **bim_params))  # was **fgsm_params, which ignored this function's arguments

        def evaluate_2():
            # Accuracy of adversarially trained model on legitimate test inputs
            eval_params = {'batch_size': batch_size}
            accuracy = model_eval(sess,
                                  x,
                                  y,
                                  preds_2,
                                  X_test,
                                  Y_test,
                                  args=eval_params)
            print('Test accuracy on legitimate examples: %0.4f' % accuracy)
            report.adv_train_clean_eval = accuracy

            # Accuracy of the adversarially trained model on adversarial examples
            accuracy = model_eval(sess,
                                  x,
                                  y,
                                  preds_2_adv,
                                  X_test,
                                  Y_test,
                                  args=eval_params)
            print('Test accuracy on adversarial examples: %0.4f' % accuracy)
            report.adv_train_adv_eval = accuracy

        # Perform and evaluate adversarial training
        model_train(sess,
                    x,
                    y,
                    preds_2,
                    X_train,
                    Y_train,
                    predictions_adv=preds_2_adv,
                    evaluate=evaluate_2,
                    args=train_params,
                    save=False,
                    rng=rng)
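A trivial usage sketch of the routine above; the values are illustrative and np is numpy, as in the rest of these snippets:

    # Illustrative call: adversarial training against a 0.3-budget BIM adversary.
    adversarial_training(epsilon=0.3, eps_iter=0.05, nb_iter=10, order=np.inf)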
Example #11
def test():
    """Build FGSM/BIM/MIM/MadryEtAl attack tensors for the CIFAR-100 RDPCNN
    model and run the robustness evaluation on the test set."""
    tf.reset_default_graph()
    g = tf.get_default_graph()

    with g.as_default():
        # Placeholder nodes.
        images_holder = tf.placeholder(
            tf.float32,
            [None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS])
        label_holder = tf.placeholder(tf.float32, [None, FLAGS.NUM_CLASSES])
        is_training = tf.placeholder(tf.bool, ())

        # model
        model = model_cifar100.RDPCNN(images_holder, label_holder,
                                      FLAGS.INPUT_SIGMA,
                                      is_training)  # for adv examples

        model_loss = model.loss()
        model_acc = model.cnn_accuracy

        # robust
        def inference(x):
            logits, _ = model.cnn.prediction(x)
            return logits

        def inference_prob(x):
            _, probs = model.cnn.prediction(x)
            return probs

        graph_dict = {}
        graph_dict["images_holder"] = images_holder
        graph_dict["label_holder"] = label_holder
        graph_dict["is_training"] = is_training

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config, graph=g) as sess:
        sess.run(tf.global_variables_initializer())
        # load model
        model.tf_load(sess, name=FLAGS.CNN_CKPT_RESTORE_NAME)

        # adv test
        ####################################################################################################
        x_advs = {}
        ch_model_logits = CallableModelWrapper(callable_fn=inference,
                                               output_layer='logits')
        ch_model_probs = CallableModelWrapper(callable_fn=inference_prob,
                                              output_layer='probs')
        # FastGradientMethod
        fgsm_obj = FastGradientMethod(model=ch_model_probs, sess=sess)
        x_advs["fgsm"] = fgsm_obj.generate(x=images_holder,
                                           eps=FLAGS.ATTACK_SIZE,
                                           clip_min=0.0,
                                           clip_max=1.0)  # testing now

        # Iterative FGSM (BasicIterativeMethod, i.e. ProjectedGradientDescent with no random init)
        # default: eps_iter=0.05, nb_iter=10
        ifgsm_obj = BasicIterativeMethod(model=ch_model_probs, sess=sess)
        x_advs["ifgsm"] = ifgsm_obj.generate(x=images_holder,
                                             eps=FLAGS.ATTACK_SIZE,
                                             eps_iter=FLAGS.ATTACK_SIZE / 10,
                                             nb_iter=10,
                                             clip_min=0.0,
                                             clip_max=1.0)

        # MomentumIterativeMethod
        # default: eps_iter=0.06, nb_iter=10
        mim_obj = MomentumIterativeMethod(model=ch_model_probs, sess=sess)
        x_advs["mim"] = mim_obj.generate(x=images_holder,
                                         eps=FLAGS.ATTACK_SIZE,
                                         eps_iter=FLAGS.ATTACK_SIZE / 10,
                                         nb_iter=10,
                                         decay_factor=1.0,
                                         clip_min=0.0,
                                         clip_max=1.0)

        # MadryEtAl (Projected Gradient with random init, same as rand+fgsm)
        # default: eps_iter=0.01, nb_iter=40
        madry_obj = MadryEtAl(model=ch_model_probs, sess=sess)
        x_advs["madry"] = madry_obj.generate(x=images_holder,
                                             eps=FLAGS.ATTACK_SIZE,
                                             eps_iter=FLAGS.ATTACK_SIZE / 10,
                                             nb_iter=10,
                                             clip_min=0.0,
                                             clip_max=1.0)
        graph_dict["x_advs"] = x_advs
        ####################################################################################################

        # tensorboard writer
        #test_writer = model_utils.init_writer(FLAGS.TEST_LOG_PATH, g)
        print("\nTest")
        if FLAGS.local:
            total_test_batch = 2
        else:
            total_test_batch = None
        dp_info = np.load(FLAGS.DP_INFO_NPY, allow_pickle=True).item()
        test_info(sess,
                  model,
                  True,
                  graph_dict,
                  dp_info,
                  FLAGS.TEST_LOG_FILENAME,
                  total_batch=total_test_batch)
        robust_info(sess, model, graph_dict, FLAGS.ROBUST_LOG_FILENAME)
Example #12
def b_dcgan(dataset, args):

    z_dim = args.z_dim
    x_dim = dataset.x_dim
    batch_size = args.batch_size
    dataset_size = dataset.dataset_size

    session = get_session()

    test_x = tf.placeholder(tf.float32, shape=(batch_size, 28, 28, 1))
    x = tf.placeholder(tf.float32, shape=(batch_size, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(batch_size, 10))

    unlabeled_batch_ph = tf.placeholder(tf.float32,
                                        shape=(batch_size, 28, 28, 1))
    labeled_image_ph = tf.placeholder(tf.float32,
                                      shape=(batch_size, 28, 28, 1))
    if args.random_seed is not None:
        tf.set_random_seed(args.random_seed)
    # the TF graph is built with a fixed batch_size, so every function takes it explicitly
    dcgan = BDCGAN(
        x_dim,
        z_dim,
        dataset_size,
        batch_size=batch_size,
        J=args.J,
        M=args.M,
        lr=args.lr,
        optimizer=args.optimizer,
        gen_observed=args.gen_observed,
        adv_train=args.adv_train,
        num_classes=dataset.num_classes if args.semi_supervised else 1)
    if args.adv_test and args.semi_supervised:
        if args.basic_iterative:
            fgsm = BasicIterativeMethod(dcgan, sess=session)
            dcgan.adv_constructor = fgsm
            fgsm_params = {
                'eps': args.eps,
                'eps_iter': float(args.eps / 4),
                'nb_iter': 4,
                'ord': np.inf,
                'clip_min': 0.,
                'clip_max': 1.
            }
            #,'y_target': None}
        else:
            fgsm = FastGradientMethod(dcgan, sess=session)
            dcgan.adv_constructor = fgsm
            eval_params = {'batch_size': batch_size}
            fgsm_params = {'eps': args.eps, 'clip_min': 0., 'clip_max': 1.}
        adv_x = fgsm.generate(x, **fgsm_params)
        adv_test_x = fgsm.generate(test_x, **fgsm_params)
        preds = dcgan.get_probs(adv_x)
    if args.adv_train:
        unlabeled_targets = np.zeros([batch_size, dcgan.K + 1])
        unlabeled_targets[:, 0] = 1
        fgsm_targeted_params = {
            'eps': args.eps,
            'clip_min': 0.,
            'clip_max': 1.,
            'y_target': unlabeled_targets
        }

    saver = tf.train.Saver()

    print("Starting session")
    session.run(tf.global_variables_initializer())

    prev_iters = 0
    if args.load_chkpt:
        saver.restore(session, args.chkpt)
        # Assume checkpoint is of the form "model_300"
        prev_iters = int(args.chkpt.split('/')[-1].split('_')[1])
        print("Model restored from iteration:", prev_iters)

    print("Starting training loop")
    num_train_iter = args.train_iter

    if hasattr(dataset, "supervised_batches"):
        # implement your own data feeder if the data doesn't fit in memory
        supervised_batches = dataset.supervised_batches(args.N, batch_size)
    else:
        supervised_batches = get_supervised_batches(
            dataset, args.N, batch_size, list(range(dataset.num_classes)))

    if args.semi_supervised:
        test_image_batches, test_label_batches = get_test_batches(
            dataset, batch_size)

        optimizer_dict = {
            "semi_d": dcgan.d_optim_semi_adam,
            "sup_d": dcgan.s_optim_adam,
            "adv_d": dcgan.d_optim_adam,
            "gen": dcgan.g_optims_adam
        }
    else:
        optimizer_dict = {
            "adv_d": dcgan.d_optim_adam,
            "gen": dcgan.g_optims_adam
        }

    base_learning_rate = args.lr  # for now we use same learning rate for Ds and Gs
    lr_decay_rate = args.lr_decay

    for train_iter in range(1 + prev_iters, 1 + num_train_iter):

        if train_iter == 5000:
            print("Switching to user-specified optimizer")
            if args.semi_supervised:
                optimizer_dict = {
                    "semi_d": dcgan.d_optim_semi,
                    "sup_d": dcgan.s_optim,
                    "adv_d": dcgan.d_optim,
                    "gen": dcgan.g_optims
                }
            else:
                optimizer_dict = {
                    "adv_d": dcgan.d_optim,
                    "gen": dcgan.g_optims
                }

        learning_rate = base_learning_rate * np.exp(-lr_decay_rate * min(
            1.0, (train_iter * batch_size) / float(dataset_size)))

        batch_z = np.random.uniform(-1, 1, [batch_size, z_dim])
        image_batch, batch_label = dataset.next_batch(batch_size,
                                                      class_id=None)
        batch_targets = np.zeros([batch_size, 11])
        batch_targets[:, 0] = 1

        if args.semi_supervised:
            labeled_image_batch, labels = next(supervised_batches)
            if args.adv_train:
                adv_labeled = session.run(
                    fgsm.generate(labeled_image_ph, **fgsm_targeted_params),
                    feed_dict={labeled_image_ph: labeled_image_batch})
                adv_unlabeled = session.run(
                    fgsm.generate(unlabeled_batch_ph, **fgsm_params),
                    feed_dict={unlabeled_batch_ph: image_batch})
                _, d_loss = session.run(
                    [optimizer_dict["semi_d"], dcgan.d_loss_semi],
                    feed_dict={
                        dcgan.labeled_inputs: labeled_image_batch,
                        dcgan.labels: get_gan_labels(labels),
                        dcgan.inputs: image_batch,
                        dcgan.z: batch_z,
                        dcgan.d_semi_learning_rate: learning_rate,
                        dcgan.adv_unlab: adv_unlabeled,
                        dcgan.adv_labeled: adv_labeled
                    })
            else:
                _, d_loss = session.run(
                    [optimizer_dict["semi_d"], dcgan.d_loss_semi],
                    feed_dict={
                        dcgan.labeled_inputs: labeled_image_batch,
                        dcgan.labels: get_gan_labels(labels),
                        dcgan.inputs: image_batch,
                        dcgan.z: batch_z,
                        dcgan.d_semi_learning_rate: learning_rate
                    })

            _, s_loss = session.run([optimizer_dict["sup_d"], dcgan.s_loss],
                                    feed_dict={
                                        dcgan.inputs: labeled_image_batch,
                                        dcgan.lbls: labels
                                    })

        else:
            # regular GAN
            _, d_loss = session.run(
                [optimizer_dict["adv_d"], dcgan.d_loss],
                feed_dict={
                    dcgan.inputs: image_batch,
                    dcgan.z: batch_z,
                    dcgan.d_learning_rate: learning_rate
                })

        if args.wasserstein:
            session.run(dcgan.clip_d, feed_dict={})

        g_losses = []
        for gi in range(dcgan.num_gen):

            # compute g_sample loss
            batch_z = np.random.uniform(-1, 1, [batch_size, z_dim])
            for m in range(dcgan.num_mcmc):
                _, g_loss = session.run([
                    optimizer_dict["gen"][gi * dcgan.num_mcmc + m],
                    dcgan.generation["g_losses"][gi * dcgan.num_mcmc + m]
                ],
                                        feed_dict={
                                            dcgan.z: batch_z,
                                            dcgan.g_learning_rate:
                                            learning_rate
                                        })
                g_losses.append(g_loss)

        # if args.adv_test:
        #     probs, logits = dcgan.discriminator(adv_x,dcgan.K+1,reuse = True)

        #     labels = tf.placeholder(tf.float32,
        #                              [args.batch_size, dcgan.K+1], name='real_targets')
        #     compare_labels = tf.convert_to_tensor(np.array([np.append(0,i) for i in batch_label]))

        #     print(session.run(model_loss(compare_labels,probs), feed_dict = {x:image_batch}))
        # if args.adv_test:
        #     #preds = dcgan.get_probs(adv_x)
        #     #eval_preds = session.run(preds, feed_dict = {x:image_batch})
        #     #print(eval_preds[0])
        #     #adv_exs = session.run(adv_test_x, feed_dict = {x:test_image_batches})
        #     # adv_acc = model_eval(
        #     #     session, x, y, preds, image_batch, batch_label, args=eval_params)
        #     # #print(session.run(model_loss(compare_labels,probs), feed_dict = {x:image_batch}))
        #     # print("Adversarial loss = %2.f" % (1-adv_acc))
        #     print(get_test_accuracy(session,dcgan,adv_set,test_label_batches))

        if train_iter > 0 and train_iter % args.n_save == 0:
            print("Iter %i" % train_iter)
            # collect samples
            if args.save_samples:  # saving samples
                all_sampled_imgs = []
                for gi in range(dcgan.num_gen):
                    _imgs, _ps = [], []
                    for _ in range(10):
                        sample_z = np.random.uniform(-1,
                                                     1,
                                                     size=(batch_size, z_dim))
                        sampled_imgs, sampled_probs = session.run([
                            dcgan.generation["gen_samplers"][gi *
                                                             dcgan.num_mcmc],
                            dcgan.generation["d_probs"][gi * dcgan.num_mcmc]
                        ],
                                                                  feed_dict={
                                                                      dcgan.z:
                                                                      sample_z
                                                                  })
                        _imgs.append(sampled_imgs)
                        _ps.append(sampled_probs)

                    sampled_imgs = np.concatenate(_imgs)
                    sampled_probs = np.concatenate(_ps)
                    all_sampled_imgs.append(
                        [sampled_imgs, sampled_probs[:, 1:].sum(1)])

            print("Disc loss = %.2f, Gen loss = %s" %
                  (d_loss, ", ".join(["%.2f" % gl for gl in g_losses])))

            #if args.adv_test:
            #preds = dcgan.get_probs(adv_x)
            #eval_preds = session.run(preds, feed_dict = {x:image_batch})
            #print(eval_preds[0])
            #adv_exs = session.run(adv_test_x, feed_dict = {x:test_image_batches})
            # adv_acc = model_eval(
            #     session, x, y, preds, image_batch, batch_label, args=eval_params)
            # #print(session.run(model_loss(compare_labels,probs), feed_dict = {x:image_batch}))
            # print("Adversarial loss = %2.f" % (1-adv_acc))
            #print(get_test_accuracy(session,dcgan,adv_set,test_label_batches))

            # adv_x = fgsm.generate(x,**fgsm_params)
            # preds = dcgan.get_probs(adv_x)
            # acc = model_eval(
            #     session, x, y, preds, image_batch, batch_label, args=eval_params)
            # print("Adversarial loss = %2.f" % (1-acc))

            if args.semi_supervised:
                # get test set performance on real labels only for both GAN-based classifier and standard one

                s_acc, ss_acc, non_adv_acc, ex_prob = get_test_accuracy(
                    session, dcgan, test_image_batches, test_label_batches)
                if args.adv_test:
                    adv_set = []
                    for test_images in test_image_batches:
                        adv_set.append(
                            session.run(adv_x, feed_dict={x: test_images}))
                    adv_sup_acc, adv_ss_acc, correct_uncertainty, incorrect_uncertainty, adv_acc, adv_ex_prob = get_adv_test_accuracy(
                        session, dcgan, adv_set, test_label_batches)
                    print("Adversarial semi-sup accuracy with filter: %.2f" %
                          adv_sup_acc)
                    print("Adversarial semi-sup accuracy: %.2f" % adv_ss_acc)
                    print("Uncertainty for correct predictions: %.2f" %
                          correct_uncertainty)
                    print("Uncertainty for incorrect predictions: %.2f" %
                          incorrect_uncertainty)
                    print("non_adversarial_classification_accuracy: %.2f" %
                          non_adv_acc)
                    print("adversarial_classification_accuracy: %.2f" %
                          adv_acc)

                    if args.save_samples:
                        print("saving adversarial test images and test images")
                        i = 0

                        # use distinct names so the x/y placeholders defined above are not shadowed
                        for adv_img, test_img in zip(adv_set[-1], test_image_batches[-1]):
                            np.save(
                                args.out_dir + '/adv_test' + str(train_iter) +
                                '_' + str(i), adv_img)
                            np.save(
                                args.out_dir + '/test' + str(train_iter) +
                                '_' + str(i), test_img)
                            i = i + 1
                            if i == 5:  #save 5 adversarial images
                                break

                print("Supervised acc: %.2f" % (s_acc))
                print("Semi-sup acc: %.2f" % (ss_acc))

            print("saving results and samples")

            results = {
                "disc_loss": float(d_loss),
                "gen_losses": list(map(float, g_losses))
            }
            if args.semi_supervised:
                #results["example_non_adversarial_probs"] = list(ex_prob.flatten())
                #results["example_adversarial_probs"] = list(adv_ex_prob.flatten())
                results["non_adversarial_classification_accuracy"] = float(
                    non_adv_acc)
                results["adversarial_classification_accuracy"] = float(adv_acc)
                results["adversarial_uncertainty_correct"] = float(
                    correct_uncertainty)
                results["adversarial_uncertainty_incorrect"] = float(
                    incorrect_uncertainty)
                results["supervised_acc"] = float(s_acc)
                results['adversarial_filtered_semi_supervised_acc'] = float(
                    adv_sup_acc)
                results["adversarial_unfilted_semi_supervised_acc"] = float(
                    adv_ss_acc)
                results["semi_supervised_acc"] = float(ss_acc)
                results["timestamp"] = time.time()
                results["previous_chkpt"] = args.chkpt

            with open(
                    os.path.join(args.out_dir, 'results_%i.json' % train_iter),
                    'w') as fp:
                json.dump(results, fp)

            if args.save_samples:
                for gi in range(dcgan.num_gen):
                    print_images(all_sampled_imgs[gi],
                                 "B_DCGAN_%i_%.2f" %
                                 (gi, g_losses[gi * dcgan.num_mcmc]),
                                 train_iter,
                                 directory=args.out_dir)

                print_images(image_batch,
                             "RAW",
                             train_iter,
                             directory=args.out_dir)

            if args.save_weights:
                var_dict = {}
                for var in tf.trainable_variables():
                    var_dict[var.name] = session.run(var.name)

                np.savez_compressed(
                    os.path.join(args.out_dir, "weights_%i.npz" % train_iter),
                    **var_dict)

            print("Done saving weights")

        if train_iter > 0 and train_iter % args.save_chkpt == 0:
            save_path = saver.save(
                session, os.path.join(args.out_dir, "model_%i" % train_iter))
            print("Model checkpointed in file: %s" % save_path)

    session.close()
Example #13
def main(_):
  # Images for inception classifier are normalized to be in [-1, 1] interval,
  # eps is a difference between pixels so it should be in [0, 2] interval.
  # Renormalizing epsilon from [0, 255] to [0, 2].
  print('this line #1')
  eps = 2.0 * FLAGS.max_epsilon / 255.0
  slim_model = False        # For AlexNet and SqueezeNet: False, For Inception and MobileNet: True
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  num_classes = 1001

  tf.logging.set_verbosity(tf.logging.INFO)
  print('this line #2')
  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)

    # model = InceptionModel(num_classes)
    # model = MobileNetModel(num_classes)
    model = KaffeModel(num_classes, 'AlexNet') # Use AlexNet model for DeepCompression, INQ, and Proposed
    # model = KaffeModel(num_classes, 'Squeezenet')
    print('this line #3')

    # # FGSM 
    # fgsm = FastGradientMethod(model)
    # x_adv = fgsm.generate(x_input, eps=eps, clip_min=-1., clip_max=1.)

    # # BIM 
    bim = BasicIterativeMethod(model)
    x_adv = bim.generate(x_input, eps=eps, clip_min=-1., clip_max=1.)

    # # VAT
    # vat = VirtualAdversarialMethod(model)
    # x_adv = vat.generate(x_input, eps=eps, clip_min=-1., clip_max=1.)

    # # PGD 
    # pgd = MadryEtAl(model)
    # pgd_params = {'clip_min':-1., 'clip_max':1.}
    # x_adv = pgd.generate(x_input, clip_min=-1., clip_max=1.)#, **pgd_params)
    
    # # JSMA
    # sess = tf.Session()
    # jsma = SaliencyMapMethod(model)
    # # target = np.zeros((1,1000),dtype=np.float32)
    # # target[0,50] = 1                    #here, we suppose that the target label is 50
    # # jsma_params = {'theta': 1., 'gamma': 0.1, 'clip_min': 0., 'clip_max': 1., 'y_target': target}
    # jsma_params = {'theta': 1., 'gamma': 0.1, 'clip_min': 0., 'clip_max': 1.}
    # x_adv = jsma.generate(x_input,**jsma_params)
    # sess = tf.Session()
    # sess.run(tf.global_variables_initializer())

    # jsma = SaliencyMapMethod(model, back='tf', sess=sess)
    # jsma_params = {'theta': 1., 'gamma': 0.1, 'clip_min': 0., 'clip_max': 1.}
    # x_adv = jsma.generate(x_input,**jsma_params)


    # # DeepFool
    # deepfool = DeepFool(model, back='tf', sess=sess)
    # # # deepfool_params =  {'over_shoot', 'max_iter':1000, 'clip_max':1., 'clip_min':0., 'nb_candidate}
    # deepfool_params =  {'max_iter':1000, 'clip_max':1., 'clip_min':0.}
    # x_adv = deepfool.generate(x_input,**deepfool_params)

    # # sess = tf.Session()
    # # with tf.Session() as sess:
    # # # # # CarliniWagner L2
    # # # # sess = tf.train.MonitoredSession()
    # # # sess = tf.Session()
    # cwl2 = CarliniWagnerL2(model, back='tf', sess=sess)
    # # cwl2 = CarliniWagnerL2(model, back='tf')
    # # # cwl2_params = {'batch_size':9, 'confidence':0, 'max_iterations':1000, 'clip_min':0., 'clip_max':1.}
    # cwl2_params = {'clip_min':-1.0, 'clip_max':1.0}
    # # cwl2_params = {'batch_size':9, 'confidence':0,'learning_rate':1e-2,'binary_search_steps':9, 'max_iterations':1000,'abort_early':True, 'initial_const': 1e-3,'clip_min': 0.0, 'clip_max': 1.0}
    # x_adv = cwl2.generate(x_input,**cwl2_params)
    # with tf.Session() as sess:
    #   cwl2 = CarliniWagnerL2(sess, model, batch_size=1, confidence=0, targeted=True, learning_rate=5e-3, binary_search_steps=5, max_iterations=1000, abort_early=True, initial_const=1e-2, clip_min=0, clip_max=1, num_labels=3, shape=x_input.get_shape().as_list()[1:])
    #   # x_adv = cwl2.
    #   def cw_wrap(x_val, y_val):
    #       return np.array(cwl2.attack(x_val, y_val), dtype=np.float32)
    #   x_adv = tf.py_func(cw_wrap, [x, labels], tf.float32)
    # (self, sess, model, batch_size, confidence,
    #              targeted, learning_rate,
    #              binary_search_steps, max_iterations,
    #              abort_early, initial_const,
    #              clip_min, clip_max, num_labels, shape)

        # attack = CWL2(self.sess, self.model, self.batch_size,
        #               self.confidence, 'y_target' in kwargs,
        #               self.learning_rate, self.binary_search_steps,
        #               self.max_iterations, self.abort_early,
        #               self.initial_const, self.clip_min, self.clip_max,
        #               nb_classes, x_input.get_shape().as_list()[1:])
    # sess = tf.Session()
    # cw = CarliniWagnerL2(model, back='tf', sess=sess)
    # cw_params = {'binary_search_steps': 1, 'y': None, 'max_iterations': 1000, 'learning_rate': 5e-3, 'batch_size': 1, 'initial_const': 1e-2}
    # # x_adv = cw.generate_np(x_input,**cw_params)
    # x_adv = cw.generate(x_input,**cw_params)

    # (self, model, back='tf', sess=None)
    
    print('this line #4')

    # Run computation
    if slim_model:
        saver = tf.train.Saver(slim.get_model_variables())
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=tf.train.Scaffold(saver=saver),
            checkpoint_filename_with_path=FLAGS.checkpoint_path,
            master=FLAGS.master)

        with tf.train.MonitoredSession(session_creator=session_creator) as sess:
          for filenames, images in load_images(FLAGS.input_dir, batch_shape):
            adv_images = sess.run(x_adv, feed_dict={x_input: images})
            save_images(adv_images, filenames, FLAGS.output_dir)

    else:
        with tf.Session() as sess:
          model.load_model(model_path=FLAGS.checkpoint_path, session=sess)
          for filenames, images in load_images(FLAGS.input_dir, batch_shape):
            adv_images = sess.run(x_adv, feed_dict={x_input: images})
            save_images(adv_images, filenames, FLAGS.output_dir)
Example #14
                output_dir = './fgsm_images_{}'.format(epsilon)
                os.mkdir(output_dir)

            except:
                pass

        if IFGSM:
            ifgsm = BasicIterativeMethod(model)
            ifgsm_params = {'eps': epsilon,
                            'eps_iter': epsilon / 5,
                            'nb_iter': 5,
                            'clip_min': -1.,
                            'clip_max': 1.,
                            }

            x_adv = ifgsm.generate(x_input, **ifgsm_params)
            try:
                output_dir = './ifgsm_images_{}'.format(epsilon)
                os.mkdir(output_dir)
            except:
                pass
        saver = tf.train.Saver(tf.contrib.slim.get_model_variables())
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=tf.train.Scaffold(saver=saver),
            checkpoint_filename_with_path=checkpoint_path,
            master="")
        print('Attacking with epsilon {}'.format(epsilon))
        with tf.train.MonitoredSession(session_creator=session_creator) as sess:
            for filenames, images in load_imagefiles('./images', batch_shape):
                nontargeted_images = sess.run(x_adv, feed_dict={x_input: images})
                for filename, nontargeted_image in zip(filenames, nontargeted_images):
Example #15
def train(cifar10_data, logfile):
    """Train CIFAR-10 for a number of steps."""
    logfile.write("fgsm_eps \t %g, epsilon \t %d \n" %
                  (fgsm_eps, target_eps[0]))
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)

        # Parameters Declarification
        #with tf.variable_scope('conv1') as scope:
        kernel1 = _variable_with_weight_decay(
            'kernel1',
            shape=[3, 3, 3, 128],
            stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
            wd=0.0)
        biases1 = cifar10._variable_on_cpu('biases1', [128],
                                           tf.constant_initializer(0.0))
        #with tf.variable_scope('conv2') as scope:
        kernel2 = _variable_with_weight_decay(
            'kernel2',
            shape=[5, 5, 128, 128],
            stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
            wd=0.0)
        biases2 = cifar10._variable_on_cpu('biases2', [128],
                                           tf.constant_initializer(0.1))
        #with tf.variable_scope('conv3') as scope:
        kernel3 = _variable_with_weight_decay(
            'kernel3',
            shape=[5, 5, 256, 256],
            stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
            wd=0.0)
        biases3 = cifar10._variable_on_cpu('biases3', [256],
                                           tf.constant_initializer(0.1))
        #with tf.variable_scope('local4') as scope:
        kernel4 = cifar10._variable_with_weight_decay(
            'kernel4',
            shape=[int(image_size / 4)**2 * 256, hk],
            stddev=0.04,
            wd=0.004)
        biases4 = cifar10._variable_on_cpu('biases4', [hk],
                                           tf.constant_initializer(0.1))
        #with tf.variable_scope('local5') as scope:
        kernel5 = cifar10._variable_with_weight_decay(
            'kernel5', [hk, 10],
            stddev=np.sqrt(2.0 /
                           (int(image_size / 4)**2 * 256)) / math.ceil(5 / 2),
            wd=0.0)
        biases5 = cifar10._variable_on_cpu('biases5', [10],
                                           tf.constant_initializer(0.1))

        scale2 = tf.Variable(tf.ones([hk]))
        beta2 = tf.Variable(tf.zeros([hk]))

        params = [
            kernel1, biases1, kernel2, biases2, kernel3, biases3, kernel4,
            biases4, kernel5, biases5, scale2, beta2
        ]
        ########

        # Build a Graph that computes the logits predictions from the
        # inference model.
        shape = kernel1.get_shape().as_list()
        w_t = tf.reshape(kernel1, [-1, shape[-1]])
        w = tf.transpose(w_t)
        sing_vals = tf.svd(w, compute_uv=False)
        sensitivityW = tf.reduce_max(sing_vals)
        dp_delta = 0.05
        #dp_mult = attack_norm_bound * math.sqrt(2 * math.log(1.25 / dp_delta)) / dp_epsilon
        noise = tf.placeholder(tf.float32, [None, 28, 28, 32])

        dp_mult = attack_norm_bound * math.sqrt(
            2 * math.log(1.25 / dp_delta)) / dp_epsilon
        noise = tf.placeholder(tf.float32, [None, 14, 14, 128])
        sigma = tf.placeholder(tf.float32)
        x = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
        #y_conv, h_conv1 = inference(x, params, dp_mult**2 * noise);
        y_conv, h_conv1 = inference(x, params, attack_norm_bound * noise)
        softmax_y_conv = tf.nn.softmax(y_conv)
        y_ = tf.placeholder(tf.float32, [None, 10])

        #logits = inference(images)

        # Calculate loss. Apply Taylor Expansion for the output layer
        loss = cifar10.lossDPSGD(y_conv, y_)

        # noise redistribution #
        grad, = tf.gradients(loss, h_conv1)
        normalized_grad = tf.sign(grad)
        normalized_grad = tf.stop_gradient(normalized_grad)
        normalized_grad_r = tf.abs(tf.reduce_mean(normalized_grad,
                                                  axis=(0)))**2
        sum_r = tf.reduce_sum(normalized_grad_r,
                              axis=(0, 1, 2),
                              keepdims=False)
        normalized_grad_r = 14 * 14 * 128 * normalized_grad_r / sum_r
        print(normalized_grad_r)

        shape_grad = normalized_grad_r.get_shape().as_list()
        grad_t = tf.reshape(normalized_grad_r, [-1, shape_grad[-1]])
        g = tf.transpose(grad_t)
        sing_g_vals = tf.svd(g, compute_uv=False)
        sensitivity_2 = tf.reduce_max(sing_g_vals)
        ########################

        opt = tf.train.GradientDescentOptimizer(lr)

        gw_K1 = tf.gradients(loss, kernel1)[0]
        gb1 = tf.gradients(loss, biases1)[0]

        gw_K2 = tf.gradients(loss, kernel2)[0]
        gb2 = tf.gradients(loss, biases2)[0]

        gw_K3 = tf.gradients(loss, kernel3)[0]
        gb3 = tf.gradients(loss, biases3)[0]

        gw_K4 = tf.gradients(loss, kernel4)[0]
        gb4 = tf.gradients(loss, biases4)[0]

        gw_K5 = tf.gradients(loss, kernel5)[0]
        gb5 = tf.gradients(loss, biases5)[0]

        #clip gradient
        gw_K1 = tf.clip_by_norm(gw_K1, clip_bound)
        gw_K2 = tf.clip_by_norm(gw_K2, clip_bound)
        gw_K3 = tf.clip_by_norm(gw_K3, clip_bound)
        gw_K4 = tf.clip_by_norm(gw_K4, clip_bound)
        gw_K5 = tf.clip_by_norm(gw_K5, clip_bound)

        #perturb
        gw_K1 += tf.random_normal(shape=tf.shape(gw_K1),
                                  mean=0.0,
                                  stddev=(sigma * sensitivity),
                                  dtype=tf.float32) / batch_size
        gw_K2 += tf.random_normal(shape=tf.shape(gw_K2),
                                  mean=0.0,
                                  stddev=(sigma * sensitivity),
                                  dtype=tf.float32) / batch_size
        gw_K3 += tf.random_normal(shape=tf.shape(gw_K3),
                                  mean=0.0,
                                  stddev=(sigma * sensitivity),
                                  dtype=tf.float32) / batch_size
        gw_K4 += tf.random_normal(shape=tf.shape(gw_K4),
                                  mean=0.0,
                                  stddev=(sigma * sensitivity),
                                  dtype=tf.float32) / batch_size
        gw_K5 += tf.random_normal(shape=tf.shape(gw_K5),
                                  mean=0.0,
                                  stddev=(sigma * sensitivity),
                                  dtype=tf.float32) / batch_size
        gb1 += tf.random_normal(shape=tf.shape(gb1),
                                mean=0.0,
                                stddev=(sigma * sensitivity),
                                dtype=tf.float32) / batch_size
        gb2 += tf.random_normal(shape=tf.shape(gb2),
                                mean=0.0,
                                stddev=(sigma * sensitivity),
                                dtype=tf.float32) / batch_size
        gb3 += tf.random_normal(shape=tf.shape(gb3),
                                mean=0.0,
                                stddev=(sigma * sensitivity),
                                dtype=tf.float32) / batch_size
        gb4 += tf.random_normal(shape=tf.shape(gb4),
                                mean=0.0,
                                stddev=(sigma * sensitivity),
                                dtype=tf.float32) / batch_size
        gb5 += tf.random_normal(shape=tf.shape(gb5),
                                mean=0.0,
                                stddev=(sigma * sensitivity),
                                dtype=tf.float32) / batch_size

        # apply gradients and keep tracking moving average of the parameters
        apply_gradient_op = opt.apply_gradients([(gw_K1, kernel1),
                                                 (gb1, biases1),
                                                 (gw_K2, kernel2),
                                                 (gb2, biases2),
                                                 (gw_K3, kernel3),
                                                 (gb3, biases3),
                                                 (gw_K4, kernel4),
                                                 (gb4, biases4),
                                                 (gw_K5, kernel5),
                                                 (gb5, biases5)],
                                                global_step=global_step)
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(
            tf.trainable_variables())
        with tf.control_dependencies(
            [apply_gradient_op, variables_averages_op]):
            train_op = tf.no_op(name='train')

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        #train_op = cifar10.trainDPSGD(loss, global_step, clip_bound, sigma, sensitivity)

        sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))

        attack_switch = {
            'fgsm': True,
            'ifgsm': True,
            'deepfool': False,
            'mim': True,
            'spsa': False,
            'cwl2': False,
            'madry': True,
            'stm': False
        }

        ch_model_probs = CustomCallableModelWrapper(
            callable_fn=inference_test_input_probs,
            output_layer='probs',
            params=params,
            image_size=image_size)

        # define each attack method's tensor
        attack_tensor_dict = {}
        # FastGradientMethod
        if attack_switch['fgsm']:
            print('creating attack tensor of FastGradientMethod')
            fgsm_obj = FastGradientMethod(model=ch_model_probs, sess=sess)
            #x_adv_test_fgsm = fgsm_obj.generate(x=x, eps=fgsm_eps, clip_min=-1.0, clip_max=1.0, ord=2) # testing now
            x_adv_test_fgsm = fgsm_obj.generate(x=x,
                                                eps=fgsm_eps,
                                                clip_min=-1.0,
                                                clip_max=1.0)  # testing now
            attack_tensor_dict['fgsm'] = x_adv_test_fgsm

        # Iterative FGSM (BasicIterativeMethod, i.e. ProjectedGradientDescent with no random init)
        # default: eps_iter=0.05, nb_iter=10
        if attack_switch['ifgsm']:
            print('creating attack tensor of BasicIterativeMethod')
            ifgsm_obj = BasicIterativeMethod(model=ch_model_probs, sess=sess)
            #x_adv_test_ifgsm = ifgsm_obj.generate(x=x, eps=fgsm_eps, eps_iter=fgsm_eps/10, nb_iter=10, clip_min=-1.0, clip_max=1.0, ord=2)
            x_adv_test_ifgsm = ifgsm_obj.generate(x=x,
                                                  eps=fgsm_eps,
                                                  eps_iter=fgsm_eps / 3,
                                                  nb_iter=3,
                                                  clip_min=-1.0,
                                                  clip_max=1.0)
            attack_tensor_dict['ifgsm'] = x_adv_test_ifgsm

        # MomentumIterativeMethod
        # default: eps_iter=0.06, nb_iter=10
        if attack_switch['mim']:
            print('creating attack tensor of MomentumIterativeMethod')
            mim_obj = MomentumIterativeMethod(model=ch_model_probs, sess=sess)
            #x_adv_test_mim = mim_obj.generate(x=x, eps=fgsm_eps, eps_iter=fgsm_eps/10, nb_iter=10, decay_factor=1.0, clip_min=-1.0, clip_max=1.0, ord=2)
            x_adv_test_mim = mim_obj.generate(x=x,
                                              eps=fgsm_eps,
                                              eps_iter=fgsm_eps / 3,
                                              nb_iter=3,
                                              decay_factor=1.0,
                                              clip_min=-1.0,
                                              clip_max=1.0)
            attack_tensor_dict['mim'] = x_adv_test_mim

        # MadryEtAl (Projected Gradient with random init, same as rand+fgsm)
        # default: eps_iter=0.01, nb_iter=40
        if attack_switch['madry']:
            print('creating attack tensor of MadryEtAl')
            madry_obj = MadryEtAl(model=ch_model_probs, sess=sess)
            #x_adv_test_madry = madry_obj.generate(x=x, eps=fgsm_eps, eps_iter=fgsm_eps/10, nb_iter=10, clip_min=-1.0, clip_max=1.0, ord=2)
            x_adv_test_madry = madry_obj.generate(x=x,
                                                  eps=fgsm_eps,
                                                  eps_iter=fgsm_eps / 3,
                                                  nb_iter=3,
                                                  clip_min=-1.0,
                                                  clip_max=1.0)
            attack_tensor_dict['madry'] = x_adv_test_madry
        #====================== attack =========================

        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())

        # Privacy accountant
        priv_accountant = accountant.GaussianMomentsAccountant(D)
        privacy_accum_op = priv_accountant.accumulate_privacy_spending(
            [None, None], sigma, batch_size)

        # Build the summary operation based on the TF collection of Summaries.
        #summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()

        # Start running operations on the Graph.
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(os.getcwd() + path, sess.graph)

        # load the most recent models
        _global_step = 0
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
            _global_step = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        else:
            print('No checkpoint file found')

        T = int(int(math.ceil(D / batch_size)) * epochs + 1)  # total number of training steps
        step_for_epoch = int(math.ceil(D / batch_size))  # number of steps in one epoch

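        # Noise scale for the first Gaussian mechanism; the closed form
        # sqrt(2) * (sqrt(s) + sqrt(s + dp_epsilon)) / (2 * dp_epsilon) is
        # assumed to come from the accompanying differential-privacy analysis.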
        s = math.log(sqrt(2.0 / math.pi) * 1e+5)
        sigmaEGM = sqrt(2.0) * 1.0 * (sqrt(s) + sqrt(s + dp_epsilon)) / (
            2.0 * dp_epsilon)
        #print(sigmaEGM)
        __noiseE = np.random.normal(0.0, sigmaEGM,
                                    14 * 14 * 128).astype(np.float32)
        __noiseE = np.reshape(__noiseE, [-1, 14, 14, 128])
        print("Compute The Noise Redistribution Vector")
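        # Warm-up training with zero noise so that the gradient-based noise
        # redistribution vector (grad_redis) can be estimated below.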
        for step in xrange(_global_step, 100 * step_for_epoch):
            batch = cifar10_data.train.next_batch(batch_size)
            #Get a random batch.
            _, loss_value = sess.run(
                [train_op, loss],
                feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    noise: __noiseE * 0,
                    sigma: sigma_value * 0
                })
            if step % (5 * step_for_epoch) == 0:
                print(loss_value)
        batch = cifar10_data.train.next_batch(40 * batch_size)
        grad_redis = sess.run([normalized_grad_r],
                              feed_dict={
                                  x: batch[0],
                                  y_: batch[1],
                                  noise: __noiseE * 0
                              })
        _sensitivity_2 = sess.run([sensitivity_2],
                                  feed_dict={
                                      x: batch[0],
                                      y_: batch[1],
                                      noise: __noiseE * 0
                                  })
        #print(_sensitivity_2)

        _sensitivityW = sess.run(sensitivityW)
        #print(_sensitivityW)
        Delta_redis = _sensitivityW / sqrt(_sensitivity_2[0])
        #print(Delta_redis)
        sigmaHGM = sqrt(2.0) * Delta_redis * (
            sqrt(s) + sqrt(s + dp_epsilon)) / (2.0 * dp_epsilon)
        #print(sigmaHGM)
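        # Heterogeneous noise: a Gaussian draw with scale sigmaHGM, redistributed
        # across the feature map by the normalized gradient vector grad_redis.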
        __noiseH = np.random.normal(0.0, sigmaHGM,
                                    14 * 14 * 128).astype(np.float32)
        __noiseH = np.reshape(__noiseH, [-1, 14, 14, 128]) * grad_redis

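        # Re-initialize all variables so the warm-up phase above does not carry
        # over into the final training run; only the precomputed noise is kept.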
        sess.run(init)
        print("Training")
        for step in xrange(_global_step, _global_step + T):
            start_time = time.time()
            batch = cifar10_data.train.next_batch(batch_size)
            #Get a random batch.
            #grad_redis = sess.run([normalized_grad_r], feed_dict = {x: batch[0], y_: batch[1], noise: (__noise + grad_redis)/2})
            _, loss_value = sess.run(
                [train_op, loss],
                feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    noise: (__noiseE + __noiseH) / 2,
                    sigma: sigma_value
                })
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            sess.run([privacy_accum_op])
            spent_eps_deltas = priv_accountant.get_privacy_spent(
                sess, target_eps=target_eps)
            if step % (5 * step_for_epoch) == 0:
                print(loss_value)
                print(spent_eps_deltas)
            _break = False
            for _eps, _delta in spent_eps_deltas:
                if _delta >= delta:
                    _break = True
                    break
            if _break:
                break

        ## Robustness
        print("Testing")
        adv_acc_dict = {}
        robust_adv_acc_dict = {}
        robust_adv_utility_dict = {}
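        # For each enabled attack: craft adversarial test examples, then run many
        # noisy forward passes and certify robustness from the vote counts
        # (PixelDP-style certification).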
        test_batch_size = 5000
        for atk in attack_switch.keys():
            if atk not in adv_acc_dict:
                adv_acc_dict[atk] = -1
                robust_adv_acc_dict[atk] = -1
                robust_adv_utility_dict[atk] = -1
            if attack_switch[atk]:
                test_batch = cifar10_data.test.next_batch(test_batch_size)
                adv_images = sess.run(attack_tensor_dict[atk],
                                      feed_dict={x: test_batch[0]})
                ### PixelDP Robustness ###
                predictions_from_argmax = np.zeros([test_batch_size, 10])
                softmax_predictions = sess.run(softmax_y_conv,
                                               feed_dict={
                                                   x: adv_images,
                                                   noise:
                                                   (__noiseE + __noiseH) / 2
                                               })
                argmax_predictions = np.argmax(softmax_predictions, axis=1)
                for n_draws in range(0, 1000):
                    _noiseE = np.random.normal(0.0, sigmaEGM,
                                               14 * 14 * 128).astype(np.float32)
                    _noiseE = np.reshape(_noiseE, [-1, 14, 14, 128])
                    _noise = np.random.normal(0.0, sigmaHGM,
                                              14 * 14 * 128).astype(np.float32)
                    _noise = np.reshape(_noise, [-1, 14, 14, 128]) * grad_redis
                    for j in range(test_batch_size):
                        pred = argmax_predictions[j]
                        predictions_from_argmax[j, pred] += 1
                    softmax_predictions = sess.run(
                        softmax_y_conv,
                        feed_dict={
                            x: adv_images,
                            noise:
                            (__noiseE + __noiseH) / 2 + (_noiseE + _noise) / 4
                        })
                    argmax_predictions = np.argmax(softmax_predictions, axis=1)
                final_predictions = predictions_from_argmax
                is_correct = []
                is_robust = []
                for j in range(test_batch_size):
                    is_correct.append(
                        np.argmax(test_batch[1][j]) == np.argmax(
                            final_predictions[j]))
                    robustness_from_argmax = robustnessGGaussian.robustness_size_argmax(
                        counts=predictions_from_argmax[j],
                        eta=0.05,
                        dp_attack_size=fgsm_eps,
                        dp_epsilon=dp_epsilon,
                        dp_delta=0.05,
                        dp_mechanism='gaussian') / dp_mult
                    is_robust.append(robustness_from_argmax >= fgsm_eps)
                adv_acc_dict[atk] = np.sum(is_correct) * 1.0 / test_batch_size
                robust_adv_acc_dict[atk] = np.sum([
                    a and b for a, b in zip(is_robust, is_correct)
                ]) * 1.0 / np.sum(is_robust)
                robust_adv_utility_dict[atk] = np.sum(
                    is_robust) * 1.0 / test_batch_size
                ##############################
                ##############################
        log_str = ""
        for atk in attack_switch.keys():
            if attack_switch[atk]:
                # log per attack: adversarial accuracy, robust accuracy (among certified),
                # certified fraction (utility), and their product
                log_str += " {}: {:.4f} {:.4f} {:.4f} {:.4f}".format(
                    atk, adv_acc_dict[atk], robust_adv_acc_dict[atk],
                    robust_adv_utility_dict[atk],
                    robust_adv_acc_dict[atk] * robust_adv_utility_dict[atk])
        print(log_str)
        logfile.write(log_str + '\n')
Example No. 16
0
def main(_):
    tf.logging.set_verbosity(tf.logging.DEBUG)

    # Images for the Inception classifier are normalized to the [-1, 1] interval.
    num_classes = 1001
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]

    # Load ImageNet Class Labels
    with open('labels.json') as f:
        labels = json.load(f)

    # Prepare Graph
    with tf.Graph().as_default():

        # Build Model
        if FLAGS.model_arch.lower() == 'resnet_v2_101':
            model = models.Resnet_V2_101_Model(num_classes)
            exceptions = []

        elif FLAGS.model_arch.lower() == 'inception_v3':
            model = models.Inception_V3_Model(num_classes)
            exceptions = ['InceptionV3/AuxLogits.*']

        else:
            raise ValueError('Invalid model architecture specified: {}'.format(
                FLAGS.model_arch))

        # Define Model Variables
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
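        # Build a dummy attack graph once so the model's variables are created
        # before the Saver below collects them.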
        FastGradientMethod(model).generate(x_input)
        model_variables = tf.contrib.framework.filter_variables(
            slim.get_model_variables(), exclude_patterns=exceptions)

        # Load Session
        saver = tf.train.Saver(model_variables)
        with tf.train.SessionManager().prepare_session(
                master=FLAGS.master,
                checkpoint_filename_with_path=FLAGS.checkpoint_path,
                saver=saver) as sess:

            # For targeted attacks
            target_idx = 0  # index of the target class; this will vary
            target = tf.constant(0, shape=[FLAGS.batch_size, num_classes])
            #      target = np.zeros((FLAGS.batch_size, num_classes), dtype=np.uint32)
            #      target[:, target_idx] = 1

            # Build Attack
            if FLAGS.attack_type.lower() == 'fgsm':
                fgsm_opts = {
                    'eps': 0.3,
                    'clip_min': 0,
                    'clip_max': 1.,
                    'y_target': None
                }
                fgsm = FastGradientMethod(model)
                x_adv = fgsm.generate(x_input, **fgsm_opts)

            elif FLAGS.attack_type.lower() == 'bim':
                bim_opts = {
                    'eps': 0.3,
                    'clip_min': 0.,
                    'clip_max': 1.,
                    'y_target': None
                }
                bim = BasicIterativeMethod(model)
                x_adv = bim.generate(x_input, **bim_opts)

            elif FLAGS.attack_type.lower() == 'mim':
                mim_opts = {'eps': 0.3, 'clip_min': 0, 'clip_max': 1.}
                mim = MomentumIterativeMethod(model)
                x_adv = mim.generate(x_input, **mim_opts)

            elif FLAGS.attack_type.lower() == 'pgd':
                pgd_opts = {'eps': 0.3, 'clip_min': 0, 'clip_max': 1.}
                pgd = MadryEtAl(model)
                x_adv = pgd.generate(x_input, **pgd_opts)

            # Broken
            elif FLAGS.attack_type.lower() == 'jsma':
                jsma_opts = {
                    'theta': 1.,
                    'gamma': 0.1,
                    'clip_min': 0.,
                    'clip_max': 1.,
                    'y_target': None
                }
                jsma = SaliencyMapMethod(model)
                x_adv = jsma.generate(x_input, **jsma_opts)

            elif FLAGS.attack_type.lower() == 'lbfgs':
                lbfgs_opts = {'y_target': target}
                lbfgs = LBFGS(model)
                x_adv = lbfgs.generate(x_input, **lbfgs_opts)

            else:
                raise ValueError('Invalid attack type specified: {}'.format(
                    FLAGS.attack_type))

            start_time, batch_time, num_processed = time.time(), time.time(), 0
            for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                adv_images = sess.run(x_adv, feed_dict={x_input: images})
                save_images(adv_images, filenames, FLAGS.output_dir)

                if FLAGS.show_predictions:
                    preds = sess.run(model(np.float32(images)))
                    probs = np.amax(preds, axis=1)
                    classes = np.argmax(preds, axis=1)
                    adv_preds = sess.run(model(adv_images))
                    adv_probs = np.amax(adv_preds, axis=1)
                    adv_classes = np.argmax(adv_preds, axis=1)

                    for i, _ in enumerate(filenames):
                        print('\nOriginal: {:.2f}% ({})\nAdversarial: {:.2f}% ({})'.format( \
                          probs[i]*100, labels[str(classes[i])], adv_probs[i]*100, labels[str(adv_classes[i])]))

                time_delta = time.time() - batch_time
                batch_time = time.time()
                num_processed += len(filenames)
                print('[SPEED ESTIMATION] BatchRate={:.4f} Hz; AverageRate={:.4f} Hz'.format( \
                  (len(filenames) / time_delta * 1.0), ((num_processed * 1.0) / (batch_time - start_time))))
def train(cifar10_data, epochs, L, learning_rate, scale3, Delta2, epsilon2,
          eps2_ratio, alpha, perturbFM, fgsm_eps, total_eps, logfile,
          parameter_dict):
    """Train CIFAR-10 for a number of steps."""
    logfile.write("fgsm_eps \t %g, LR \t %g, alpha \t %d , epsilon \t %d \n" %
                  (fgsm_eps, learning_rate, alpha, total_eps))
    # make sure variables are placed on cpu
    # TODO: for AWS version, check if put variables on GPU will be better
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        global_step = tf.Variable(0, trainable=False)
        attacks = ['ifgsm', 'mim', 'madry']

        # manually create all scopes
        with tf.variable_scope('conv1', reuse=tf.AUTO_REUSE) as scope:
            scope_conv1 = scope
        with tf.variable_scope('conv2', reuse=tf.AUTO_REUSE) as scope:
            scope_conv2 = scope
        with tf.variable_scope('conv3', reuse=tf.AUTO_REUSE) as scope:
            scope_conv3 = scope
        with tf.variable_scope('local4', reuse=tf.AUTO_REUSE) as scope:
            scope_local4 = scope
        with tf.variable_scope('local5', reuse=tf.AUTO_REUSE) as scope:
            scope_local5 = scope

        # Parameter declarations
        #with tf.variable_scope('conv1') as scope:
        # with tf.device('/gpu:{}'.format(AUX_GPU_IDX[0])):
        with tf.variable_scope(scope_conv1) as scope:
            kernel1 = _variable_with_weight_decay(
                'kernel1',
                shape=[4, 4, 3, 128],
                stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
                wd=0.0,
                collect=[AECODER_VARIABLES])
            biases1 = _bias_on_cpu('biases1', [128],
                                   tf.constant_initializer(0.0),
                                   collect=[AECODER_VARIABLES])

        # layer-1 sensitivity: the largest singular value of the flattened kernel1
        shape = kernel1.get_shape().as_list()
        w_t = tf.reshape(kernel1, [-1, shape[-1]])
        w = tf.transpose(w_t)
        sing_vals = tf.svd(w, compute_uv=False)
        sensitivity = tf.reduce_max(sing_vals)
        gamma = 2 * Delta2 / (L * sensitivity)

        with tf.variable_scope(scope_conv2) as scope:
            kernel2 = _variable_with_weight_decay(
                'kernel2',
                shape=[5, 5, 128, 128],
                stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
                wd=0.0,
                collect=[CONV_VARIABLES])
            biases2 = _bias_on_cpu('biases2', [128],
                                   tf.constant_initializer(0.1),
                                   collect=[CONV_VARIABLES])

        with tf.variable_scope(scope_conv3) as scope:
            kernel3 = _variable_with_weight_decay(
                'kernel3',
                shape=[5, 5, 256, 256],
                stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
                wd=0.0,
                collect=[CONV_VARIABLES])
            biases3 = _bias_on_cpu('biases3', [256],
                                   tf.constant_initializer(0.1),
                                   collect=[CONV_VARIABLES])

        with tf.variable_scope(scope_local4) as scope:
            kernel4 = _variable_with_weight_decay(
                'kernel4',
                shape=[int(image_size / 4)**2 * 256, hk],
                stddev=0.04,
                wd=0.004,
                collect=[CONV_VARIABLES])
            biases4 = _bias_on_cpu('biases4', [hk],
                                   tf.constant_initializer(0.1),
                                   collect=[CONV_VARIABLES])

        with tf.variable_scope(scope_local5) as scope:
            kernel5 = _variable_with_weight_decay(
                'kernel5', [hk, 10],
                stddev=np.sqrt(2.0 / (int(image_size / 4)**2 * 256)) /
                math.ceil(5 / 2),
                wd=0.0,
                collect=[CONV_VARIABLES])
            biases5 = _bias_on_cpu('biases5', [10],
                                   tf.constant_initializer(0.1),
                                   collect=[CONV_VARIABLES])

        # group these for use as parameters
        params = [
            kernel1, biases1, kernel2, biases2, kernel3, biases3, kernel4,
            biases4, kernel5, biases5
        ]
        scopes = [
            scope_conv1, scope_conv2, scope_conv3, scope_local4, scope_local5
        ]

        # placeholders for input values
        FM_h = tf.placeholder(tf.float32, [None, 14, 14, 128])  # one time
        noise = tf.placeholder(tf.float32,
                               [None, image_size, image_size, 3])  # one time
        adv_noise = tf.placeholder(
            tf.float32, [None, image_size, image_size, 3])  # one time

        x_sb = tf.placeholder(
            tf.float32,
            [None, image_size, image_size, 3])  # super-batch of inputs spanning all GPUs
        x_list = tf.split(x_sb, N_GPUS, axis=0)  # split into one batch per GPU
        adv_x_sb = tf.placeholder(tf.float32,
                                  [None, image_size, image_size, 3])
        adv_x_list = tf.split(adv_x_sb, N_GPUS, axis=0)

        x_test = tf.placeholder(tf.float32, [None, image_size, image_size, 3])

        y_sb = tf.placeholder(tf.float32,
                              [None, 10])  # super-batch of labels spanning all GPUs
        y_list = tf.split(y_sb, N_GPUS, axis=0)  # split into one batch per GPU
        adv_y_sb = tf.placeholder(tf.float32,
                                  [None, 10])  # super-batch of adversarial labels
        # adv_y_list = tf.split(adv_y_sb, N_GPUS, axis=0)  # split into one batch per GPU

        y_test = tf.placeholder(tf.float32, [None, 10])

        # re-arrange the adversarial labels so their order matches the per-GPU
        # concatenation of adversarial samples built later (adv_concat_list)
        _split_adv_y_sb = tf.split(adv_y_sb, N_AUX_GPUS, axis=0)
        reorder_adv_y_sb = []
        for i in range(N_GPUS):
            reorder_adv_y_sb.append(
                tf.concat([
                    _split_adv_y_sb[i + N_GPUS * atk_index]
                    for atk_index in range(len(attacks))
                ],
                          axis=0))

        tower_pretrain_grads = []
        tower_train_grads = []
        all_train_loss = []

        pretrain_opt = tf.train.AdamOptimizer(learning_rate)
        train_opt = tf.train.GradientDescentOptimizer(learning_rate)

        # batch index
        bi = 0
        for gpu in GPU_IDX:
            # putting ops on each tower (GPU)
            with tf.device('/gpu:{}'.format(gpu)):
                print('Train inference GPU placement')
                print('/gpu:{}'.format(gpu))
                # Auto-Encoder #
                # pretrain_adv and pretrain_benign are cost tensor of the encoding layer
                with tf.variable_scope(scope_conv1) as scope:
                    Enc_Layer2 = EncLayer(inpt=adv_x_list[bi],
                                          n_filter_in=3,
                                          n_filter_out=128,
                                          filter_size=3,
                                          W=kernel1,
                                          b=biases1,
                                          activation=tf.nn.relu)
                    pretrain_adv = Enc_Layer2.get_train_ops2(
                        xShape=tf.shape(adv_x_list[bi])[0],
                        Delta=Delta2,
                        epsilon=epsilon2,
                        batch_size=L,
                        learning_rate=learning_rate,
                        W=kernel1,
                        b=biases1,
                        perturbFMx=adv_noise,
                        perturbFM_h=FM_h,
                        bn_index=bi)
                    Enc_Layer3 = EncLayer(inpt=x_list[bi],
                                          n_filter_in=3,
                                          n_filter_out=128,
                                          filter_size=3,
                                          W=kernel1,
                                          b=biases1,
                                          activation=tf.nn.relu)
                    pretrain_benign = Enc_Layer3.get_train_ops2(
                        xShape=tf.shape(x_list[bi])[0],
                        Delta=Delta2,
                        epsilon=epsilon2,
                        batch_size=L,
                        learning_rate=learning_rate,
                        W=kernel1,
                        b=biases1,
                        perturbFMx=noise,
                        perturbFM_h=FM_h,
                        bn_index=bi)
                    pretrain_cost = pretrain_adv + pretrain_benign
                # this cost is not used
                # cost = tf.reduce_sum((Enc_Layer2.cost + Enc_Layer3.cost)/2.0);

                # benign conv output
                x_image = x_list[bi] + noise
                y_conv = inference(x_image,
                                   FM_h,
                                   params,
                                   scopes,
                                   training=True,
                                   bn_index=bi)
                # softmax_y_conv = tf.nn.softmax(y_conv)

                # adv conv output
                adv_x_image = adv_x_list[bi] + adv_noise
                y_adv_conv = inference(adv_x_image,
                                       FM_h,
                                       params,
                                       scopes,
                                       training=True,
                                       bn_index=bi)

                # Calculate loss. Apply Taylor Expansion for the output layer
                perturbW = perturbFM * params[8]
                train_loss = cifar10.TaylorExp(y_conv, y_list[bi], y_adv_conv,
                                               reorder_adv_y_sb[bi], L, alpha,
                                               perturbW)
                all_train_loss.append(train_loss)

                # list of variables to train
                pretrain_var_list = tf.get_collection(AECODER_VARIABLES)
                train_var_list = tf.get_collection(CONV_VARIABLES)

                # compute tower gradients
                pretrain_grads = pretrain_opt.compute_gradients(
                    pretrain_cost, var_list=pretrain_var_list)
                train_grads = train_opt.compute_gradients(
                    train_loss, var_list=train_var_list)
                # get_pretrain_grads(pretrain_cost, global_step, learning_rate, pretrain_var_list)
                # train_grads = get_train_grads(train_loss, global_step, learning_rate, train_var_list)

                # note this list contains grads and variables
                tower_pretrain_grads.append(pretrain_grads)
                tower_train_grads.append(train_grads)

                # batch index
                bi += 1

        # average the gradient from each tower
        pretrain_var_dict = {}
        all_pretrain_grads = {}
        avg_pretrain_grads = []
        for var in tf.get_collection(AECODER_VARIABLES):
            if var.name not in all_pretrain_grads:
                all_pretrain_grads[var.name] = []
                pretrain_var_dict[var.name] = var
        for tower in tower_pretrain_grads:
            for var_grad in tower:
                all_pretrain_grads[var_grad[1].name].append(var_grad[0])
        for var_name in all_pretrain_grads:
            # expand dim 0, then concat on dim 0, then reduce mean on dim 0
            expand_pretrain_grads = [
                tf.expand_dims(g, 0) for g in all_pretrain_grads[var_name]
            ]
            concat_pretrain_grads = tf.concat(expand_pretrain_grads, axis=0)
            reduce_pretrain_grads = tf.reduce_mean(concat_pretrain_grads, 0)
            # rebuild (grad, var) list
            avg_pretrain_grads.append(
                (reduce_pretrain_grads, pretrain_var_dict[var_name]))
        print('*****************************')
        print("avg_pretrain_grads:")
        for avg_pretrain_grad in avg_pretrain_grads:
            print('grads')
            print((avg_pretrain_grad[0].name, avg_pretrain_grad[0].shape))
            print('var')
            print((avg_pretrain_grad[1].name, avg_pretrain_grad[1].shape))
            print('------')

        train_var_dict = {}
        all_train_grads = {}
        avg_train_grads = []
        for var in tf.get_collection(CONV_VARIABLES):
            if var.name not in all_train_grads:
                all_train_grads[var.name] = []
                train_var_dict[var.name] = var
        for tower in tower_train_grads:
            for var_grad in tower:
                all_train_grads[var_grad[1].name].append(var_grad[0])
        for var_name in all_train_grads:
            # expand dim 0, then concat on dim 0, then reduce mean on dim 0
            expand_train_grads = [
                tf.expand_dims(g, 0) for g in all_train_grads[var_name]
            ]
            concat_train_grads = tf.concat(expand_train_grads, axis=0)
            reduce_train_grads = tf.reduce_mean(concat_train_grads, 0)
            # rebuild (grad, var) list
            avg_train_grads.append(
                (reduce_train_grads, train_var_dict[var_name]))
        print('*****************************')
        print("avg_train_grads:")
        for avg_train_grad in avg_train_grads:
            print('grads')
            print((avg_train_grad[0].name, avg_train_grad[0].shape))
            print('var')
            print((avg_train_grad[1].name, avg_train_grad[1].shape))
            print('------')
        print('*****************************')

        # get averaged loss tensor
        avg_loss = tf.reduce_mean(tf.stack(all_train_loss), axis=0)

        # TODO: take the average of the bn variables from each tower/training GPU
        # currently, testing is using the bn variables on bn_index 0 (tower/training GPU 0)

        # build train op (apply average gradient to variables)
        # according to 1.13 doc, updates need to be manually applied
        _update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        print('update ops:')
        print(_update_ops)

        with tf.control_dependencies(_update_ops):
            pretrain_op = pretrain_opt.apply_gradients(avg_pretrain_grads,
                                                       global_step=global_step)
            train_op = train_opt.apply_gradients(avg_train_grads,
                                                 global_step=global_step)

        # start a session with memory growth
        config = tf.ConfigProto(log_device_placement=False)
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        print("session created")

        # init kernel 1 and get some values from it
        sess.run(kernel1.initializer)
        dp_epsilon = 0.005
        parameter_dict['dp_epsilon'] = dp_epsilon
        _gamma = sess.run(gamma)
        _gamma_x = Delta2 / L
        epsilon2_update = epsilon2 / (1.0 + 1.0 / _gamma + 1 / _gamma_x)
        parameter_dict['epsilon2_update'] = epsilon2_update
        print(epsilon2_update / _gamma + epsilon2_update / _gamma_x)
        print(epsilon2_update)
        # NOTE: these values need to be recomputed at test time
        delta_r = fgsm_eps * (image_size**2)
        parameter_dict['delta_r'] = delta_r
        _sensitivityW = sess.run(sensitivity)
        parameter_dict['_sensitivityW'] = _sensitivityW
        delta_h = _sensitivityW * (14**2)
        parameter_dict['delta_h'] = delta_h
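        # dp_mult rescales the certified robustness size computed at test time
        # (robustness_size_argmax is divided by dp_mult); the exact constant is
        # assumed to follow the accompanying differential-privacy analysis.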
        #dp_mult = (Delta2/(L*epsilon2_update))/(delta_r / dp_epsilon) + (2*Delta2/(L*epsilon2_update))/(delta_h / dp_epsilon)
        dp_mult = (Delta2) / (L * epsilon2_update * (delta_h / 2 + delta_r))
        parameter_dict['dp_mult'] = dp_mult

        # place test-time inference into CPU
        with tf.device('/cpu:0'):
            # testing pipeline
            test_x_image = x_test + noise
            test_y_conv = inference(test_x_image,
                                    FM_h,
                                    params,
                                    scopes,
                                    training=True,
                                    bn_index=0)
            test_softmax_y_conv = tf.nn.softmax(test_y_conv)

        # ============== attacks ================
        iter_step_training = 3
        parameter_dict['iter_step_training'] = iter_step_training
        # iter_step_testing = 1000
        aux_dup_count = N_GPUS
        # split input x_super_batch into N_AUX_GPUS parts
        x_attacks = tf.split(x_sb, N_AUX_GPUS, axis=0)
        # split input x_test into aux_dup_count parts
        x_test_split = tf.split(x_test, aux_dup_count, axis=0)

        # setup all attacks
        # attack_switch = {'fgsm':False, 'ifgsm':True, 'deepfool':False, 'mim':True, 'spsa':False, 'cwl2':False, 'madry':True, 'stm':False}

        ch_model_probs = CustomCallableModelWrapper(
            callable_fn=inference_test_input_probs,
            output_layer='probs',
            params=params,
            scopes=scopes,
            image_size=image_size,
            adv_noise=adv_noise)
        attack_tensor_training_dict = {}
        attack_tensor_testing_dict = {}

        # define each attack method's tensor
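        # mu_alpha is fed at run time with the per-step attack budget (d_eps in
        # the training loop below), so one attack graph serves different epsilons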
        mu_alpha = tf.placeholder(tf.float32, [1])

        # build each attack
        for atk_idx in range(len(attacks)):
            atk = attacks[atk_idx]
            print('building attack {} tensors'.format(atk))
            # for each gpu assign to each attack
            attack_tensor_training_dict[atk] = []
            attack_tensor_testing_dict[atk] = []
            for i in range(aux_dup_count):
                if atk == 'ifgsm':
                    with tf.device('/gpu:{}'.format(AUX_GPU_IDX[i])):
                        print('ifgsm GPU placement: /gpu:{}'.format(
                            AUX_GPU_IDX[i]))
                        # ifgsm tensors for training
                        ifgsm_obj = BasicIterativeMethod(model=ch_model_probs,
                                                         sess=sess)
                        attack_tensor_training_dict[atk].append(
                            ifgsm_obj.generate(x=x_attacks[i],
                                               eps=mu_alpha,
                                               eps_iter=mu_alpha /
                                               iter_step_training,
                                               nb_iter=iter_step_training,
                                               clip_min=-1.0,
                                               clip_max=1.0))

                elif atk == 'mim':
                    with tf.device('/gpu:{}'.format(
                            AUX_GPU_IDX[i + 1 * aux_dup_count])):
                        print('mim GPU placement: /gpu:{}'.format(
                            AUX_GPU_IDX[i + 1 * aux_dup_count]))
                        # mim tensors for training
                        mim_obj = MomentumIterativeMethod(model=ch_model_probs,
                                                          sess=sess)
                        attack_tensor_training_dict[atk].append(
                            mim_obj.generate(
                                x=x_attacks[i + 1 * aux_dup_count],
                                eps=mu_alpha,
                                eps_iter=mu_alpha / iter_step_training,
                                nb_iter=iter_step_training,
                                decay_factor=1.0,
                                clip_min=-1.0,
                                clip_max=1.0))

                elif atk == 'madry':
                    with tf.device('/gpu:{}'.format(
                            AUX_GPU_IDX[i + 2 * aux_dup_count])):
                        print('madry GPU placement: /gpu:{}'.format(
                            AUX_GPU_IDX[i + 2 * aux_dup_count]))
                        # madry tensors for training
                        madry_obj = MadryEtAl(model=ch_model_probs, sess=sess)
                        attack_tensor_training_dict[atk].append(
                            madry_obj.generate(
                                x=x_attacks[i + 2 * aux_dup_count],
                                eps=mu_alpha,
                                eps_iter=mu_alpha / iter_step_training,
                                nb_iter=iter_step_training,
                                clip_min=-1.0,
                                clip_max=1.0))

        # combine all attack tensors
        adv_concat_list = []
        for i in range(aux_dup_count):
            adv_concat_list.append(
                tf.concat(
                    [attack_tensor_training_dict[atk][i] for atk in attacks],
                    axis=0))
        # the tensor that contains each batch of adv samples for training
        # has same sample order as the labels
        adv_super_batch_tensor = tf.concat(adv_concat_list, axis=0)

        #====================== attack =========================

        #adv_logits, _ = inference(c_x_adv + W_conv1Noise, perturbFM, params)

        print('******************** debug info **********************')
        # list of variables to train
        pretrain_var_list = tf.get_collection(AECODER_VARIABLES)
        print('pretrain var list')
        for v in pretrain_var_list:
            print((v.name, v.shape))
        print('**********************************')
        train_var_list = tf.get_collection(CONV_VARIABLES)
        print('train var list')
        for v in train_var_list:
            print((v.name, v.shape))
        print('**********************************')

        # all variables
        print('all variables')
        vl = tf.global_variables()
        for v in vl:
            print((v.name, v.shape))
        print('**********************************')

        # all ops
        ops = [n.name for n in tf.get_default_graph().as_graph_def().node]
        print('total number of ops')
        print(len(ops))
        # for op in ops:
        #   print(op)
        print('******************** debug info **********************')
        # exit()

        # Create a saver.
        saver = tf.train.Saver(var_list=tf.all_variables(), max_to_keep=1000)

        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()
        sess.run(init)

        # load the most recent models
        _global_step = 0
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
            _global_step = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        else:
            print('No checkpoint file found')

        T = int(int(math.ceil(D / L)) * epochs + 1)  # number of steps
        print('total number of steps: {}'.format(T))
        step_for_epoch = int(math.ceil(D / L))  # number of steps in one epoch
        parameter_dict['step_for_epoch'] = step_for_epoch
        print('step_for_epoch: {}'.format(step_for_epoch))

        # generate some fixed noise
        perturbH_test = np.random.laplace(0.0, 0, 14 * 14 * 128)  # one time
        perturbH_test = np.reshape(perturbH_test,
                                   [-1, 14, 14, 128])  # one time
        parameter_dict['perturbH_test'] = perturbH_test
        print('perturbH_test')
        print(perturbH_test.shape)

        perturbFM_h = np.random.laplace(0.0,
                                        2 * Delta2 / (epsilon2_update * L),
                                        14 * 14 * 128)  # one time
        perturbFM_h = np.reshape(perturbFM_h, [-1, 14, 14, 128])  # one time
        parameter_dict['perturbFM_h'] = perturbFM_h
        print('perturbFM_h')
        print(perturbFM_h.shape)

        Noise = generateIdLMNoise(image_size, Delta2, epsilon2_update,
                                  L)  # one time
        parameter_dict['Noise'] = Noise
        Noise_test = generateIdLMNoise(image_size, 0, epsilon2_update,
                                       L)  # one time
        parameter_dict['Noise_test'] = Noise_test
        print('Noise and Noise_test')
        print(Noise.shape)
        print(Noise_test.shape)
        # exit()

        # some timing variables
        adv_duration_total = 0.0
        adv_duration_count = 0
        train_duration_total = 0.0
        train_duration_count = 0

        # some debug flag
        adv_batch_flag = True
        batch_flag = True
        L_flag = True
        parameter_flag = True

        _global_step = 0
        for step in xrange(_global_step, _global_step + T):
            start_time = time.time()
            # TODO: fix this
            d_eps = random.random() * 0.5
            # d_eps = 0.25
            print('d_eps: {}'.format(d_eps))

            # version with 3 AUX GPU
            # get two super batchs, one for benign training, one for adv training
            super_batch_images, super_batch_labels = cifar10_data.train.next_super_batch(
                N_GPUS, random=True)
            super_batch_images_for_adv, super_batch_adv_labels = cifar10_data.train.next_super_batch_premix_ensemble(
                N_GPUS, random=True)

            # TODO: re-arrange the adv labels to match the adv samples

            # run adv_tensors_batch_concat to generate adv samples
            super_batch_adv_images = sess.run(adv_super_batch_tensor,
                                              feed_dict={
                                                  x_sb:
                                                  super_batch_images_for_adv,
                                                  adv_noise: Noise,
                                                  mu_alpha: [d_eps]
                                              })

            adv_finish_time = time.time()
            adv_duration = adv_finish_time - start_time
            adv_duration_total += adv_duration
            adv_duration_count += 1

            if adv_batch_flag:
                print(super_batch_images.shape)
                print(super_batch_labels.shape)
                print(super_batch_adv_images.shape)
                print(super_batch_adv_labels.shape)
                adv_batch_flag = False

            if batch_flag:
                print(super_batch_images.shape)
                print(super_batch_labels.shape)
                batch_flag = False

            if L_flag:
                print("L: {}".format(L))
                L_flag = False

            if parameter_flag:
                print('*=*=*=*=*')
                print(parameter_dict)
                print('*=*=*=*=*', flush=True)
                logfile.write('*=*=*=*=*\n')
                logfile.write(str(parameter_dict))
                logfile.write('*=*=*=*=*\n')
                parameter_flag = False

            _, _, avg_loss_value = sess.run(
                [pretrain_op, train_op, avg_loss],
                feed_dict={
                    x_sb: super_batch_images,
                    y_sb: super_batch_labels,
                    adv_x_sb: super_batch_adv_images,
                    adv_y_sb: super_batch_adv_labels,
                    noise: Noise,
                    adv_noise: Noise_test,
                    FM_h: perturbFM_h
                })

            assert not np.isnan(
                avg_loss_value), 'Model diverged with loss = NaN'

            train_finish_time = time.time()
            train_duration = train_finish_time - adv_finish_time
            train_duration_total += train_duration
            train_duration_count += 1

            # save model every 50 epochs
            if step % (50 * step_for_epoch) == 0 and (step >=
                                                      50 * step_for_epoch):
                print('saving model')
                checkpoint_path = os.path.join(os.getcwd() + dirCheckpoint,
                                               'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

            # Save the model checkpoint periodically.
            # if step % (10*step_for_epoch) == 0 and (step > _global_step):
            if step % 10 == 0 and (step > _global_step):
                # print n steps and time
                print("current epoch: {:.2f}".format(step / step_for_epoch))
                num_examples_per_step = L * N_GPUS * 2
                avg_adv_duration = adv_duration_total / adv_duration_count
                avg_train_duration = train_duration_total / train_duration_count
                avg_total_duration = avg_adv_duration + avg_train_duration
                examples_per_sec = num_examples_per_step / avg_total_duration
                sec_per_step = avg_total_duration
                # sec_per_batch = sec_per_step / (N_GPUS * 2)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.2f '
                    'sec/step; %.2f sec/adv_gen_op; %.2f sec/train_op)')
                actual_str = format_str % (
                    datetime.now(), step, avg_loss_value, examples_per_sec,
                    sec_per_step, avg_adv_duration, avg_train_duration)
                print(actual_str, flush=True)
                logfile.write(actual_str + '\n')
Example No. 18
0
def ml_dcgan(dataset, args):

    z_dim = args.z_dim
    x_dim = dataset.x_dim
    batch_size = args.batch_size

    base_learning_rate = args.lr  # for now we use same learning rate for Ds and Gs
    lr_decay_rate = args.lr_decay

    dataset_size = dataset.dataset_size

    print("Starting session")
    session = get_session()

    dcgan = BDCGAN(x_dim,
                   z_dim,
                   dataset_size=dataset_size,
                   batch_size=batch_size,
                   J=1,
                   ml=True,
                   num_classes=dataset.num_classes)

    tf.global_variables_initializer().run()

    print("Starting training loop")

    test_image_batches, test_label_batches = get_test_batches(
        dataset, batch_size)
    supervised_batches = get_supervised_batches(
        dataset, args.N, batch_size, list(range(dataset.num_classes)))

    if args.adv_test:
        x = tf.placeholder(tf.float32, shape=(batch_size, 28, 28,
                                              1))  # Hardcoded for MNIST
        if args.basic_iterative:
            fgsm = BasicIterativeMethod(dcgan, sess=session)
            dcgan.adv_constructor = fgsm
            fgsm_params = {
                'eps': args.eps,
                'eps_iter': float(args.eps / 4),
                'nb_iter': 4,
                'ord': np.inf,
                'clip_min': 0.,
                'clip_max': 1.
            }
            #,'y_target': None}
        else:
            fgsm = FastGradientMethod(dcgan, sess=session)
            dcgan.adv_constructor = fgsm
            eval_params = {'batch_size': batch_size}
            fgsm_params = {'eps': args.eps, 'clip_min': 0., 'clip_max': 1.}

        adv_x = fgsm.generate(x, **fgsm_params)
        preds = dcgan.get_probs(adv_x)
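        # adv_x is re-evaluated on every test batch below when args.adv_test is set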

    for train_iter in range(args.train_iter):

        batch_z = np.random.uniform(-1, 1, [batch_size, z_dim])
        image_batch, _ = dataset.next_batch(batch_size, class_id=None)
        labeled_image_batches, label_batches = next(supervised_batches)

        learning_rate = base_learning_rate * np.exp(-lr_decay_rate * min(
            1.0, (train_iter * batch_size) / float(dataset_size)))

        if args.adv_train:
            # NOTE: labeled_image_ph, unlabeled_batch_ph and fgsm_targeted_params
            # are assumed to be defined alongside the adversarial-test setup above.
            adv_labeled = session.run(
                fgsm.generate(labeled_image_ph, **fgsm_targeted_params),
                feed_dict={labeled_image_ph: labeled_image_batches})
            adv_unlabeled = session.run(
                fgsm.generate(unlabeled_batch_ph, **fgsm_params),
                feed_dict={unlabeled_batch_ph: image_batch})
            _, d_loss = session.run(
                [dcgan.d_optim_semi, dcgan.d_loss_semi],
                feed_dict={
                    dcgan.labeled_inputs: labeled_image_batches,
                    dcgan.labels: get_gan_labels(label_batches),
                    dcgan.inputs: image_batch,
                    dcgan.z: batch_z,
                    dcgan.d_semi_learning_rate: learning_rate,
                    dcgan.adv_unlab: adv_unlabeled,
                    dcgan.adv_labeled: adv_labeled
                })
        else:
            _, d_loss = session.run(
                [dcgan.d_optim_semi, dcgan.d_loss_semi],
                feed_dict={
                    dcgan.labeled_inputs: labeled_image_batches,
                    dcgan.labels: get_gan_labels(label_batches),
                    dcgan.inputs: image_batch,
                    dcgan.z: batch_z,
                    dcgan.d_semi_learning_rate: learning_rate,
                })

        _, s_loss = session.run([dcgan.s_optim, dcgan.s_loss],
                                feed_dict={
                                    dcgan.inputs: labeled_image_batches,
                                    dcgan.lbls: label_batches
                                })
        # compute g_sample loss
        batch_z = np.random.uniform(-1, 1, [batch_size, z_dim])
        _, g_loss = session.run(
            [dcgan.g_optims[0], dcgan.generation["g_losses"][0]],
            feed_dict={
                dcgan.z: batch_z,
                dcgan.g_learning_rate: learning_rate
            })

        if train_iter % args.n_save == 0:
            # get test set performance on real labels only for both GAN-based classifier and standard one
            d_logits, s_logits, lbls = get_test_stats(session, dcgan,
                                                      test_image_batches,
                                                      test_label_batches)

            if args.adv_test:
                adv_set = []
                for test_images in test_image_batches:
                    adv_set.append(
                        session.run(adv_x, feed_dict={x: test_images}))
                adv_sup_acc, adv_ss_acc, correct_uncertainty, incorrect_uncertainty, adv_acc, adv_ex_prob = get_adv_test_accuracy(
                    session, dcgan, adv_set, test_label_batches)

            print("saving results")
            results = dict(d_logits=d_logits, s_logits=s_logits, lbls=lbls)
            if args.adv_test:
                # adversarial statistics only exist when adv_test is enabled
                results.update(adv_sup_acc=adv_sup_acc,
                               adv_sup_acc_unf=adv_ss_acc,
                               correct_uncertainty=correct_uncertainty,
                               incorrect_uncertainty=incorrect_uncertainty,
                               adv_acc=adv_acc)
            np.savez_compressed(
                os.path.join(args.out_dir, 'results_%i.npz' % train_iter),
                **results)

            var_dict = {}
            for var in tf.trainable_variables():
                var_dict[var.name] = session.run(var.name)

            np.savez_compressed(
                os.path.join(args.out_dir, "weights_%i.npz" % train_iter),
                **var_dict)

            print("done")

    print("closing session")
    session.close()
    tf.reset_default_graph()
Example No. 19
0
def train_zero_knowledge_gandef_model(train_start=0,
                                      train_end=60000,
                                      test_start=0,
                                      test_end=10000,
                                      smoke_test=True,
                                      save=False,
                                      testing=False,
                                      backprop_through_attack=False,
                                      num_threads=None):
    """
    Train and evaluate the zero-knowledge GanDef model (a CleverHans-style
    tutorial on Fashion-MNIST).
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param smoke_test: if true, train and test on a small subset of the data
    :param save: if true, the final model will be saved
    :param testing: if true, complete an AccuracyReport for unit tests
                    to verify that performance is adequate
    :param backprop_through_attack: if True, backprop through the adversarial
                                    example construction process during
                                    adversarial training
    :param num_threads: number of intra-op parallelism threads for the TF session
    :return: an AccuracyReport object
    """

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Create TF session
    if num_threads:
        config_args = dict(intra_op_parallelism_threads=num_threads)
    else:
        config_args = {}
    sess = tf.Session(config=tf.ConfigProto(**config_args))

    # Get Fashion-MNIST data
    X_train, Y_train, X_test, Y_test = data_fashion_mnist()
    if smoke_test:
        X_train, Y_train = X_train[:256], Y_train[:256]
        X_test, Y_test = X_test[:256], Y_test[:256]

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y_soft = tf.placeholder(tf.float32, shape=(None, 10))

    # Prepare optimizer
    learning_rate = 1e-4
    clf_opt = tf.train.AdamOptimizer(learning_rate)
    dic_opt = tf.train.AdamOptimizer(learning_rate * 10)

    # Train an MNIST model
    train_params = {
        'nb_epochs': 80,
        'batch_size': 128,
        'trade_off': 2,
        'inner_epochs': 1
    }
    rng = np.random.RandomState([2017, 8, 30])

    # Adversarial training
    print("Start adversarial training")
    zero_knowledge_gandef_model = make_zero_knowledge_gandef_model(
        name="model_zero_knowledge_gandef")
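    # "Zero knowledge" here appears to mean that no attack is run during
    # training: the defender only ever sees Gaussian-augmented copies of the
    # clean inputs (aug_x below).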
    aug_x = gaussian_augment(x, std=1)
    preds_clean = zero_knowledge_gandef_model(x)
    preds_aug = zero_knowledge_gandef_model(aug_x)

    def cross_entropy(truth, preds, mean=True):
        # Get the logits operator
        op = preds.op
        if op.type == "Softmax":
            logits, = op.inputs
        else:
            logits = preds

        # Calculate cross entropy loss
        out = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                      labels=truth)

        # Take average loss and return
        if mean:
            out = tf.reduce_mean(out)
        return out

    def sigmoid_entropy(truth, preds, mean=True):
        # Get the logits operator
        op = preds.op
        if op.type == "Softmax":
            logits, = op.inputs
        else:
            logits = preds

        # Calculate cross entropy loss
        out = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                      labels=truth)

        # Take average loss and return
        if mean:
            out = tf.reduce_mean(out)
        return out

    # Perform and evaluate adversarial training
    gan_train_v2(sess,
                 x,
                 y_soft,
                 preds_clean,
                 X_train,
                 Y_train,
                 loss_func=[cross_entropy, sigmoid_entropy],
                 optimizer=[clf_opt, dic_opt],
                 predictions_adv=preds_aug,
                 evaluate=None,
                 args=train_params,
                 rng=rng,
                 var_list=zero_knowledge_gandef_model.get_gan_params())

    # Evaluate the accuracy of the model on clean test examples
    preds_clean = zero_knowledge_gandef_model(x)
    eval_params = {
        'batch_size': 128,
        'use_dic': False,
        'is_clean': True,
        'reject_threshold': 0.5
    }
    clean_acc = confident_model_eval(sess,
                                     x,
                                     y_soft,
                                     preds_clean,
                                     X_test,
                                     Y_test,
                                     args=eval_params)
    print('Test accuracy on Clean test examples: %0.4f\n' % clean_acc)
    report.adv_train_clean_eval = clean_acc

    # Evaluate the accuracy of the model on FGSM examples
    fgsm_params = {'eps': 0.6, 'clip_min': -1., 'clip_max': 1.}
    fgsm_att = FastGradientMethod(zero_knowledge_gandef_model, sess=sess)
    fgsm_adv = fgsm_att.generate(x, **fgsm_params)
    preds_fgsm_adv = zero_knowledge_gandef_model(fgsm_adv)
    eval_params = {
        'batch_size': 128,
        'use_dic': False,
        'is_clean': False,
        'reject_threshold': 0.5
    }
    fgsm_acc = confident_model_eval(sess,
                                    x,
                                    y_soft,
                                    preds_fgsm_adv,
                                    X_test,
                                    Y_test,
                                    args=eval_params)
    print('Test accuracy on FGSM test examples: %0.4f\n' % fgsm_acc)
    report.adv_train_adv_eval = fgsm_acc

    # Evaluate the accuracy of the model on BIM examples
    bim_params = {'eps': 0.6, 'eps_iter': 0.1, 'clip_min': -1., 'clip_max': 1.}
    bim_att = BasicIterativeMethod(zero_knowledge_gandef_model, sess=sess)
    bim_adv = bim_att.generate(x, **bim_params)
    preds_bim_adv = zero_knowledge_gandef_model(bim_adv)
    eval_params = {
        'batch_size': 128,
        'use_dic': False,
        'is_clean': False,
        'reject_threshold': 0.5
    }
    bim_acc = confident_model_eval(sess,
                                   x,
                                   y_soft,
                                   preds_bim_adv,
                                   X_test,
                                   Y_test,
                                   args=eval_params)
    print('Test accuracy on BIM test examples: %0.4f\n' % bim_acc)
    report.adv_train_adv_eval = bim_acc

    # Evaluate the accuracy of the model on PGD examples
    pgd_params = {
        'eps': 0.6,
        'eps_iter': 0.02,
        'nb_iter': 40,
        'clip_min': -1.,
        'clip_max': 1.,
        'rand_init': True
    }
    pgd_att = MadryEtAl(zero_knowledge_gandef_model, sess=sess)
    pgd_adv = pgd_att.generate(x, **pgd_params)
    preds_pgd_adv = zero_knowledge_gandef_model(pgd_adv)
    eval_params = {
        'batch_size': 128,
        'use_dic': False,
        'is_clean': False,
        'reject_threshold': 0.5
    }
    pgd_acc = confident_model_eval(sess,
                                   x,
                                   y_soft,
                                   preds_pgd_adv,
                                   X_test,
                                   Y_test,
                                   args=eval_params)
    print('Test accuracy on PGD test examples: %0.4f\n' % pgd_acc)
    report.adv_train_adv_eval = pgd_acc

    # Save model
    if save:
        model_path = "models/zero_knowledge_gandef"
        vars_to_save = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope='model_zero_knowledge_gandef*')
        assert len(vars_to_save) > 0
        saver = tf.train.Saver(var_list=vars_to_save)
        saver.save(sess, model_path)
        print('Model saved\n')
    else:
        print('Model not saved\n')
Example No. 20
0
def main(argv):
    report = AccuracyReport()
    print("Start Main")
    tf.logging.set_verbosity(tf.logging.INFO)
    input_shape = (32, 32, 3)
    num_classes = 10
    x_train = np.load(
        '/work/cse496dl/shared/hackathon/05/cifar10_train_data.npy')
    y_train = np.load(
        '/work/cse496dl/shared/hackathon/05/cifar10_train_labels.npy')
    x_test = np.load(
        '/work/cse496dl/shared/hackathon/05/cifar10_test_data.npy')
    y_test = np.load(
        '/work/cse496dl/shared/hackathon/05/cifar10_test_labels.npy')
    # CIFAR-10 images are 32x32x3 with 10 classes (set above); scale the test
    # set to [0, 1] here, the training set is scaled at training time below
    train_data = x_train
    test_data = x_test / 255.
    print("image load complete")

    def model_arch():
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10))
        model.add(Activation('softmax'))
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])
        return model

    sess = tf.Session()
    keras.backend.set_session(sess)
    model_2 = model_arch()
    X_train = train_data
    Y_train = y_train
    X_test = test_data
    Y_test = y_test
    Y_train = keras.utils.to_categorical(Y_train, 10)
    Y_test = keras.utils.to_categorical(Y_test, 10)
    x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
    y = tf.placeholder(tf.float32, shape=(None, 10))
    preds_2 = model_2(x)
    bim_params = {
        'eps_iter': 0.03,
        'nb_iter': 8,
        'clip_min': 0.,
        'clip_max': 1.
    }
    bim_att = BasicIterativeMethod(model_2, sess=sess)
    adv_x_2 = bim_att.generate(x, **bim_params)
    backprop_through_attack = False
    if not backprop_through_attack:
        # BIM is an iterative attack, so gradients do flow through the attack
        # graph. Backpropagating through it raises the cost of adversarial
        # training but lets the defender anticipate how the attacker adapts to
        # parameter updates; here we stop gradients at the adversarial examples.
        adv_x_2 = tf.stop_gradient(adv_x_2)
    preds_2_adv = model_2(adv_x_2)
    print("model and fgsm created successfully")

    def evaluate_2():
        # Accuracy of adversarially trained model on legitimate test inputs
        eval_params = {'batch_size': 128}
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate examples: %0.4f' % accuracy)
        report.adv_train_clean_eval = accuracy

        # Accuracy of the adversarially trained model on adversarial examples
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2_adv,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on adversarial examples: %0.4f' % accuracy)
        report.adv_train_adv_eval = accuracy

    train_params = {'nb_epochs': 30, 'batch_size': 20, 'learning_rate': 0.01}
    print("train start")
    rng = np.random.RandomState([2017, 8, 30])
    # Perform and evaluate adversarial training
    model_train(sess,
                x,
                y,
                preds_2,
                X_train / 255.,
                Y_train,
                predictions_adv=preds_2_adv,
                evaluate=evaluate_2,
                args=train_params,
                rng=rng)
    model_2.save('bim_retrained_cifar.h5')
    print("train_end")
Ejemplo n.º 21
0
def SSGD_resnet_testing(TIN_data, resnet_params, train_params, test_params,
                        all_params):
    # dict for encoding layer variables and output layer variables
    pre_define_vars = {}

    # list of variables to train
    train_vars = []

    with tf.Graph().as_default(), tf.device('/cpu:0'):
        global_step = tf.Variable(0, trainable=False)

        # Parameters Declarification
        ######################################

        # encoding (pretrain) layer variables
        with tf.variable_scope('enc_layer', reuse=tf.AUTO_REUSE) as scope:
            kernel1 = tf.get_variable(
                'kernel1',
                shape=[
                    train_params.enc_kernel_size, train_params.enc_kernel_size,
                    3, train_params.enc_filters
                ],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer_conv2d())
            biases1 = tf.get_variable('biases1',
                                      shape=[train_params.enc_filters],
                                      dtype=tf.float32,
                                      initializer=tf.constant_initializer(0.0))
        pre_define_vars['kernel1'] = kernel1
        pre_define_vars['biases1'] = biases1
        train_vars.append(kernel1)
        train_vars.append(biases1)

        dp_mult = all_params['dp_mult']

        # output layer variables
        with tf.variable_scope('fc2', reuse=tf.AUTO_REUSE) as scope:
            stdv = 1.0 / math.sqrt(train_params.hk)
            final_w = tf.get_variable(
                'kernel',
                shape=[train_params.hk, train_params.num_classes],
                dtype=tf.float32,
                initializer=tf.random_uniform_initializer(-stdv, stdv))
            final_b = tf.get_variable('bias',
                                      shape=[train_params.num_classes],
                                      dtype=tf.float32,
                                      initializer=tf.constant_initializer(0.0))
        pre_define_vars['final_w'] = final_w
        pre_define_vars['final_b'] = final_b
        train_vars.append(final_w)
        train_vars.append(final_b)
        ######################################

        # Build a Graph that computes the logits predictions from the inputs
        ######################################
        # input placeholders
        x_sb = tf.placeholder(
            tf.float32,
            [None, train_params.image_size, train_params.image_size, 3],
            name='x_sb')  # input is the bunch of n_batchs
        x_test = tf.placeholder(
            tf.float32,
            [None, train_params.image_size, train_params.image_size, 3],
            name='x_test')

        y_sb = tf.placeholder(
            tf.float32, [None, train_params.num_classes],
            name='y_sb')  # input is the bunch of n_batchs (super batch)
        y_test = tf.placeholder(tf.float32, [None, train_params.num_classes],
                                name='y_test')

        noise = tf.placeholder(tf.float32, [
            None, train_params.enc_h_size, train_params.enc_h_size,
            train_params.enc_filters
        ],
                               name='noise')  # one time

        keep_prob = tf.placeholder(tf.float32, shape=(), name='keep_prob')

        with tf.device('/gpu:0'):
            # the model for testing
            y_logits_test, _ = test_inference(
                x_sb, train_params.attack_norm_bound * noise, keep_prob,
                pre_define_vars, resnet_params, train_params)
            y_softmax_test = tf.nn.softmax(y_logits_test)
        correct_prediction = tf.equal(tf.argmax(y_logits_test, 1),
                                      tf.argmax(y_sb, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # print all variables
        print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
        all_vars = tf.global_variables()
        print_var_list('all vars', all_vars)
        print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')

        # add selected vars into list
        # ('res4' in var.name and ('gamma' in var.name or 'beta' in var.name)) or
        for var in tf.global_variables():
            if 'resnet_model' in var.name and \
              ('conv0' in var.name or
              'fc' in var.name or
              'res3' in var.name or
              'res4' in var.name or
              'res1' in var.name or
              'res2' in var.name) and \
                ('gamma' in var.name or
                  'beta' in var.name or
                  'kernel' in var.name or
                  'bias' in var.name):
                if var not in train_vars:
                    train_vars.append(var)
            elif 'enc_layer' in var.name and \
              ('kernel' in var.name or
                'bias' in var.name or
                'gamma' in var.name or
                'beta' in var.name):
                if var not in train_vars:
                    train_vars.append(var)

        print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
        print_var_list('train_vars', train_vars)
        print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')

        ######################################

        # Create a saver.
        saver = tf.train.Saver(var_list=tf.all_variables(), max_to_keep=1000)

        # start a session with memory growth
        config = tf.ConfigProto(log_device_placement=False)
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        print("session created")

        # list all checkpoints in ckpt_path
        checkpoint_path_read = os.path.join(os.getcwd() +
                                            test_params.check_point_dir)
        ckpts = tf.train.get_checkpoint_state(checkpoint_path_read)
        print(ckpts)
        # find the ckpt we need to load and load it
        for ckpt in ckpts.all_model_checkpoint_paths:
            # print(ckpt)
            ckpt_step = int(ckpt.split('-')[-1])
            if ckpt_step == test_params.step_to_load:
                saver.restore(sess, ckpt)
                print('model loaded from {}'.format(ckpt))

        # #######################################

        # # setup all attacks
        attack_switch = {
            'fgsm': False,
            'ifgsm': True,
            'deepfool': False,
            'mim': True,
            'spsa': False,
            'cwl2': False,
            'madry': True,
            'stm': False
        }

        ch_model_probs = CustomCallableModelWrapper(
            callable_fn=inference_test_output_probs,
            output_layer='probs',
            keep_prob=keep_prob,
            pre_define_vars=pre_define_vars,
            resnet_params=resnet_params,
            train_params=train_params)
        attack_tensor_testing_dict = {}

        # define each attack method's tensor
        mu_alpha = tf.placeholder(tf.float32, [1])
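        # mu_alpha feeds the attack strength (eps) at run time, so a single
        # attack graph can be reused for every value in test_params.fgsm_eps_list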

        # Iterative FGSM (BasicIterativeMethod/ProjectedGradientMethod with no random init)
        with tf.device('/gpu:0'):
            if attack_switch['ifgsm']:
                print('creating attack tensor of BasicIterativeMethod')
                ifgsm_obj = BasicIterativeMethod(model=ch_model_probs,
                                                 sess=sess)
                attack_tensor_testing_dict['ifgsm'] = ifgsm_obj.generate(
                    x=x_sb,
                    eps=mu_alpha,
                    eps_iter=mu_alpha / train_params.iter_step_testing,
                    nb_iter=train_params.iter_step_testing,
                    clip_min=-1.0,
                    clip_max=1.0)

        # MomentumIterativeMethod
        with tf.device('/gpu:0'):
            if attack_switch['mim']:
                print('creating attack tensor of MomentumIterativeMethod')
                mim_obj = MomentumIterativeMethod(model=ch_model_probs,
                                                  sess=sess)
                attack_tensor_testing_dict['mim'] = mim_obj.generate(
                    x=x_sb,
                    eps=mu_alpha,
                    eps_iter=mu_alpha / train_params.iter_step_testing,
                    nb_iter=train_params.iter_step_testing,
                    decay_factor=1.0,
                    clip_min=-1.0,
                    clip_max=1.0)

        # MadryEtAl (Projected Gradient with random init, same as rand+fgsm)
        with tf.device('/gpu:0'):
            if attack_switch['madry']:
                print('creating attack tensor of MadryEtAl')
                madry_obj = MadryEtAl(model=ch_model_probs, sess=sess)
                attack_tensor_testing_dict['madry'] = madry_obj.generate(
                    x=x_sb,
                    eps=mu_alpha,
                    eps_iter=mu_alpha / train_params.iter_step_testing,
                    nb_iter=train_params.iter_step_testing,
                    clip_min=-1.0,
                    clip_max=1.0)

        # #######################################

        sigmaEGM = all_params['sigmaEGM']

        __noiseE = all_params['__noiseE']

        grad_redis = all_params['grad_redis']

        _sensitivity_2 = all_params['_sensitivity_2']

        _sensitivityW = all_params['_sensitivityW']

        Delta_redis = all_params['Delta_redis']

        sigmaHGM = all_params['sigmaHGM']

        __noiseH = all_params['__noiseH']

        __noise_zero = all_params['__noise_zero']

        ####################################

        ####################################
        print('start testing')
        start_time = time.time()
        log_file_path = os.getcwd() + test_params.log_file_path
        log_file = open(log_file_path, 'a', encoding='utf-8')
        attacks_and_benign = test_params.attacks + ['benign']
        #===================adv samples=====================
        # for each eps setting
        for fgsm_eps in test_params.fgsm_eps_list:
            adv_acc_dict = {}
            robust_adv_acc_dict = {}
            robust_adv_utility_dict = {}
            log_str = ''
            eps_start_time = time.time()
            # cover all test data
            for i in range(test_params.test_epochs):
                test_batch = TIN_data.test.next_batch(
                    test_params.test_batch_size)
                adv_images_dict = {}
                # test for each attack
                for atk in attacks_and_benign:
                    start_time = time.time()
                    if atk not in adv_acc_dict:
                        adv_acc_dict[atk] = 0.0
                        robust_adv_acc_dict[atk] = 0.0
                        robust_adv_utility_dict[atk] = 0.0
                    if atk == 'benign':
                        testing_img = test_batch[0]
                    elif attack_switch[atk]:
                        # if only one gpu available, generate adv samples in-place
                        if atk not in adv_images_dict:
                            adv_images_dict[atk] = sess.run(
                                attack_tensor_testing_dict[atk],
                                feed_dict={
                                    x_sb: test_batch[0],
                                    mu_alpha: [fgsm_eps],
                                    keep_prob: 1.0
                                })
                        testing_img = adv_images_dict[atk]
                    else:
                        continue
                    print('adv gen time: {}s'.format(time.time() - start_time))
                    start_time = time.time()

                    ### PixelDP Robustness ###
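                    # Monte Carlo estimate of the noisy classifier: each draw
                    # below adds fresh Gaussian noise to the encoding layer, and
                    # the argmax votes are accumulated per example in
                    # predictions_form_argmax.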
                    predictions_form_argmax = np.zeros([
                        test_params.test_batch_size, train_params.num_classes
                    ])
                    softmax_predictions = sess.run(
                        y_softmax_test,
                        feed_dict={
                            x_sb: testing_img,
                            noise: (__noiseE + __noiseH) / 2,
                            keep_prob: 1.0
                        })
                    argmax_predictions = np.argmax(softmax_predictions, axis=1)
                    for n_draws in range(1, test_params.num_samples + 1):
                        if n_draws % 100 == 0:
                            print(
                                'current draws: {}, avg draw time: {}s'.format(
                                    n_draws,
                                    (time.time() - start_time) / n_draws))
                        _noiseE = np.random.normal(
                            0.0, sigmaEGM**2,
                            train_params.enc_h_size * train_params.enc_h_size *
                            train_params.enc_filters).astype(np.float32)
                        _noiseE = np.reshape(_noiseE, [
                            -1, train_params.enc_h_size,
                            train_params.enc_h_size, train_params.enc_filters
                        ])
                        _noise = np.random.normal(
                            0.0, sigmaHGM**2,
                            train_params.enc_h_size * train_params.enc_h_size *
                            train_params.enc_filters).astype(np.float32)
                        _noise = np.reshape(_noise, [
                            -1, train_params.enc_h_size,
                            train_params.enc_h_size, train_params.enc_filters
                        ]) * grad_redis
                        for j in range(test_params.test_batch_size):
                            pred = argmax_predictions[j]
                            predictions_form_argmax[j, pred] += 1
                        softmax_predictions = sess.run(
                            y_softmax_test,
                            feed_dict={
                                x_sb:
                                testing_img,
                                noise: (__noiseE + __noiseH) / 2 +
                                (_noiseE + _noise) / 4,
                                keep_prob:
                                1.0
                            })
                        argmax_predictions = np.argmax(softmax_predictions,
                                                       axis=1)
                    final_predictions = predictions_form_argmax
                    is_correct = []
                    is_robust = []
                    for j in range(test_params.test_batch_size):
                        is_correct.append(
                            np.argmax(test_batch[1][j]) == np.argmax(
                                final_predictions[j]))
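                        # certified robustness size derived from the vote counts;
                        # the prediction only counts as robust if that size
                        # covers the attack budget fgsm_eps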
                        robustness_from_argmax = robustnessGGaussian.robustness_size_argmax(
                            counts=predictions_form_argmax[j],
                            eta=0.05,
                            dp_attack_size=fgsm_eps,
                            dp_epsilon=train_params.dp_epsilon,
                            dp_delta=0.05,
                            dp_mechanism='gaussian') / dp_mult
                        is_robust.append(robustness_from_argmax >= fgsm_eps)
                    adv_acc_dict[atk] += np.sum(
                        is_correct) * 1.0 / test_params.test_batch_size
                    robust_adv_acc_dict[atk] += np.sum([
                        a and b for a, b in zip(is_robust, is_correct)
                    ]) * 1.0 / np.sum(is_robust)
                    robust_adv_utility_dict[atk] += np.sum(
                        is_robust) * 1.0 / test_params.test_batch_size

                    dt = time.time() - start_time
                    print('atk test time: {}s'.format(dt), flush=True)
            ##############################
            # average all acc for whole test data
            log_str += datetime.now().strftime("%Y-%m-%d_%H:%M:%S\n")
            log_str += 'model trained epoch: {}\n'.format(
                test_params.epoch_to_test)
            log_str += 'fgsm_eps: {}\n'.format(fgsm_eps)
            log_str += 'iter_step_testing: {}\n'.format(
                test_params.iter_step_testing)
            log_str += 'num_samples: {}\n'.format(test_params.num_samples)
            for atk in attacks_and_benign:
                adv_acc_dict[atk] = adv_acc_dict[atk] / test_params.test_epochs
                robust_adv_acc_dict[
                    atk] = robust_adv_acc_dict[atk] / test_params.test_epochs
                robust_adv_utility_dict[atk] = robust_adv_utility_dict[
                    atk] / test_params.test_epochs
                # added robust prediction
                log_str += " {}: {:.6f} {:.6f} {:.6f} {:.6f}\n".format(
                    atk, adv_acc_dict[atk], robust_adv_acc_dict[atk],
                    robust_adv_utility_dict[atk],
                    robust_adv_acc_dict[atk] * robust_adv_utility_dict[atk])
            dt = time.time() - eps_start_time
            print('total test time: {}s'.format(dt), flush=True)
            print(log_str, flush=True)
            print('*******************')

            log_file.write(log_str)
            log_file.write('*******************\n')
            log_file.flush()

            dt = time.time() - start_time
        log_file.close()
Ejemplo n.º 22
0
def PDP_resnet_with_pretrain_adv(TIN_data, resnet_params, train_params, params_to_save):
  # dict for encoding layer variables and output layer variables
  pre_define_vars = {}

  # list of variables to train
  train_vars = []
  pretrain_vars = []

  with tf.Graph().as_default(), tf.device('/cpu:0'):
    global_step = tf.Variable(0, trainable=False)
    
    # Parameters Declarification
    ######################################
    
    # encoding (pretrain) layer variables
    with tf.variable_scope('enc_layer', reuse=tf.AUTO_REUSE) as scope:
      kernel1 = tf.get_variable('kernel1', shape=[train_params.enc_kernel_size, train_params.enc_kernel_size, 
                                3, train_params.enc_filters], dtype=tf.float32, 
                                initializer=tf.contrib.layers.xavier_initializer_conv2d())
      biases1 = tf.get_variable('biases1', shape=[train_params.enc_filters], dtype=tf.float32, 
                                initializer=tf.constant_initializer(0.0))
    pre_define_vars['kernel1'] = kernel1
    pre_define_vars['biases1'] = biases1 
    train_vars.append(kernel1)
    train_vars.append(biases1)
    pretrain_vars.append(kernel1)
    pretrain_vars.append(biases1)
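    # The block below computes the encoding layer's sensitivity as the largest
    # singular value of the flattened kernel; gamma rescales Delta2 by the
    # effective batch size and this sensitivity (used later for epsilon2_update).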

    shape     = kernel1.get_shape().as_list()
    w_t       = tf.reshape(kernel1, [-1, shape[-1]])
    w         = tf.transpose(w_t)
    sing_vals = tf.svd(w, compute_uv=False)
    sensitivity = tf.reduce_max(sing_vals)
    gamma = 2*train_params.Delta2/(train_params.effective_batch_size * sensitivity)
    print('gamma: {}'.format(gamma))
    
    # output layer variables
    with tf.variable_scope('fc2', reuse=tf.AUTO_REUSE) as scope:
      stdv = 1.0 / math.sqrt(train_params.hk)
      final_w = tf.get_variable('kernel', shape=[train_params.hk, train_params.num_classes], dtype=tf.float32, 
                                initializer=tf.random_uniform_initializer(-stdv, stdv))
      final_b = tf.get_variable('bias', shape=[train_params.num_classes], dtype=tf.float32, 
                                initializer=tf.constant_initializer(0.0))
    pre_define_vars['final_w'] = final_w
    pre_define_vars['final_b'] = final_b 
    train_vars.append(final_w)
    train_vars.append(final_b)
    ######################################
    
    # Build a Graph that computes the logits predictions from the inputs
    ######################################
    # input placeholders
    x_sb = tf.placeholder(tf.float32, [None,train_params.image_size,train_params.image_size,3], name='x_sb') # input is the bunch of n_batchs
    x_sb_adv = tf.placeholder(tf.float32, [None,train_params.image_size,train_params.image_size,3], name='x_sb_adv')
    x_test = tf.placeholder(tf.float32, [None,train_params.image_size,train_params.image_size,3], name='x_test')

    y_sb = tf.placeholder(tf.float32, [None, train_params.num_classes], name='y_sb') # input is the bunch of n_batchs (super batch)
    y_sb_adv = tf.placeholder(tf.float32, [None, train_params.num_classes], name='y_sb_adv')
    y_test = tf.placeholder(tf.float32, [None, train_params.num_classes], name='y_test')

    FM_h = tf.placeholder(tf.float32, [None, train_params.enc_h_size, train_params.enc_h_size, train_params.enc_filters], name='FM_h') # one time
    noise = tf.placeholder(tf.float32, [None, train_params.image_size, train_params.image_size, 3], name='noise') # one time
    adv_noise = tf.placeholder(tf.float32, [None, train_params.image_size, train_params.image_size, 3], name='adv_noise') # one time

    learning_rate = tf.placeholder(tf.float32, shape=(), name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, shape=(), name='keep_prob')

    # list of grads for each GPU
    tower_pretrain_grads = []
    tower_train_grads = []
    all_train_loss = []

    # optimizers
    pretrain_opt = tf.train.AdamOptimizer(learning_rate)
    train_opt = tf.train.AdamOptimizer(learning_rate)

    # model and loss on one GPU
    with tf.device('/gpu:{}'.format(GPU_IDX[0])):
      # setup encoding layer training
      with tf.variable_scope('enc_layer', reuse=tf.AUTO_REUSE) as scope:
        Enc_Layer2 = EncLayer(inpt=x_sb, n_filter_in=None, n_filter_out=None, filter_size=None, 
                              W=kernel1, b=biases1, activation=tf.nn.relu)
        pretrain_adv = Enc_Layer2.get_train_ops2(xShape=tf.shape(x_sb_adv)[0], Delta=train_params.Delta2, 
                                                epsilon=train_params.epsilon2, batch_size=None, learning_rate=None,
                                                W=kernel1, b=biases1, perturbFMx=adv_noise, perturbFM_h=FM_h)
        Enc_Layer3 = EncLayer(inpt=x_sb, n_filter_in=None, n_filter_out=None, filter_size=None, 
                              W=kernel1, b=biases1, activation=tf.nn.relu)
        pretrain_benign = Enc_Layer3.get_train_ops2(xShape=tf.shape(x_sb)[0], Delta=train_params.Delta2, 
                                                    epsilon=train_params.epsilon2, batch_size=None, learning_rate=None,
                                                    W=kernel1, b=biases1, perturbFMx=noise, perturbFM_h=FM_h)
        pretrain_cost = tf.reduce_mean(pretrain_adv + pretrain_benign)
      print_var('pretrain_cost', pretrain_cost)
      
      # use standard loss first
      y_logits = inference(x_sb + noise, FM_h, keep_prob, pre_define_vars, resnet_params, train_params)
      y_softmax = tf.nn.softmax(y_logits)

      y_logits_adv = inference(x_sb_adv + adv_noise, FM_h, keep_prob, pre_define_vars, resnet_params, train_params)
      y_softmax_adv = tf.nn.softmax(y_logits_adv)

      # taylor exp
      # TODO: use noise here
      perturbW = train_params.perturbFM * final_w
      # train_loss = TaylorExp_no_noise(y_softmax, y_sb, y_softmax_adv, y_sb_adv, 
      #                        train_params.effective_batch_size, train_params.alpha)
      train_loss = TaylorExp(y_softmax, y_sb, y_softmax_adv, y_sb_adv, 
                             train_params.effective_batch_size, train_params.alpha, perturbW)
      print_var('train_loss', train_loss)
      all_train_loss.append(train_loss)
    
    # split testing in each gpu
    x_sb_tests = tf.split(x_sb, N_ALL_GPUS, axis=0)
    y_softmax_test_list = []
    for gpu in range(N_ALL_GPUS):
      with tf.device('/gpu:{}'.format(gpu)):
        # testing graph now in each gpu
        y_logits_test = test_inference(x_sb_tests[gpu] + noise, FM_h, keep_prob, pre_define_vars, resnet_params, train_params)
        y_softmax_test_list.append(tf.nn.softmax(y_logits_test))
    y_softmax_test_concat = tf.concat(y_softmax_test_list, axis=0)

    print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
    all_vars = tf.global_variables()
    print_var_list('all vars', all_vars)
    print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')

    # add selected vars into trainable variable list
    # ('res4' in var.name and ('gamma' in var.name or 'beta' in var.name)) or
    for var in tf.global_variables():
      if 'resnet_model' in var.name and \
        ('conv0' in var.name or 
        'fc' in var.name or 
        'res3' in var.name or 
        'res4' in var.name or 
        'res1' in var.name or 
        'res2' in var.name) and \
          ('gamma' in var.name or 
            'beta' in var.name or 
            'kernel' in var.name or
            'bias' in var.name):
        if var not in train_vars:
          train_vars.append(var)
      elif 'enc_layer' in var.name and \
        ('kernel' in var.name or
          'bias' in var.name):
        if var not in pretrain_vars:
          pretrain_vars.append(var)
        if var not in train_vars:
          train_vars.append(var)
      elif 'enc_layer' in var.name and \
        ('gamma' in var.name or 
          'beta' in var.name):
        if var not in pretrain_vars:
          pretrain_vars.append(var)
    
    print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
    print_var_list('train_vars', train_vars)
    print_var_list('pretrain_vars', pretrain_vars)
    print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')

    # op for compute grads on one gpu
    with tf.device('/gpu:{}'.format(GPU_IDX[0])):
      # get all update_ops (updates of moving average and std) for batch normalizations
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      print_op_list('update ops', update_ops)
      enc_update_ops = [op for op in update_ops if 'enc_layer' in op.name]
      print_op_list('enc layer update ops', enc_update_ops)

      # when the gradients are computed, update the batch_norm
      with tf.control_dependencies(enc_update_ops):
        pretrain_grads = pretrain_opt.compute_gradients(pretrain_cost, var_list=pretrain_vars)
        print('*********** pretrain_grads ***********')
        for x in pretrain_grads:
          print(x)
        print('**********************')
      with tf.control_dependencies(update_ops):
        train_grads = train_opt.compute_gradients(train_loss, var_list=train_vars)
        print('*********** train_grads ***********')
        for x in train_grads:
          print(x)
        print('**********************')
      avg_pretrain_grads = pretrain_grads
      avg_train_grads = train_grads
      
      # get averaged loss tensor for pretrain and train ops
      total_loss = tf.reduce_sum(tf.stack(all_train_loss))
      total_pretrain_loss = tf.reduce_mean(pretrain_cost)

    # prepare to save gradients for large batch
    pretrain_grads_save = [g for g,v in pretrain_grads]
    # print('*********** pretrain_grads_save ***********' + str(pretrain_grads_save) + '**********************')
    train_grads_save = [g for g,v in train_grads]
    # print('*********** train_grads_save ***********' + str(train_grads_save) + '**********************')
    pretrain_grads_shapes = [g.shape.as_list() for g in pretrain_grads_save]
    train_grads_shapes = [g.shape.as_list() for g in train_grads_save]
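    # Gradient-accumulation scheme: every step the per-variable gradients are
    # fetched as numpy arrays, summed over train_params.batch_multi steps, and
    # the averages are fed back through the placeholders below and applied once,
    # emulating a larger effective batch.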

    # placeholders for importing saved gradients
    pretrain_grads_placeholders = []
    for g,v in pretrain_grads:
      pretrain_grads_placeholders.append(tf.placeholder(tf.float32, v.shape))

    train_grads_placeholders = []
    for g,v in train_grads:
      train_grads_placeholders.append(tf.placeholder(tf.float32, v.shape))

    # construct the (grad, var) list
    assemble_pretrain_grads = []
    for i in range(len(pretrain_vars)):
      assemble_pretrain_grads.append((pretrain_grads_placeholders[i], pretrain_vars[i]))
    
    assemble_train_grads = []
    for i in range(len(train_grads)):
      assemble_train_grads.append((train_grads_placeholders[i], train_vars[i]))
    
    # apply the saved gradients
    pretrain_op = pretrain_opt.apply_gradients(assemble_pretrain_grads, global_step=global_step)
    train_op = train_opt.apply_gradients(assemble_train_grads, global_step=global_step)
    ######################################

    # Create a saver.
    saver = tf.train.Saver(var_list=tf.all_variables(), max_to_keep=1000)
    
    # start a session with memory growth
    config = tf.ConfigProto(log_device_placement=False)
    config.gpu_options.allow_growth=True
    sess = tf.Session(config=config)
    print("session created")

    # get some initial values
    sess.run(kernel1.initializer)
    _gamma = sess.run(gamma)
    _gamma_x = train_params.Delta2 / train_params.effective_batch_size
    epsilon2_update = train_params.epsilon2/(1.0 + 1.0/_gamma + 1/_gamma_x)
    delta_r = train_params.fgsm_eps * (train_params.image_size ** 2)
    _sensitivityW = sess.run(sensitivity)
    delta_h = _sensitivityW*(train_params.enc_h_size ** 2)
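    # delta_r bounds the input-space perturbation (fgsm_eps times the number of
    # input pixels) and delta_h bounds the feature-map perturbation via the
    # kernel sensitivity; dp_mult below converts the DP budget into the scale
    # used when certifying robustness at test time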
    #dp_mult = (train_params.Delta2 / (train_params.effective_batch_size * epsilon2_update)) / (delta_r / train_params.dp_epsilon) + \
    #  (2 * train_params.Delta2 / (train_params.effective_batch_size * epsilon2_update))/(delta_h / train_params.dp_epsilon)
    dp_mult = (train_params.Delta2*train_params.dp_epsilon) / (train_params.effective_batch_size*epsilon2_update * (delta_h / 2 + delta_r))
    # save some valus for testing
    params_to_save['epsilon2_update'] = epsilon2_update
    params_to_save['dp_mult'] = dp_mult

    #######################################
    # ADV attacks
    #######################################

    # split input for attacks
    x_attacks = tf.split(x_sb, 3, axis=0) # split it into each batch
    
    # currently only ifgsm, mim, and madry attacks are available
    attack_switch = {'fgsm':False, 'ifgsm':True, 'deepfool':False, 'mim':True, 'spsa':False, 'cwl2':False, 'madry':True, 'stm':False}
    
    # wrap the inference
    ch_model_probs = CustomCallableModelWrapper(callable_fn=inference_test_output_probs, output_layer='probs', 
                                                adv_noise=adv_noise, keep_prob=keep_prob, pre_define_vars=pre_define_vars, 
                                                resnet_params=resnet_params, train_params=train_params)
    
    # to save the reference to the attack tensors
    attack_tensor_training_dict = {}
    attack_tensor_testing_dict = {}

    # placeholder for eps parameter
    mu_alpha = tf.placeholder(tf.float32, [1])
      
    # Iterative FGSM (BasicIterativeMethod/ProjectedGradientMethod with no random init)
    # place on specific GPU
    with tf.device('/gpu:{}'.format(AUX_GPU_IDX[0])):
      print('ifgsm GPU placement')
      print('/gpu:{}'.format(AUX_GPU_IDX[0]))
      if attack_switch['ifgsm']:
          print('creating attack tensor of BasicIterativeMethod')
          ifgsm_obj = BasicIterativeMethod(model=ch_model_probs, sess=sess)
          attack_tensor_training_dict['ifgsm'] = ifgsm_obj.generate(x=x_attacks[0], eps=mu_alpha, eps_iter=mu_alpha/train_params.iter_step_training, nb_iter=train_params.iter_step_training, clip_min=-1.0, clip_max=1.0)
          attack_tensor_testing_dict['ifgsm'] = ifgsm_obj.generate(x=x_sb, eps=mu_alpha, eps_iter=mu_alpha/train_params.iter_step_testing, nb_iter=train_params.iter_step_testing, clip_min=-1.0, clip_max=1.0)

    # MomentumIterativeMethod
    # place on specific GPU
    with tf.device('/gpu:{}'.format(AUX_GPU_IDX[1])):
      print('mim GPU placement')
      print('/gpu:{}'.format(AUX_GPU_IDX[1]))
      if attack_switch['mim']:
          print('creating attack tensor of MomentumIterativeMethod')
          mim_obj = MomentumIterativeMethod(model=ch_model_probs, sess=sess)
          attack_tensor_training_dict['mim'] = mim_obj.generate(x=x_attacks[1], eps=mu_alpha, eps_iter=mu_alpha/train_params.iter_step_training, nb_iter=train_params.iter_step_training, decay_factor=1.0, clip_min=-1.0, clip_max=1.0)
          attack_tensor_testing_dict['mim'] = mim_obj.generate(x=x_sb, eps=mu_alpha, eps_iter=mu_alpha/train_params.iter_step_testing, nb_iter=train_params.iter_step_testing, decay_factor=1.0, clip_min=-1.0, clip_max=1.0)
      
    # MadryEtAl (Projected Gradient with random init, same as rand+fgsm)
    # place on specific GPU
    with tf.device('/gpu:{}'.format(AUX_GPU_IDX[2])):
      print('madry GPU placement')
      print('/gpu:{}'.format(AUX_GPU_IDX[2]))
      if attack_switch['madry']:
          print('creating attack tensor of MadryEtAl')
          madry_obj = MadryEtAl(model=ch_model_probs, sess=sess)
          attack_tensor_training_dict['madry'] = madry_obj.generate(x=x_attacks[2], eps=mu_alpha, eps_iter=mu_alpha/train_params.iter_step_training, nb_iter=train_params.iter_step_training, clip_min=-1.0, clip_max=1.0)
          attack_tensor_testing_dict['madry'] = madry_obj.generate(x=x_sb, eps=mu_alpha, eps_iter=mu_alpha/train_params.iter_step_testing, nb_iter=train_params.iter_step_testing, clip_min=-1.0, clip_max=1.0)

    # combine the tensors
    adv_tensors_concat = tf.concat([attack_tensor_training_dict[x] for x in train_params.attacks], axis=0)
    #######################################

    # init op
    print('initialize_all_variables')
    init = tf.initialize_all_variables()
    sess.run(init)

    # load pretrained variables of RESNET
    if train_params.load_weights:
      # first we need to load variable name convert table
      tgt_var_name_dict = {}
      with open(train_params.weight_table_path, 'r', encoding='utf-8') as inf:
        lines = inf.readlines()
        for line in lines:
          var_names = line.strip().split(' ')
          if var_names[1] == 'NONE':
            continue
          else:
            tgt_var_name_dict[var_names[0]] = var_names[1]

      # load variables dict from checkpoint
      pretrained_var_dict = load_pretrained_vars()

      # load pre-trained vars using name convert table
      for var in tf.global_variables():
        if var.name in tgt_var_name_dict:
          # print('var \"{}\" found'.format(var.name))
          try:
            var.load(pretrained_var_dict[tgt_var_name_dict[var.name]], session=sess)
            print('{} loaded'.format(var.name))
          except Exception:
            print('var {} not loaded since shape changed'.format(var.name))
        else:
          if 'Adam' not in var.name:
            print('var \"{}\" NOT FOUND'.format(var.name))
    else:
      print('Training model from scratch')


    #####################################
    # init noise and save for testing
    perturbH_test = np.random.laplace(0.0, 0, train_params.enc_h_size*train_params.enc_h_size*train_params.enc_filters)
    perturbH_test = np.reshape(perturbH_test, [-1, train_params.enc_h_size, train_params.enc_h_size, train_params.enc_filters])
    params_to_save['perturbH_test'] = perturbH_test
    
    perturbFM_h = np.random.laplace(0.0, 2*train_params.Delta2/(epsilon2_update*train_params.effective_batch_size), 
                                        train_params.enc_h_size*train_params.enc_h_size*train_params.enc_filters)
    perturbFM_h = np.reshape(perturbFM_h, [-1, train_params.enc_h_size, train_params.enc_h_size, train_params.enc_filters])
    params_to_save['perturbFM_h'] = perturbFM_h

    Noise = generateIdLMNoise(train_params.image_size, train_params.Delta2, epsilon2_update, train_params.effective_batch_size)
    params_to_save['Noise'] = Noise

    Noise_test = generateIdLMNoise(train_params.image_size, 0, epsilon2_update, train_params.effective_batch_size)
    params_to_save['Noise_test'] = Noise_test

    # save params for testing
    with open(os.getcwd() + train_params.params_save_path, 'wb') as outf:
      pickle.dump(params_to_save, outf)
      print('params saved')

    ####################################
    print('start pretrain')
    start_time = time.time()
    lr_schedule_list = sorted(train_params.lr_schedule_pretrain.keys())
    attacks_and_benign = train_params.attacks + ['benign']
    # build zeros numpy arrays for accumulate grads
    accumu_pretrain_grads = [np.zeros(g_shape, dtype=np.float32) for g_shape in pretrain_grads_shapes]
    total_pretrain_loss_value = 0.0
    step = 0
    # pretrain loop
    while True:
      # if enough steps, break
      if step > train_params.pretrain_steps:
        break
      # add steps here so not forgot
      else:
        step += 1

      # manual schedule learning rate
      current_epoch = step // (train_params.epoch_steps)
      current_lr = train_params.lr_schedule_pretrain[get_lr(current_epoch, lr_schedule_list)]

      # benign and adv batch
      super_batch = TIN_data.train.next_super_batch(N_GPUS, ensemble=False, random=True)
      adv_super_batch = TIN_data.train.next_super_batch(N_GPUS, ensemble=False, random=True)
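      # during pretraining the "adversarial" stream is just a second random
      # batch; actual adversarial examples are only generated in the main
      # training loop further below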

      # get pretrain grads
      pretrain_grads_save_np, _pretain_loss_value = sess.run([pretrain_grads_save, total_pretrain_loss], feed_dict={x_sb: super_batch[0], 
                                                                                                                    x_sb_adv: adv_super_batch[0], 
                                                                                                                    learning_rate: current_lr,
                                                                                                                    adv_noise: Noise_test, 
                                                                                                                    noise: Noise, 
                                                                                                                    FM_h: perturbFM_h})
      # accumulate grads
      for i in range(len(accumu_pretrain_grads)):
        accumu_pretrain_grads[i] = accumu_pretrain_grads[i] + pretrain_grads_save_np[i]
      
      # accumulate loss values
      total_pretrain_loss_value = total_pretrain_loss_value + _pretain_loss_value

      # use accumulated gradients to update variables
      if step % train_params.batch_multi == 0 and step > 0:
        # print('effective batch reached at step: {}, epoch: {}'.format(step, step / train_params.epoch_steps))
        # compute the average grads and build the feed dict
        pretrain_feed_dict = {}
        for i in range(len(accumu_pretrain_grads)):
          pretrain_feed_dict[pretrain_grads_placeholders[i]] = accumu_pretrain_grads[i] / train_params.batch_multi
        pretrain_feed_dict[learning_rate] = current_lr

        # run train ops by feeding the gradients
        sess.run(pretrain_op, feed_dict=pretrain_feed_dict)

        # get loss value
        avg_pretrain_loss_value = total_pretrain_loss_value / train_params.batch_multi

        # reset the average grads
        accumu_pretrain_grads = [np.zeros(g_shape, dtype=np.float32) for g_shape in pretrain_grads_shapes]
        total_pretrain_loss_value = 0.0

      # print loss
      if step % (1*train_params.epoch_steps) == 0 and step >= (1*train_params.epoch_steps):
        print('pretrain report at step: {}, epoch: {}'.format(step, step / train_params.epoch_steps))
        dt = time.time() - start_time
        avg_epoch_time = dt / (step / train_params.epoch_steps)
        print('epoch: {:.4f}, avg epoch time: {:.4f}, current_lr: {}'.format(step/train_params.epoch_steps, avg_epoch_time, current_lr), flush=True)
        print('pretrain_loss: {:.6f}'.format(avg_pretrain_loss_value))

    ####################################
    print('start train')
    start_time = time.time()
    lr_schedule_list = sorted(train_params.lr_schedule.keys())
    # train whole model
    # build zeros numpy arrays for accumulate grads
    accumu_pretrain_grads = [np.zeros(g_shape, dtype=np.float32) for g_shape in pretrain_grads_shapes]
    accumu_train_grads = [np.zeros(g_shape, dtype=np.float32) for g_shape in train_grads_shapes]
    total_pretrain_loss_value = 0.0
    total_train_loss_value = 0.0
    step = 0
    # train loop
    while True:
      # if enough steps, break
      if step > train_params.train_steps:
        break
      # add steps here so not forgot
      else:
        step += 1

      # compute the grads every step
      # random eps value for training
      d_eps = random.random()*train_params.random_eps_range

      # manual schedule learning rate
      current_epoch = step // (train_params.epoch_steps)
      current_lr = train_params.lr_schedule[get_lr(current_epoch, lr_schedule_list)]
      
      # benign and adv batch
      super_batch = TIN_data.train.next_super_batch(N_GPUS, ensemble=False, random=True)
      adv_super_batch = TIN_data.train.next_super_batch(N_GPUS, ensemble=False, random=True)

      # create adv samples
      super_batch_adv_images = sess.run(adv_tensors_concat, 
                                        feed_dict={x_sb:adv_super_batch[0], keep_prob:1.0,
                                                    adv_noise: Noise, mu_alpha:[d_eps]})   
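      # adv_tensors_concat runs each attack listed in train_params.attacks on
      # its own third of the batch (see x_attacks above) and concatenates the
      # results, so the adversarial super batch mixes all enabled attacks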

      # get pretrain and train grads
      pretrain_grads_save_np, _pretain_loss_value = sess.run([pretrain_grads_save, total_pretrain_loss], feed_dict={x_sb: super_batch[0], 
                                                                                                                    x_sb_adv: super_batch_adv_images, 
                                                                                                                    learning_rate: current_lr,
                                                                                                                    adv_noise: Noise_test, 
                                                                                                                    noise: Noise, 
                                                                                                                    FM_h: perturbFM_h})
      train_grads_save_np, _train_loss_value = sess.run([train_grads_save, total_loss], feed_dict = {x_sb: super_batch[0], y_sb: super_batch[1],
                                                                  x_sb_adv: super_batch_adv_images, y_sb_adv: adv_super_batch[1],
                                                                  keep_prob: train_params.keep_prob, learning_rate: current_lr,
                                                                  noise: Noise, adv_noise: Noise_test, FM_h: perturbFM_h})

      # accumulate grads
      for i in range(len(accumu_pretrain_grads)):
        accumu_pretrain_grads[i] = accumu_pretrain_grads[i] + pretrain_grads_save_np[i]

      for i in range(len(accumu_train_grads)):
        accumu_train_grads[i] = accumu_train_grads[i] + train_grads_save_np[i]

      # accumulate loss values
      total_pretrain_loss_value = total_pretrain_loss_value + _pretain_loss_value
      total_train_loss_value = total_train_loss_value + _train_loss_value
      
      # use accumulated gradients to update variables
      if step % train_params.batch_multi == 0 and step > 0:
        # compute the average grads and build the feed dict
        pretrain_feed_dict = {}
        for i in range(len(accumu_pretrain_grads)):
          pretrain_feed_dict[pretrain_grads_placeholders[i]] = accumu_pretrain_grads[i] / train_params.batch_multi
        pretrain_feed_dict[learning_rate] = current_lr
        # pretrain_feed_dict[keep_prob] = 0.5

        train_feed_dict = {}
        for i in range(len(accumu_train_grads)):
          train_feed_dict[train_grads_placeholders[i]] = accumu_train_grads[i] / train_params.batch_multi
        train_feed_dict[learning_rate] = current_lr
        # train_feed_dict[keep_prob] = 0.5

        # run train ops
        sess.run(pretrain_op, feed_dict=pretrain_feed_dict)
        sess.run(train_op, feed_dict=train_feed_dict)

        # get loss value
        avg_pretrain_loss_value = total_pretrain_loss_value / train_params.batch_multi
        avg_train_loss_value = total_train_loss_value / train_params.batch_multi

        # reset the average grads
        accumu_pretrain_grads = [np.zeros(g_shape, dtype=np.float32) for g_shape in pretrain_grads_shapes]
        accumu_train_grads = [np.zeros(g_shape, dtype=np.float32) for g_shape in train_grads_shapes]
        total_pretrain_loss_value = 0.0
        total_train_loss_value = 0.0

      # print status every epoch
      if step % int(train_params.epoch_steps) == 0:
        dt = time.time() - start_time
        avg_epoch_time = dt / (step / train_params.epoch_steps)
        print('epoch: {:.4f}, avg epoch time: {:.4f}s, current_lr: {}'.format(step/train_params.epoch_steps, avg_epoch_time, current_lr), flush=True)

      # save model
      if step % int(train_params.epoch_steps) == 0 and int(step / train_params.epoch_steps) in train_params.epochs_to_save:
        print('saving model at epoch {}'.format(step / train_params.epoch_steps))
        checkpoint_path = os.path.join(os.getcwd() + train_params.check_point_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
        
      # testing during training
      if step % int(train_params.epoch_steps) == 0 and int(step / train_params.epoch_steps) in train_params.epochs_to_test:
        test_start = time.time()
        print('train test reported at step: {}, epoch: {}'.format(step, step / train_params.epoch_steps))
        dt = time.time() - start_time
        avg_epoch_time = dt / (step / train_params.epoch_steps)
        print('epoch: {:.4f}, avg epoch time: {:.4f}s, current_lr: {}'.format(step/train_params.epoch_steps, avg_epoch_time, current_lr), flush=True)
        print('pretrain_loss: {:.6f}, train_loss: {:.6f}'.format(avg_pretrain_loss_value, avg_train_loss_value))
        # print('output layer: \n\t{}'.format(output_layer_value))

        #===================adv samples=====================
        adv_acc_dict = {}
        robust_adv_acc_dict = {}
        robust_adv_utility_dict = {}
        log_str = ''
        # cover all test data
        for i in range(train_params.test_epochs):
          test_batch = TIN_data.test.next_batch(train_params.test_batch_size)
          # if more GPUs available, generate testing adv samples at once
          if N_AUX_GPUS > 1:
            adv_images_dict = sess.run(attack_tensor_testing_dict, feed_dict ={x_sb: test_batch[0], 
                                                                               adv_noise: Noise_test, 
                                                                               mu_alpha: [train_params.fgsm_eps],
                                                                               keep_prob: 1.0})
          else:
            adv_images_dict = {}
          # test for each attack
          for atk in attacks_and_benign:
            if atk not in adv_acc_dict:
              adv_acc_dict[atk] = 0.0
              robust_adv_acc_dict[atk] = 0.0
              robust_adv_utility_dict[atk] = 0.0
            if atk == 'benign':
              testing_img = test_batch[0]
            elif attack_switch[atk]:
              # if only one gpu available, generate adv samples in-place
              if atk not in adv_images_dict:
                adv_images_dict[atk] = sess.run(attack_tensor_testing_dict[atk], feed_dict ={x_sb:test_batch[0], 
                                                                                             adv_noise: Noise_test, 
                                                                                             mu_alpha:[train_params.fgsm_eps],
                                                                                             keep_prob: 1.0})
              testing_img = adv_images_dict[atk]
            else:
              continue
            ### PixelDP Robustness ###
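            # each Monte Carlo draw re-samples the benign Laplace noise and the
            # feature-map noise, multiplies the two perturbed softmax outputs,
            # and adds the resulting argmax vote to predictions_form_argmax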
            predictions_form_argmax = np.zeros([train_params.test_batch_size, train_params.num_classes])
            softmax_predictions = sess.run(y_softmax_test_concat, feed_dict={x_sb: testing_img, noise: Noise, FM_h: perturbFM_h, keep_prob: 1.0})
            argmax_predictions = np.argmax(softmax_predictions, axis=1)
            for n_draws in range(0, train_params.num_samples):
              _BenignLNoise = generateIdLMNoise(train_params.image_size, train_params.Delta2, epsilon2_update, train_params.effective_batch_size)
              _perturbFM_h = np.random.laplace(0.0, 2*train_params.Delta2/(epsilon2_update*train_params.effective_batch_size), 
                                              train_params.enc_h_size*train_params.enc_h_size*train_params.enc_filters)
              _perturbFM_h = np.reshape(_perturbFM_h, [-1, train_params.enc_h_size, train_params.enc_h_size, train_params.enc_filters])
              for j in range(train_params.test_batch_size):
                pred = argmax_predictions[j]
                predictions_form_argmax[j, pred] += 1
              softmax_predictions = sess.run(y_softmax_test_concat, feed_dict={x_sb: testing_img, noise: (_BenignLNoise/10 + Noise), FM_h: perturbFM_h, keep_prob: 1.0}) * \
                sess.run(y_softmax_test_concat, feed_dict={x_sb: testing_img, noise: Noise, FM_h: (_perturbFM_h/10 + perturbFM_h), keep_prob: 1.0})
              argmax_predictions = np.argmax(softmax_predictions, axis=1)
            final_predictions = predictions_form_argmax
            is_correct = []
            is_robust = []
            for j in range(train_params.test_batch_size):
              is_correct.append(np.argmax(test_batch[1][j]) == np.argmax(final_predictions[j]))
              robustness_from_argmax = robustness.robustness_size_argmax(counts=predictions_form_argmax[j],
                                                                        eta=0.05, dp_attack_size=train_params.fgsm_eps, 
                                                                        dp_epsilon=train_params.dp_epsilon, dp_delta=0.05, 
                                                                        dp_mechanism='laplace') / dp_mult
              is_robust.append(robustness_from_argmax >= train_params.fgsm_eps)
            adv_acc_dict[atk] += np.sum(is_correct)*1.0/train_params.test_batch_size
            robust_adv_acc_dict[atk] += np.sum([a and b for a,b in zip(is_robust, is_correct)])*1.0/np.sum(is_robust)
            robust_adv_utility_dict[atk] += np.sum(is_robust)*1.0/train_params.test_batch_size
        ##############################
        # average all acc for whole test data
        for atk in attacks_and_benign:
          adv_acc_dict[atk] = adv_acc_dict[atk] / train_params.test_epochs
          robust_adv_acc_dict[atk] = robust_adv_acc_dict[atk] / train_params.test_epochs
          robust_adv_utility_dict[atk] = robust_adv_utility_dict[atk] / train_params.test_epochs
          # added robust prediction
          log_str += " {}: {:.6f} {:.6f} {:.6f} {:.6f}\n".format(atk, adv_acc_dict[atk], robust_adv_acc_dict[atk], robust_adv_utility_dict[atk], robust_adv_acc_dict[atk] * robust_adv_utility_dict[atk])
        dt = time.time() - test_start
        print('testing time: {}'.format(dt))
        print(log_str, flush=True)
        print('*******************')
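# A minimal, self-contained sketch (illustrative only, not taken from the example
# above) of the gradient-accumulation pattern used in the pretrain/train loops:
# fetch per-variable gradients as numpy arrays, sum them over several small steps,
# then feed the averages back through placeholders and apply them once. The helper
# names and the plain SGD optimizer are assumptions; it also assumes every
# variable in var_list receives a gradient.
import numpy as np
import tensorflow as tf


def build_accumulated_sgd(loss, var_list, learning_rate=0.01):
    # symbolic gradients plus one placeholder per variable for feeding averages back
    opt = tf.train.GradientDescentOptimizer(learning_rate)
    grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
    grad_tensors = [g for g, _ in grads_and_vars]
    grad_placeholders = [tf.placeholder(tf.float32, v.shape) for _, v in grads_and_vars]
    apply_op = opt.apply_gradients(
        [(ph, v) for ph, (_, v) in zip(grad_placeholders, grads_and_vars)])
    return grad_tensors, grad_placeholders, apply_op


def accumulated_step(sess, grad_tensors, grad_placeholders, apply_op, feed_dicts):
    # sum gradients over len(feed_dicts) small batches, then apply the average once
    acc = [np.zeros(ph.shape.as_list(), dtype=np.float32) for ph in grad_placeholders]
    for feed in feed_dicts:
        grads_np = sess.run(grad_tensors, feed_dict=feed)
        acc = [a + g for a, g in zip(acc, grads_np)]
    sess.run(apply_op,
             feed_dict={ph: a / len(feed_dicts)
                        for ph, a in zip(grad_placeholders, acc)})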
Ejemplo n.º 23
0
def mnist_tutorial(train_start=0,
                   train_end=60000,
                   test_start=0,
                   test_end=10000,
                   nb_epochs=NB_EPOCHS,
                   batch_size=BATCH_SIZE,
                   learning_rate=LEARNING_RATE,
                   train_dir=TRAIN_DIR,
                   filename=FILENAME,
                   load_model=LOAD_MODEL,
                   testing=True,
                   label_smoothing=0.1):
    """
  MNIST CleverHans tutorial
  :param train_start: index of first training set example
  :param train_end: index of last training set example
  :param test_start: index of first test set example
  :param test_end: index of last test set example
  :param nb_epochs: number of epochs to train model
  :param batch_size: size of training batches
  :param learning_rate: learning rate for training
  :param train_dir: Directory storing the saved model
  :param filename: Filename to save model under
  :param load_model: True for load, False for not load
  :param testing: if true, test error is calculated
  :param label_smoothing: float, amount of label smoothing for cross entropy
  :return: an AccuracyReport object
  """
    tf.keras.backend.set_learning_phase(0)

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    if keras.backend.image_data_format() != 'channels_last':
        raise NotImplementedError(
            "this tutorial requires keras to be configured to channels_last format"
        )

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)

    # Get MNIST test data
    mnist = MNIST(train_start=train_start,
                  train_end=train_end,
                  test_start=test_start,
                  test_end=test_end)
    x_train, y_train = mnist.get_set('train')
    x_test, y_test = mnist.get_set('test')

    # Obtain Image Parameters
    img_rows, img_cols, nchannels = x_train.shape[1:4]
    nb_classes = y_train.shape[1]

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))

    # Define TF model graph
    model = cnn_model(img_rows=img_rows,
                      img_cols=img_cols,
                      channels=nchannels,
                      nb_filters=64,
                      nb_classes=nb_classes)
    preds = model(x)
    print("Defined TensorFlow model graph.")

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        eval_params = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
        report.clean_train_clean_eval = acc
        print('Test accuracy on legitimate examples: %0.4f' % acc)

    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate,
        'train_dir': train_dir,
        'filename': filename
    }

    rng = np.random.RandomState([2017, 8, 30])
    if not os.path.exists(train_dir):
        os.mkdir(train_dir)

    ckpt = tf.train.get_checkpoint_state(train_dir)
    print(train_dir, ckpt)
    ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
    wrap = KerasModelWrapper(model)

    if load_model and ckpt_path:
        saver = tf.train.Saver()
        print(ckpt_path)
        saver.restore(sess, ckpt_path)
        print("Model loaded from: {}".format(ckpt_path))
        evaluate()
    else:
        print("Model was not loaded, training from scratch.")
        loss = CrossEntropy(wrap, smoothing=label_smoothing)
        train(sess,
              loss,
              x_train,
              y_train,
              evaluate=evaluate,
              args=train_params,
              rng=rng)

    # Calculate training error
    if testing:
        eval_params = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds, x_train, y_train, args=eval_params)
        report.train_clean_train_clean_eval = acc

    bim = BasicIterativeMethod(wrap, sess=sess)
    bim_params = {
        'eps': 0.3,
        'clip_min': 0.,
        'clip_max': 1.,
        'nb_iter': 50,
        'eps_iter': .01
    }
    adv_x = bim.generate(x, **bim_params)

    x_adv_test = sess.run(adv_x, feed_dict={x: x_test})
    x_adv_train = sess.run(adv_x, feed_dict={x: x_train})

    def evaluate_adv():
        # Evaluate the accuracy of the MNIST model on adversarial test examples
        eval_params = {'batch_size': batch_size}
        acc = model_eval(sess,
                         x,
                         y,
                         preds,
                         x_adv_test,
                         y_test,
                         args=eval_params)
        report.clean_train_adv_eval = acc
        print('Test accuracy on adversarial examples: %0.4f' % acc)

    evaluate_adv()

    save_list = [x_adv_train, x_adv_test]
    print(x_adv_train.shape)
    print(x_adv_test.shape)
    pickle.dump(save_list, open("./bim.pkl", 'wb'))
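Running the entire training and test sets through sess.run in a single call, as above, can exhaust GPU memory on larger models. A hedged sketch of a batched variant, assuming the same sess, the x placeholder and the adv_x tensor defined in the example, would be:

import numpy as np

def generate_adv_in_batches(sess, adv_x, x, data, batch_size=128):
    """Evaluate the adversarial-example graph over `data` in fixed-size chunks."""
    chunks = []
    for start in range(0, data.shape[0], batch_size):
        chunks.append(
            sess.run(adv_x, feed_dict={x: data[start:start + batch_size]}))
    return np.concatenate(chunks, axis=0)

# x_adv_test = generate_adv_in_batches(sess, adv_x, x, x_test)
# x_adv_train = generate_adv_in_batches(sess, adv_x, x, x_train)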
def train(cifar10_data, epochs, L, learning_rate, scale3, Delta2, epsilon2,
          eps2_ratio, alpha, perturbFM, fgsm_eps, total_eps, logfile):
    """Train CIFAR-10 for a number of steps."""
    logfile.write("fgsm_eps \t %g, LR \t %g, alpha \t %d , epsilon \t %d \n" %
                  (fgsm_eps, learning_rate, alpha, total_eps))
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)

        eps_benign = 1 / (1 + eps2_ratio) * (epsilon2)
        eps_adv = eps2_ratio / (1 + eps2_ratio) * (epsilon2)

        # Parameters Declarification
        #with tf.variable_scope('conv1') as scope:
        kernel1 = _variable_with_weight_decay(
            'kernel1',
            shape=[4, 4, 3, 128],
            stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
            wd=0.0,
            collect=[AECODER_VARIABLES])
        biases1 = _bias_on_cpu('biases1', [128],
                               tf.constant_initializer(0.0),
                               collect=[AECODER_VARIABLES])

        shape = kernel1.get_shape().as_list()
        w_t = tf.reshape(kernel1, [-1, shape[-1]])
        w = tf.transpose(w_t)
        sing_vals = tf.svd(w, compute_uv=False)
        sensitivity = tf.reduce_max(sing_vals)
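        # The L2 sensitivity of the first (auto-encoder) conv kernel is taken as
        # its largest singular value, computed from the SVD of the flattened weights.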
        gamma = 2 * Delta2 / (L * sensitivity)  # 2*3*(14*14 + 2)*16/(L*sensitivity)

        #with tf.variable_scope('conv2') as scope:
        kernel2 = _variable_with_weight_decay(
            'kernel2',
            shape=[5, 5, 128, 128],
            stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
            wd=0.0,
            collect=[CONV_VARIABLES])
        biases2 = _bias_on_cpu('biases2', [128],
                               tf.constant_initializer(0.1),
                               collect=[CONV_VARIABLES])
        #with tf.variable_scope('conv3') as scope:
        kernel3 = _variable_with_weight_decay(
            'kernel3',
            shape=[5, 5, 256, 256],
            stddev=np.sqrt(2.0 / (5 * 5 * 256)) / math.ceil(5 / 2),
            wd=0.0,
            collect=[CONV_VARIABLES])
        biases3 = _bias_on_cpu('biases3', [256],
                               tf.constant_initializer(0.1),
                               collect=[CONV_VARIABLES])
        #with tf.variable_scope('local4') as scope:
        kernel4 = _variable_with_weight_decay(
            'kernel4',
            shape=[int(image_size / 4)**2 * 256, hk],
            stddev=0.04,
            wd=0.004,
            collect=[CONV_VARIABLES])
        biases4 = _bias_on_cpu('biases4', [hk],
                               tf.constant_initializer(0.1),
                               collect=[CONV_VARIABLES])
        #with tf.variable_scope('local5') as scope:
        kernel5 = _variable_with_weight_decay(
            'kernel5', [hk, 10],
            stddev=np.sqrt(2.0 /
                           (int(image_size / 4)**2 * 256)) / math.ceil(5 / 2),
            wd=0.0,
            collect=[CONV_VARIABLES])
        biases5 = _bias_on_cpu('biases5', [10],
                               tf.constant_initializer(0.1),
                               collect=[CONV_VARIABLES])

        #scale2 = tf.Variable(tf.ones([hk]))
        #beta2 = tf.Variable(tf.zeros([hk]))

        params = [
            kernel1, biases1, kernel2, biases2, kernel3, biases3, kernel4,
            biases4, kernel5, biases5
        ]
        ########

        # Build a Graph that computes the logits predictions from the
        # inference model.
        FM_h = tf.placeholder(tf.float32, [None, 14, 14, 128])
        noise = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
        adv_noise = tf.placeholder(tf.float32,
                                   [None, image_size, image_size, 3])

        x = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
        adv_x = tf.placeholder(tf.float32, [None, image_size, image_size, 3])

        # Auto-Encoder #
        Enc_Layer2 = EncLayer(inpt=adv_x,
                              n_filter_in=3,
                              n_filter_out=128,
                              filter_size=3,
                              W=kernel1,
                              b=biases1,
                              activation=tf.nn.relu)
        pretrain_adv = Enc_Layer2.get_train_ops2(xShape=tf.shape(adv_x)[0],
                                                 Delta=Delta2,
                                                 epsilon=epsilon2,
                                                 batch_size=L,
                                                 learning_rate=learning_rate,
                                                 W=kernel1,
                                                 b=biases1,
                                                 perturbFMx=adv_noise,
                                                 perturbFM_h=FM_h)
        Enc_Layer3 = EncLayer(inpt=x,
                              n_filter_in=3,
                              n_filter_out=128,
                              filter_size=3,
                              W=kernel1,
                              b=biases1,
                              activation=tf.nn.relu)
        pretrain_benign = Enc_Layer3.get_train_ops2(
            xShape=tf.shape(x)[0],
            Delta=Delta2,
            epsilon=epsilon2,
            batch_size=L,
            learning_rate=learning_rate,
            W=kernel1,
            b=biases1,
            perturbFMx=noise,
            perturbFM_h=FM_h)
        cost = tf.reduce_sum((Enc_Layer2.cost + Enc_Layer3.cost) / 2.0)
        ###

        x_image = x + noise
        y_conv = inference(x_image, FM_h, params)
        softmax_y_conv = tf.nn.softmax(y_conv)
        y_ = tf.placeholder(tf.float32, [None, 10])

        adv_x += adv_noise
        y_adv_conv = inference(adv_x, FM_h, params)
        adv_y_ = tf.placeholder(tf.float32, [None, 10])

        # Calculate loss. Apply Taylor Expansion for the output layer
        perturbW = perturbFM * params[8]
        loss = cifar10.TaylorExp(y_conv, y_, y_adv_conv, adv_y_, L, alpha,
                                 perturbW)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        #pretrain_step = tf.train.AdamOptimizer(1e-4).minimize(pretrain_adv, global_step=global_step, var_list=[kernel1, biases1]);
        pretrain_var_list = tf.get_collection(AECODER_VARIABLES)
        train_var_list = tf.get_collection(CONV_VARIABLES)
        #print(pretrain_var_list)
        #print(train_var_list)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            pretrain_step = tf.train.AdamOptimizer(learning_rate).minimize(
                pretrain_adv + pretrain_benign,
                global_step=global_step,
                var_list=pretrain_var_list)
            train_op = cifar10.train(loss,
                                     global_step,
                                     learning_rate,
                                     _var_list=train_var_list)
        sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))

        sess.run(kernel1.initializer)
        dp_epsilon = 1.0
        _gamma = sess.run(gamma)
        _gamma_x = Delta2 / L
        epsilon2_update = epsilon2 / (1.0 + 1.0 / _gamma + 1 / _gamma_x)
        print(epsilon2_update / _gamma + epsilon2_update / _gamma_x)
        print(epsilon2_update)
        delta_r = fgsm_eps * (image_size**2)
        _sensitivityW = sess.run(sensitivity)
        delta_h = _sensitivityW * (14**2)
        #delta_h = 1.0 * delta_r; #sensitivity*(14**2) = sensitivity*(\beta**2) can also be used
        #dp_mult = (Delta2/(L*epsilon2))/(delta_r / dp_epsilon) + (2*Delta2/(L*epsilon2))/(delta_h / dp_epsilon)
        dp_mult = (Delta2 / (L * epsilon2_update)) / (delta_r / dp_epsilon) + (
            2 * Delta2 / (L * epsilon2_update)) / (delta_h / dp_epsilon)
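        # dp_mult sums the ratio of the Laplace noise scale to delta_r/dp_epsilon
        # (input perturbation) and to delta_h/dp_epsilon (first feature map); the
        # certified sizes from robustness_size_argmax are divided by it below.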

        dynamic_eps = tf.placeholder(tf.float32)
        """y_test = inference(x, FM_h, params)
    softmax_y = tf.nn.softmax(y_test);
    c_x_adv = fgsm(x, softmax_y, eps=dynamic_eps/3, clip_min=-1.0, clip_max=1.0)
    x_adv = tf.reshape(c_x_adv, [L, image_size, image_size, 3])"""

        attack_switch = {
            'fgsm': True,
            'ifgsm': True,
            'deepfool': False,
            'mim': True,
            'spsa': False,
            'cwl2': False,
            'madry': True,
            'stm': False
        }
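        # attack_switch gates both graph construction and the periodic evaluation:
        # only enabled attacks get a tensor in attack_tensor_dict below.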

        ch_model_probs = CustomCallableModelWrapper(
            callable_fn=inference_test_input_probs,
            output_layer='probs',
            params=params,
            image_size=image_size,
            adv_noise=adv_noise)

        # define each attack method's tensor
        mu_alpha = tf.placeholder(tf.float32, [1])
        attack_tensor_dict = {}
        # FastGradientMethod
        if attack_switch['fgsm']:
            print('creating attack tensor of FastGradientMethod')
            fgsm_obj = FastGradientMethod(model=ch_model_probs, sess=sess)
            #x_adv_test_fgsm = fgsm_obj.generate(x=x, eps=fgsm_eps, clip_min=-1.0, clip_max=1.0, ord=2) # testing now
            x_adv_test_fgsm = fgsm_obj.generate(x=x,
                                                eps=mu_alpha,
                                                clip_min=-1.0,
                                                clip_max=1.0)  # testing now
            attack_tensor_dict['fgsm'] = x_adv_test_fgsm

        # Iterative FGSM (BasicIterativeMethod / ProjectedGradientDescent with no random init)
        # default: eps_iter=0.05, nb_iter=10
        if attack_switch['ifgsm']:
            print('creating attack tensor of BasicIterativeMethod')
            ifgsm_obj = BasicIterativeMethod(model=ch_model_probs, sess=sess)
            #x_adv_test_ifgsm = ifgsm_obj.generate(x=x, eps=fgsm_eps, eps_iter=fgsm_eps/10, nb_iter=10, clip_min=-1.0, clip_max=1.0, ord=2)
            x_adv_test_ifgsm = ifgsm_obj.generate(x=x,
                                                  eps=mu_alpha,
                                                  eps_iter=fgsm_eps / 3,
                                                  nb_iter=3,
                                                  clip_min=-1.0,
                                                  clip_max=1.0)
            attack_tensor_dict['ifgsm'] = x_adv_test_ifgsm

        # MomentumIterativeMethod
        # default: eps_iter=0.06, nb_iter=10
        if attack_switch['mim']:
            print('creating attack tensor of MomentumIterativeMethod')
            mim_obj = MomentumIterativeMethod(model=ch_model_probs, sess=sess)
            #x_adv_test_mim = mim_obj.generate(x=x, eps=fgsm_eps, eps_iter=fgsm_eps/10, nb_iter=10, decay_factor=1.0, clip_min=-1.0, clip_max=1.0, ord=2)
            x_adv_test_mim = mim_obj.generate(x=x,
                                              eps=mu_alpha,
                                              eps_iter=fgsm_eps / 3,
                                              nb_iter=3,
                                              decay_factor=1.0,
                                              clip_min=-1.0,
                                              clip_max=1.0)
            attack_tensor_dict['mim'] = x_adv_test_mim

        # MadryEtAl (projected gradient descent with random init, same as rand+fgsm)
        # default: eps_iter=0.01, nb_iter=40
        if attack_switch['madry']:
            print('creating attack tensor of MadryEtAl')
            madry_obj = MadryEtAl(model=ch_model_probs, sess=sess)
            #x_adv_test_madry = madry_obj.generate(x=x, eps=fgsm_eps, eps_iter=fgsm_eps/10, nb_iter=10, clip_min=-1.0, clip_max=1.0, ord=2)
            x_adv_test_madry = madry_obj.generate(x=x,
                                                  eps=mu_alpha,
                                                  eps_iter=fgsm_eps / 3,
                                                  nb_iter=3,
                                                  clip_min=-1.0,
                                                  clip_max=1.0)
            attack_tensor_dict['madry'] = x_adv_test_madry

        #====================== attack =========================

        #adv_logits, _ = inference(c_x_adv + W_conv1Noise, perturbFM, params)

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())

        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()
        sess.run(init)

        # Start the queue runners.
        #tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(os.getcwd() + dirCheckpoint,
                                               sess.graph)

        # load the most recent models
        _global_step = 0
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
            _global_step = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        else:
            print('No checkpoint file found')

        T = int(int(math.ceil(D / L)) * epochs + 1)  # total number of training steps
        step_for_epoch = int(math.ceil(D / L))  # number of steps for one epoch

        perturbH_test = np.random.laplace(0.0, 0, 14 * 14 * 128)
        perturbH_test = np.reshape(perturbH_test, [-1, 14, 14, 128])

        #W_conv1Noise = np.random.laplace(0.0, Delta2/(L*epsilon2), 32 * 32 * 3).astype(np.float32)
        #W_conv1Noise = np.reshape(_W_conv1Noise, [32, 32, 3])

        perturbFM_h = np.random.laplace(0.0,
                                        2 * Delta2 / (epsilon2_update * L),
                                        14 * 14 * 128)
        perturbFM_h = np.reshape(perturbFM_h, [-1, 14, 14, 128])

        #_W_adv = np.random.laplace(0.0, 0, 32 * 32 * 3).astype(np.float32)
        #_W_adv = np.reshape(_W_adv, [32, 32, 3])
        #_perturbFM_h_adv = np.random.laplace(0.0, 0, 10*10*128)
        #_perturbFM_h_adv = np.reshape(_perturbFM_h_adv, [10, 10, 128]);

        test_size = len(cifar10_data.test.images)
        #beta = redistributeNoise(os.getcwd() + '/LRP_0_25_v12.txt')
        #BenignLNoise = generateIdLMNoise(image_size, Delta2, eps_benign, L) #generateNoise(image_size, Delta2, eps_benign, L, beta);
        #AdvLnoise = generateIdLMNoise(image_size, Delta2, eps_adv, L)
        Noise = generateIdLMNoise(image_size, Delta2, epsilon2_update, L)
        #generateNoise(image_size, Delta2, eps_adv, L, beta);
        Noise_test = generateIdLMNoise(
            image_size, 0, epsilon2_update,
            L)  #generateNoise(image_size, 0, 2*epsilon2, test_size, beta);

        emsemble_L = int(L / 3)
        preT_epochs = 100
        pre_T = int(int(math.ceil(D / L)) * preT_epochs + 1)
        """logfile.write("pretrain: \n")
    for step in range(_global_step, _global_step + pre_T):
        d_eps = random.random()*0.5;
        batch = cifar10_data.train.next_batch(L); #Get a random batch.
        adv_images = sess.run(x_adv, feed_dict = {x: batch[0], dynamic_eps: d_eps, FM_h: perturbH_test})
        for iter in range(0, 2):
            adv_images = sess.run(x_adv, feed_dict = {x: adv_images, dynamic_eps: d_eps, FM_h: perturbH_test})
        #sess.run(pretrain_step, feed_dict = {x: batch[0], noise: AdvLnoise, FM_h: perturbFM_h});
        batch = cifar10_data.train.next_batch(L);
        sess.run(pretrain_step, feed_dict = {x: np.append(batch[0], adv_images, axis = 0), noise: Noise, FM_h: perturbFM_h});
        if step % int(25*step_for_epoch) == 0:
            cost_value = sess.run(cost, feed_dict={x: cifar10_data.test.images, noise: Noise_test, FM_h: perturbH_test})/(test_size*128)
            logfile.write("step \t %d \t %g \n"%(step, cost_value))
            print(cost_value)
    print('pre_train finished')"""

        _global_step = 0
        for step in xrange(_global_step, _global_step + T):
            start_time = time.time()
            d_eps = random.random() * 0.5
            batch = cifar10_data.train.next_batch(emsemble_L)
            #Get a random batch.
            y_adv_batch = batch[1]
            """adv_images = sess.run(x_adv, feed_dict = {x: batch[0], dynamic_eps: d_eps, FM_h: perturbH_test})
      for iter in range(0, 2):
          adv_images = sess.run(x_adv, feed_dict = {x: adv_images, dynamic_eps: d_eps, FM_h: perturbH_test})"""
            adv_images_ifgsm = sess.run(attack_tensor_dict['ifgsm'],
                                        feed_dict={
                                            x: batch[0],
                                            adv_noise: Noise,
                                            mu_alpha: [d_eps]
                                        })
            batch = cifar10_data.train.next_batch(emsemble_L)
            y_adv_batch = np.append(y_adv_batch, batch[1], axis=0)
            adv_images_mim = sess.run(attack_tensor_dict['mim'],
                                      feed_dict={
                                          x: batch[0],
                                          adv_noise: Noise,
                                          mu_alpha: [d_eps]
                                      })
            batch = cifar10_data.train.next_batch(emsemble_L)
            y_adv_batch = np.append(y_adv_batch, batch[1], axis=0)
            adv_images_madry = sess.run(attack_tensor_dict['madry'],
                                        feed_dict={
                                            x: batch[0],
                                            adv_noise: Noise,
                                            mu_alpha: [d_eps]
                                        })
            adv_images = np.append(np.append(adv_images_ifgsm,
                                             adv_images_mim,
                                             axis=0),
                                   adv_images_madry,
                                   axis=0)

            batch = cifar10_data.train.next_batch(L)
            #Get a random batch.

            sess.run(pretrain_step,
                     feed_dict={
                         x: batch[0],
                         adv_x: adv_images,
                         adv_noise: Noise_test,
                         noise: Noise,
                         FM_h: perturbFM_h
                     })
            _, loss_value = sess.run(
                [train_op, loss],
                feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    adv_x: adv_images,
                    adv_y_: y_adv_batch,
                    noise: Noise,
                    adv_noise: Noise_test,
                    FM_h: perturbFM_h
                })
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            # report the result periodically
            if step % (50 * step_for_epoch) == 0 and step >= (300 *
                                                              step_for_epoch):
                '''predictions_form_argmax = np.zeros([test_size, 10])
          softmax_predictions = sess.run(softmax_y_conv, feed_dict={x: cifar10_data.test.images, noise: Noise_test, FM_h: perturbH_test})
          argmax_predictions = np.argmax(softmax_predictions, axis=1)
          """for n_draws in range(0, 2000):
            _BenignLNoise = generateIdLMNoise(image_size, Delta2, epsilon2, L)
            _perturbFM_h = np.random.laplace(0.0, 2*Delta2/(epsilon2*L), 14*14*128)
            _perturbFM_h = np.reshape(_perturbFM_h, [-1, 14, 14, 128]);"""
          for j in range(test_size):
            pred = argmax_predictions[j]
            predictions_form_argmax[j, pred] += 2000;
          """softmax_predictions = sess.run(softmax_y_conv, feed_dict={x: cifar10_data.test.images, noise: _BenignLNoise, FM_h: _perturbFM_h})
            argmax_predictions = np.argmax(softmax_predictions, axis=1)"""
          final_predictions = predictions_form_argmax;
          is_correct = []
          is_robust = []
          for j in range(test_size):
              is_correct.append(np.argmax(cifar10_data.test.labels[j]) == np.argmax(final_predictions[j]))
              robustness_from_argmax = robustness.robustness_size_argmax(counts=predictions_form_argmax[j],eta=0.05,dp_attack_size=fgsm_eps, dp_epsilon=1.0, dp_delta=0.05, dp_mechanism='laplace') / dp_mult
              is_robust.append(robustness_from_argmax >= fgsm_eps)
          acc = np.sum(is_correct)*1.0/test_size
          robust_acc = np.sum([a and b for a,b in zip(is_robust, is_correct)])*1.0/np.sum(is_robust)
          robust_utility = np.sum(is_robust)*1.0/test_size
          log_str = "step: {:.1f}\t epsilon: {:.1f}\t benign: {:.4f} \t {:.4f} \t {:.4f} \t {:.4f} \t".format(step, total_eps, acc, robust_acc, robust_utility, robust_acc*robust_utility)'''

                #===================adv samples=====================
                log_str = "step: {:.1f}\t epsilon: {:.1f}\t".format(
                    step, total_eps)
                """adv_images_dict = {}
          for atk in attack_switch.keys():
              if attack_switch[atk]:
                  adv_images_dict[atk] = sess.run(attack_tensor_dict[atk], feed_dict ={x:cifar10_data.test.images})
          print("Done with the generating of Adversarial samples")"""
                #===================adv samples=====================
                adv_acc_dict = {}
                robust_adv_acc_dict = {}
                robust_adv_utility_dict = {}
                test_bach_size = 5000
                for atk in attack_switch.keys():
                    print(atk)
                    if atk not in adv_acc_dict:
                        adv_acc_dict[atk] = -1
                        robust_adv_acc_dict[atk] = -1
                        robust_adv_utility_dict[atk] = -1
                    if attack_switch[atk]:
                        test_bach = cifar10_data.test.next_batch(
                            test_bach_size)
                        adv_images_dict = sess.run(attack_tensor_dict[atk],
                                                   feed_dict={
                                                       x: test_bach[0],
                                                       adv_noise: Noise_test,
                                                       mu_alpha: [fgsm_eps]
                                                   })
                        print("Done adversarial examples")
                        ### PixelDP Robustness ###
                        predictions_form_argmax = np.zeros(
                            [test_bach_size, 10])
                        softmax_predictions = sess.run(softmax_y_conv,
                                                       feed_dict={
                                                           x: adv_images_dict,
                                                           noise: Noise,
                                                           FM_h: perturbFM_h
                                                       })
                        argmax_predictions = np.argmax(softmax_predictions,
                                                       axis=1)
                        for n_draws in range(0, 1000):
                            _BenignLNoise = generateIdLMNoise(
                                image_size, Delta2, epsilon2_update, L)
                            _perturbFM_h = np.random.laplace(
                                0.0, 2 * Delta2 / (epsilon2_update * L),
                                14 * 14 * 128)
                            _perturbFM_h = np.reshape(_perturbFM_h,
                                                      [-1, 14, 14, 128])
                            if n_draws == 500:
                                print("n_draws = 500")
                            for j in range(test_bach_size):
                                pred = argmax_predictions[j]
                                predictions_form_argmax[j, pred] += 1
                            softmax_predictions = sess.run(
                                softmax_y_conv,
                                feed_dict={
                                    x: adv_images_dict,
                                    noise: (_BenignLNoise / 10 + Noise),
                                    FM_h: perturbFM_h
                                }) * sess.run(
                                    softmax_y_conv,
                                    feed_dict={
                                        x: adv_images_dict,
                                        noise: Noise,
                                        FM_h: (_perturbFM_h / 10 + perturbFM_h)
                                    })
                            #softmax_predictions = sess.run(softmax_y_conv, feed_dict={x: adv_images_dict, noise: (_BenignLNoise), FM_h: perturbFM_h}) * sess.run(softmax_y_conv, feed_dict={x: adv_images_dict, noise: Noise, FM_h: (_perturbFM_h)})
                            argmax_predictions = np.argmax(softmax_predictions,
                                                           axis=1)
                        final_predictions = predictions_form_argmax
                        is_correct = []
                        is_robust = []
                        for j in range(test_bach_size):
                            is_correct.append(
                                np.argmax(test_bach[1][j]) == np.argmax(
                                    final_predictions[j]))
                            robustness_from_argmax = robustness.robustness_size_argmax(
                                counts=predictions_form_argmax[j],
                                eta=0.05,
                                dp_attack_size=fgsm_eps,
                                dp_epsilon=dp_epsilon,
                                dp_delta=0.05,
                                dp_mechanism='laplace') / dp_mult
                            is_robust.append(
                                robustness_from_argmax >= fgsm_eps)
                        adv_acc_dict[atk] = np.sum(
                            is_correct) * 1.0 / test_bach_size
                        robust_adv_acc_dict[atk] = np.sum([
                            a and b for a, b in zip(is_robust, is_correct)
                        ]) * 1.0 / np.sum(is_robust)
                        robust_adv_utility_dict[atk] = np.sum(
                            is_robust) * 1.0 / test_bach_size
                        ##############################
                for atk in attack_switch.keys():
                    if attack_switch[atk]:
                        # added robust prediction
                        log_str += " {}: {:.4f} {:.4f} {:.4f} {:.4f}".format(
                            atk, adv_acc_dict[atk], robust_adv_acc_dict[atk],
                            robust_adv_utility_dict[atk],
                            robust_adv_acc_dict[atk] *
                            robust_adv_utility_dict[atk])
                print(log_str)
                logfile.write(log_str + '\n')

            # Report training throughput periodically.
            if step % (10 * step_for_epoch) == 0 and (step > _global_step):
                num_examples_per_step = L
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))
            """if step % (50*step_for_epoch) == 0 and (step >= 900*step_for_epoch):
Ejemplo n.º 25
0
def mnist_tutorial(train_start=0,
                   train_end=60000,
                   test_start=0,
                   test_end=10000,
                   nb_epochs=6,
                   batch_size=128,
                   epsilon=0.3,
                   learning_rate=0.001,
                   train_dir="/tmp",
                   filename="mnist.ckpt",
                   load_model=False,
                   testing=False):
    """
    MNIST CleverHans tutorial
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param train_dir: Directory storing the saved model
    :param filename: Filename to save model under
    :param load_model: True for load, False for not load
    :param testing: if true, test error is calculated
    :return: an AccuracyReport object
    """
    keras.layers.core.K.set_learning_phase(0)

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    if not hasattr(backend, "tf"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the TensorFlow backend.")

    if keras.backend.image_dim_ordering() != 'tf':
        keras.backend.set_image_dim_ordering('tf')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
              "'th', temporarily setting to 'tf'")

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)

    # Use label smoothing
    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    # Define TF model graph
    model = cnn_model_BIM()
    preds = model(x)
    print("Defined TensorFlow model graph.")

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        eval_params = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)
        report.clean_train_clean_eval = acc
        assert X_test.shape[0] == test_end - test_start, X_test.shape
        print('Test accuracy on legitimate examples: %0.4f' % acc)

    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate,
        'train_dir': train_dir,
        'filename': filename
    }
    ckpt = tf.train.get_checkpoint_state(train_dir)
    ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path

    rng = np.random.RandomState([2017, 8, 30])
    if load_model and ckpt_path:
        saver = tf.train.Saver()
        saver.restore(sess, ckpt_path)
        print("Model loaded from: {}".format(ckpt_path))
        evaluate()
    else:
        print("Model was not loaded, training from scratch.")
        model_train(sess,
                    x,
                    y,
                    preds,
                    X_train,
                    Y_train,
                    evaluate=evaluate,
                    args=train_params,
                    save=False,
                    rng=rng)

    # Calculate training error
    if testing:
        eval_params = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds, X_train, Y_train, args=eval_params)
        report.train_clean_train_clean_eval = acc

    # Initialize the Fast Gradient Sign Method (FGSM) attack object and graph
    wrap = KerasModelWrapper(model)

    print("FastGradientMethod")
    fgsm1 = FastGradientMethod(wrap, sess=sess)
    for epsilon in [0.005, 0.01, 0.05, 0.1, 0.5, 1.0]:
        print("Epsilon =", epsilon),
        fgsm_params = {'eps': epsilon, 'clip_min': None, 'clip_max': None}
        adv_x = fgsm1.generate(x, **fgsm_params)
        # Consider the attack to be constant
        adv_x = tf.stop_gradient(adv_x)
        preds_adv = model(adv_x)

        # Evaluate the accuracy of the MNIST model on adversarial examples
        eval_par = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Test accuracy on adversarial examples: %0.4f\n' % acc)
        report.clean_train_adv_eval = acc

    print("BasicIterativeMethod")
    bim = BasicIterativeMethod(wrap, sess=sess)
    for epsilon, order in zip(
        [0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 0.5, 1.0],
        [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 2, 2]):
        print("Epsilon =", epsilon),
        fgsm_params = {
            'eps': epsilon,
            'clip_min': 0.,
            'clip_max': 1.,
            'ord': order
        }
        adv_x = bim.generate(x, **fgsm_params)
        # Consider the attack to be constant
        adv_x = tf.stop_gradient(adv_x)
        preds_adv = model(adv_x)

        # Evaluate the accuracy of the MNIST model on adversarial examples
        eval_par = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Test accuracy on adversarial examples: %0.4f\n' % acc)
        report.clean_train_adv_eval = acc

    # Calculating train error
    if testing:
        eval_par = {'batch_size': batch_size}
        acc = model_eval(sess,
                         x,
                         y,
                         preds_adv,
                         X_train,
                         Y_train,
                         args=eval_par)
        report.train_clean_train_adv_eval = acc
    # Early exit: the adversarial-training section below is not executed.
    return report

    print("Repeating the process, using adversarial training")
    # Redefine TF model graph
    model_2 = cnn_model()
    preds_2 = model_2(x)
    wrap_2 = KerasModelWrapper(model_2)
    #fgsm2 = FastGradientMethod(wrap_2, sess=sess)
    bim2 = BasicIterativeMethod(wrap_2, sess=sess)
    preds_2_adv = model_2(bim2.generate(x, **fgsm_params))

    def evaluate_2():
        # Accuracy of adversarially trained model on legitimate test inputs
        eval_params = {'batch_size': batch_size}
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate examples: %0.4f' % accuracy)
        report.adv_train_clean_eval = accuracy

        # Accuracy of the adversarially trained model on adversarial examples
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2_adv,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on adversarial examples: %0.4f' % accuracy)
        report.adv_train_adv_eval = accuracy

    # Perform and evaluate adversarial training
    model_train(sess,
                x,
                y,
                preds_2,
                X_train,
                Y_train,
                predictions_adv=preds_2_adv,
                evaluate=evaluate_2,
                args=train_params,
                save=False,
                rng=rng)

    # Calculate training errors
    if testing:
        eval_params = {'batch_size': batch_size}
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2,
                              X_train,
                              Y_train,
                              args=eval_params)
        report.train_adv_train_clean_eval = accuracy
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2_adv,
                              X_train,
                              Y_train,
                              args=eval_params)
        report.train_adv_train_adv_eval = accuracy

    return report
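The sweep above overwrites report.clean_train_adv_eval at every epsilon, so only the last setting survives. A hedged sketch of collecting the whole BIM sweep into a dictionary (a hypothetical helper, assuming the tutorial's tf, model_eval, Keras model and placeholders) could be:

import numpy as np

def sweep_bim_accuracy(sess, x, y, model, bim, X_test, Y_test, batch_size=128):
    """Return {(eps, ord): adversarial accuracy} for a small illustrative sweep."""
    results = {}
    for eps, order in [(0.05, np.inf), (0.1, np.inf), (0.5, 2)]:
        adv_x = tf.stop_gradient(
            bim.generate(x, eps=eps, clip_min=0., clip_max=1., ord=order))
        preds_adv = model(adv_x)
        results[(eps, order)] = model_eval(
            sess, x, y, preds_adv, X_test, Y_test,
            args={'batch_size': batch_size})
    return results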
                                              x: x_iterloclin_proj,
                                              y: y_test
                                          })
        print('\nAccuracy: {:.2f}'.format(acc['iterloclin_proj']))

# Iterative FGSM attack
if attack_params['fgsm_iter']['run'] is True:
    fgsm_iter_params = attack_params['fgsm_iter']['params']
    batch_size = attack_params['fgsm_iter']['batch_size']
    print(
        '\nStarting iterative FGSM with eps = {:.2f}, delta = {:.2f}, niter = {}'
        .format(fgsm_iter_params['eps'], fgsm_iter_params['eps_iter'],
                fgsm_iter_params['nb_iter']))

    fgsm_iter = BasicIterativeMethod(model, sess=sess)
    adv_fgsm_iter = fgsm_iter.generate(x, y=y, **fgsm_iter_params)
    x_fgsm_iter = np.zeros(x_test.shape)
    for i in trange(num_images // batch_size):
        batch = slice(i * batch_size, (i + 1) * batch_size)
        x_fgsm_iter[batch] = sess.run(adv_fgsm_iter,
                                      feed_dict={
                                          x: x_test[batch],
                                          y: y_test[batch]
                                      })

    acc['fgsm_iter'] = sess.run(accuracy,
                                feed_dict={
                                    x: x_fgsm_iter,
                                    y: y_test
                                })
    print('\nAccuracy: {:.2f}'.format(acc['fgsm_iter']))
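The attack_params configuration read above is defined earlier in the original script and is not shown in this fragment; a plausible shape, consistent only with the keys referenced here ('run', 'batch_size' and the BIM 'params'), would be:

# hypothetical values; only the key structure is implied by the fragment above
attack_params = {
    'fgsm_iter': {
        'run': True,
        'batch_size': 128,
        'params': {'eps': 0.3, 'eps_iter': 0.01, 'nb_iter': 40,
                   'clip_min': 0., 'clip_max': 1.},
    },
}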
Ejemplo n.º 27
0
def train(fgsm_eps, _dp_epsilon, _attack_norm_bound, log_filename, ratio):
    FLAGS = None

    #ratio = 16
    #target_eps = [0.125,0.25,0.5,1,2,4,8]
    #target_eps = [0.25 + 0.25*ratio]
    target_eps = [0.2 + 0.2 * ratio]
    #print(target_eps[0])
    #fgsm_eps = 0.1
    dp_epsilon = _dp_epsilon
    image_size = 28
    _log_filename = log_filename + str(target_eps[0]) + '_fgsm_' + str(
        fgsm_eps) + '_dpeps_' + str(dp_epsilon) + '_attack_norm_bound_' + str(
            _attack_norm_bound) + '.txt'

    clip_bound = 0.001  # 'the clip bound of the gradients'
    clip_bound_2 = 1 / 1.5  # 'the clip bound for r_kM'

    small_num = 1e-5  # 'a small number'
    large_num = 1e5  # 'a large number'
    num_images = 50000  # 'number of images N'

    batch_size = 125  # 'batch_size L'
    sample_rate = batch_size / 50000  # 'sample rate q = L / N'
    # 900 epochs
    num_steps = 1800000  # 'number of steps T = E * N / L = E / q'
    num_epoch = 24  # 'number of epochs E'

    sigma = 5  # 'sigma'
    delta = 1e-5  # 'delta'

    lambd = 1e3  # 'exponential distribution parameter'

    iterative_clip_step = 2  # 'iterative_clip_step'

    clip = 1  # 'whether to clip the gradient'
    noise = 0  # 'whether to add noise'
    redistribute = 0  # 'whether to redistribute the noise'

    D = 50000

    sess = tf.InteractiveSession()

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    keep_prob = tf.placeholder(tf.float32)

    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    W_fc1 = weight_variable([7 * 7 * 64, 25])
    b_fc1 = bias_variable([25])
    W_fc2 = weight_variable([25, 10])
    b_fc2 = bias_variable([10])

    def inference(x, dp_mult):
        x_image = tf.reshape(x, [-1, 28, 28, 1])
        h_conv1 = tf.nn.relu((conv2d(x_image, W_conv1) + b_conv1) + dp_mult)
        h_pool1 = max_pool_2x2(h_conv1)
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        return y_conv, h_conv1

    def inference_prob(x):
        logits, _ = inference(x, 0)
        y_prob = tf.nn.softmax(logits)
        return y_prob

    shape = W_conv1.get_shape().as_list()
    w_t = tf.reshape(W_conv1, [-1, shape[-1]])
    w = tf.transpose(w_t)
    sing_vals = tf.svd(w, compute_uv=False)
    sensitivityW = tf.reduce_max(sing_vals)
    dp_delta = 0.05
    attack_norm_bound = _attack_norm_bound
    dp_mult = attack_norm_bound * math.sqrt(
        2 * math.log(1.25 / dp_delta)) / dp_epsilon
    noise = tf.placeholder(tf.float32, [None, 28, 28, 32])

    #y_conv, h_conv1 = inference(x, dp_mult * noise)
    y_conv, h_conv1 = inference(x, attack_norm_bound * noise)
    softmax_y = tf.nn.softmax(y_conv)
    # Define loss and optimizer

    priv_accountant = accountant.GaussianMomentsAccountant(D)
    privacy_accum_op = priv_accountant.accumulate_privacy_spending(
        [None, None], sigma, batch_size)

    # sess.run(tf.initialize_all_variables())
    sess.run(tf.global_variables_initializer())

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    #train_step = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy);
    #train_step = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy)

    # noise redistribution #
    grad, = tf.gradients(cross_entropy, h_conv1)
    normalized_grad = tf.sign(grad)
    normalized_grad = tf.stop_gradient(normalized_grad)
    normalized_grad_r = tf.abs(tf.reduce_mean(normalized_grad, axis=(0)))
    #print(normalized_grad_r)
    sum_r = tf.reduce_sum(normalized_grad_r, axis=(0, 1, 2), keepdims=False)
    #print(sum_r)
    normalized_grad_r = 256 * 32 * normalized_grad_r / sum_r
    print(normalized_grad_r)

    shape_grad = normalized_grad_r.get_shape().as_list()
    grad_t = tf.reshape(normalized_grad_r, [-1, shape_grad[-1]])
    g = tf.transpose(grad_t)
    sing_g_vals = tf.svd(g, compute_uv=False)
    sensitivity_2 = tf.reduce_max(sing_g_vals)
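    # normalized_grad_r is the batch-averaged |sign(dL/dh_conv1)| map, rescaled to
    # sum to 256*32; sensitivity_2 (its largest singular value) is used below when
    # computing sigmaHGM for the redistributed noise.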
    ########################

    opt = GradientDescentOptimizer(learning_rate=1e-1)

    # compute gradient
    gw_W1 = tf.gradients(cross_entropy, W_conv1)[0]  # gradient of W1
    gb1 = tf.gradients(cross_entropy, b_conv1)[0]  # gradient of b1

    gw_W2 = tf.gradients(cross_entropy, W_conv2)[0]  # gradient of W2
    gb2 = tf.gradients(cross_entropy, b_conv2)[0]  # gradient of b2

    gw_Wf1 = tf.gradients(cross_entropy, W_fc1)[0]  # gradient of W_fc1
    gbf1 = tf.gradients(cross_entropy, b_fc1)[0]  # gradient of b_fc1

    gw_Wf2 = tf.gradients(cross_entropy, W_fc2)[0]  # gradient of W_fc2
    gbf2 = tf.gradients(cross_entropy, b_fc2)[0]  # gradient of b_fc2

    # clip gradient
    gw_W1 = tf.clip_by_norm(gw_W1, clip_bound)
    gw_W2 = tf.clip_by_norm(gw_W2, clip_bound)
    gw_Wf1 = tf.clip_by_norm(gw_Wf1, clip_bound)
    gw_Wf2 = tf.clip_by_norm(gw_Wf2, clip_bound)

    # sigma = FLAGS.sigma # when comp_eps(lmbda,q,sigma,T,delta)==epsilon

    # sensitivity = 2 * FLAGS.clip_bound #adjacency matrix with one tuple different
    sensitivity = clip_bound  # adjacency matrix with one more tuple

    gw_W1 += tf.random_normal(shape=tf.shape(gw_W1),
                              mean=0.0,
                              stddev=(sigma * sensitivity)**2,
                              dtype=tf.float32)
    gb1 += tf.random_normal(shape=tf.shape(gb1),
                            mean=0.0,
                            stddev=(sigma * sensitivity)**2,
                            dtype=tf.float32)
    gw_W2 += tf.random_normal(shape=tf.shape(gw_W2),
                              mean=0.0,
                              stddev=(sigma * sensitivity)**2,
                              dtype=tf.float32)
    gb2 += tf.random_normal(shape=tf.shape(gb2),
                            mean=0.0,
                            stddev=(sigma * sensitivity)**2,
                            dtype=tf.float32)
    gw_Wf1 += tf.random_normal(shape=tf.shape(gw_Wf1),
                               mean=0.0,
                               stddev=(sigma * sensitivity)**2,
                               dtype=tf.float32)
    gbf1 += tf.random_normal(shape=tf.shape(gbf1),
                             mean=0.0,
                             stddev=(sigma * sensitivity)**2,
                             dtype=tf.float32)
    gw_Wf2 += tf.random_normal(shape=tf.shape(gw_Wf2),
                               mean=0.0,
                               stddev=(sigma * sensitivity)**2,
                               dtype=tf.float32)
    gbf2 += tf.random_normal(shape=tf.shape(gbf2),
                             mean=0.0,
                             stddev=(sigma * sensitivity)**2,
                             dtype=tf.float32)

    train_step = opt.apply_gradients([(gw_W1, W_conv1), (gb1, b_conv1),
                                      (gw_W2, W_conv2), (gb2, b_conv2),
                                      (gw_Wf1, W_fc1), (gbf1, b_fc1),
                                      (gw_Wf2, W_fc2), (gbf2, b_fc2)])

    # craft adversarial samples from x for testing
    #softmax_y_test = tf.nn.softmax(y_conv)

    #====================== attack =========================

    attack_switch = {
        'fgsm': True,
        'ifgsm': True,
        'deepfool': False,
        'mim': True,
        'spsa': False,
        'cwl2': False,
        'madry': True,
        'stm': False
    }

    # define cleverhans abstract models for using cleverhans attacks
    ch_model_logits = CallableModelWrapper(callable_fn=inference,
                                           output_layer='logits')
    ch_model_probs = CallableModelWrapper(callable_fn=inference_prob,
                                          output_layer='probs')

    # define each attack method's tensor
    attack_tensor_dict = {}
    # FastGradientMethod
    if attack_switch['fgsm']:
        print('creating attack tensor of FastGradientMethod')
        fgsm_obj = FastGradientMethod(model=ch_model_probs, sess=sess)
        x_adv_test_fgsm = fgsm_obj.generate(x=x,
                                            eps=fgsm_eps,
                                            clip_min=0.0,
                                            clip_max=1.0)  # testing now
        attack_tensor_dict['fgsm'] = x_adv_test_fgsm

    # Iterative FGSM (BasicIterativeMethod / ProjectedGradientDescent with no random init)
    # default: eps_iter=0.05, nb_iter=10
    if attack_switch['ifgsm']:
        print('creating attack tensor of BasicIterativeMethod')
        ifgsm_obj = BasicIterativeMethod(model=ch_model_probs, sess=sess)
        x_adv_test_ifgsm = ifgsm_obj.generate(x=x,
                                              eps=fgsm_eps,
                                              eps_iter=fgsm_eps / 10,
                                              nb_iter=10,
                                              clip_min=0.0,
                                              clip_max=1.0)
        attack_tensor_dict['ifgsm'] = x_adv_test_ifgsm

    # MomentumIterativeMethod
    # default: eps_iter=0.06, nb_iter=10
    if attack_switch['mim']:
        print('creating attack tensor of MomentumIterativeMethod')
        mim_obj = MomentumIterativeMethod(model=ch_model_probs, sess=sess)
        x_adv_test_mim = mim_obj.generate(x=x,
                                          eps=fgsm_eps,
                                          eps_iter=fgsm_eps / 10,
                                          nb_iter=10,
                                          decay_factor=1.0,
                                          clip_min=0.0,
                                          clip_max=1.0)
        attack_tensor_dict['mim'] = x_adv_test_mim

    # MadryEtAl (projected gradient descent with random init, same as rand+fgsm)
    # default: eps_iter=0.01, nb_iter=40
    if attack_switch['madry']:
        print('creating attack tensor of MadryEtAl')
        madry_obj = MadryEtAl(model=ch_model_probs, sess=sess)
        x_adv_test_madry = madry_obj.generate(x=x,
                                              eps=fgsm_eps,
                                              eps_iter=fgsm_eps / 10,
                                              nb_iter=10,
                                              clip_min=0.0,
                                              clip_max=1.0)
        attack_tensor_dict['madry'] = x_adv_test_madry

    #====================== attack =========================

    #Define the correct prediction and accuracy#
    correct_prediction_x = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy_x = tf.reduce_mean(tf.cast(correct_prediction_x, tf.float32))

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    s = math.log(sqrt(2.0 / math.pi) * 1e+5)
    sigmaEGM = sqrt(2.0) * 1.0 * (sqrt(s) +
                                  sqrt(s + dp_epsilon)) / (2.0 * dp_epsilon)
    print(sigmaEGM)
    __noiseE = np.random.normal(0.0, sigmaEGM**2,
                                28 * 28 * 32).astype(np.float32)
    __noiseE = np.reshape(__noiseE, [-1, 28, 28, 32])

    start_time = time.time()
    logfile = open(_log_filename, 'w')
    last_eval_time = -1
    accum_time = 0
    accum_epoch = 0
    max_benign_acc = -1
    max_adv_acc_dict = {}
    test_size = len(mnist.test.images)
    print("Computing The Noise Redistribution Vector")
    for i in range(4000):
        batch = mnist.train.next_batch(batch_size)
        sess.run([train_step],
                 feed_dict={
                     x: batch[0],
                     y_: batch[1],
                     keep_prob: 0.5,
                     noise: __noiseE * 0
                 })
    batch = mnist.train.next_batch(batch_size * 10)
    grad_redis = sess.run([normalized_grad_r],
                          feed_dict={
                              x: batch[0],
                              y_: batch[1],
                              keep_prob: 1.0,
                              noise: __noiseE * 0
                          })
    #print(grad_redis)
    _sensitivity_2 = sess.run([sensitivity_2],
                              feed_dict={
                                  x: batch[0],
                                  y_: batch[1],
                                  keep_prob: 1.0,
                                  noise: __noiseE * 0
                              })
    #print(_sensitivity_2)

    _sensitivityW = sess.run(sensitivityW)
    #print(_sensitivityW)
    Delta_redis = _sensitivityW / sqrt(_sensitivity_2[0])
    #print(Delta_redis)
    sigmaHGM = sqrt(2.0) * Delta_redis * (sqrt(s) + sqrt(s + dp_epsilon)) / (
        2.0 * dp_epsilon)
    #print(sigmaHGM)
    __noiseH = np.random.normal(0.0, sigmaHGM**2,
                                28 * 28 * 32).astype(np.float32)
    __noiseH = np.reshape(__noiseH, [-1, 28, 28, 32]) * grad_redis

    sess.run(tf.global_variables_initializer())
    print("Training")
    for i in range(num_steps):
        batch = mnist.train.next_batch(batch_size)
        sess.run(
            [train_step],
            feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 0.5,
                noise: (__noiseE + __noiseH) / 2
            })
        sess.run([privacy_accum_op])
        spent_eps_deltas = priv_accountant.get_privacy_spent(
            sess, target_eps=target_eps)
        if i % 1000 == 0:
            print(i, spent_eps_deltas)
        _break = False
        for _eps, _delta in spent_eps_deltas:
            if _delta >= delta:
                _break = True
                break
        if _break:
            break
    print("Testing")
    benign_acc = accuracy_x.eval(
        feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels,
            keep_prob: 1.0,
            noise: (__noiseE + __noiseH) / 2
        })
    ### PixelDP Robustness ###
    adv_acc_dict = {}
    robust_adv_acc_dict = {}
    robust_adv_utility_dict = {}
    for atk in attack_switch.keys():
        if atk not in adv_acc_dict:
            adv_acc_dict[atk] = -1
            robust_adv_acc_dict[atk] = -1
            robust_adv_utility_dict[atk] = -1

        if attack_switch[atk]:
            adv_images_dict = sess.run(attack_tensor_dict[atk],
                                       feed_dict={
                                           x: mnist.test.images,
                                           y_: mnist.test.labels,
                                           keep_prob: 1.0
                                       })
            #grad_redis = sess.run([normalized_grad_r], feed_dict={x: adv_images_dict, y_: mnist.test.labels, keep_prob: 1.0, noise:__noise})
            ### Robustness ###
            predictions_form_argmax = np.zeros([test_size, 10])
            softmax_predictions = softmax_y.eval(
                feed_dict={
                    x: adv_images_dict,
                    keep_prob: 1.0,
                    noise: (__noiseE + __noiseH) / 2
                })
            argmax_predictions = np.argmax(softmax_predictions, axis=1)
            for n_draws in range(0, 2000):
                if n_draws % 1000 == 0:
                    print(n_draws)
                _noiseE = np.random.normal(0.0, sigmaEGM**2,
                                           28 * 28 * 32).astype(np.float32)
                _noiseE = np.reshape(_noiseE, [-1, 28, 28, 32])
                _noise = np.random.normal(0.0, sigmaHGM**2,
                                          28 * 28 * 32).astype(np.float32)
                _noise = np.reshape(_noise, [-1, 28, 28, 32]) * grad_redis
                for j in range(test_size):
                    pred = argmax_predictions[j]
                    predictions_form_argmax[j, pred] += 1
                softmax_predictions = softmax_y.eval(
                    feed_dict={
                        x: adv_images_dict,
                        keep_prob: 1.0,
                        noise: (__noiseE + __noiseH) / 2 +
                        (_noiseE + _noise) / 4
                    })
                argmax_predictions = np.argmax(softmax_predictions, axis=1)
            final_predictions = predictions_form_argmax
            is_correct = []
            is_robust = []
            for j in range(test_size):
                is_correct.append(
                    np.argmax(mnist.test.labels[j]) == np.argmax(
                        final_predictions[j]))
                robustness_from_argmax = robustnessGGaussian.robustness_size_argmax(
                    counts=predictions_form_argmax[j],
                    eta=0.05,
                    dp_attack_size=fgsm_eps,
                    dp_epsilon=dp_epsilon,
                    dp_delta=1e-5,
                    dp_mechanism='gaussian') / dp_mult
                is_robust.append(robustness_from_argmax >= fgsm_eps)
            adv_acc_dict[atk] = np.sum(is_correct) * 1.0 / test_size
            robust_adv_acc_dict[atk] = np.sum([
                a and b for a, b in zip(is_robust, is_correct)
            ]) * 1.0 / np.sum(is_robust)
            robust_adv_utility_dict[atk] = np.sum(is_robust) * 1.0 / test_size
            print(" {}: {:.4f} {:.4f} {:.4f} {:.4f}".format(
                atk, adv_acc_dict[atk], robust_adv_acc_dict[atk],
                robust_adv_utility_dict[atk],
                robust_adv_acc_dict[atk] * robust_adv_utility_dict[atk]))
            ##############################
    log_str = "step: {}\t target_epsilon: {}\t dp_epsilon: {:.1f}\t attack_norm_bound: {:.1f}\t benign_acc: {:.4f}\t".format(
        i, target_eps, dp_epsilon, attack_norm_bound, benign_acc)
    for atk in attack_switch.keys():
        if attack_switch[atk]:
            log_str += " {}: {:.4f} {:.4f} {:.4f} {:.4f}".format(
                atk, adv_acc_dict[atk], robust_adv_acc_dict[atk],
                robust_adv_utility_dict[atk],
                robust_adv_acc_dict[atk] * robust_adv_utility_dict[atk])
    print(log_str)
    logfile.write(log_str + '\n')
    ##############################
    duration = time.time() - start_time
    logfile.write(str(duration) + '\n')
    logfile.flush()
    logfile.close()
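
# A minimal, self-contained sketch of the voting idea used above: repeatedly add
# Gaussian noise to a (hypothetical) scoring function and count argmax votes.
# `score_fn`, `sigma`, and `n_draws` are illustrative placeholders, not names
# from the code above; the script itself votes over the network's softmax under
# injected feature-map noise and then feeds the counts to robustness_size_argmax.
import numpy as np

def noisy_vote_counts(score_fn, x, n_classes, sigma, n_draws=100, seed=0):
    """Return per-class argmax counts of score_fn evaluated on noisy copies of x."""
    rng = np.random.RandomState(seed)
    counts = np.zeros(n_classes, dtype=np.int64)
    for _ in range(n_draws):
        noisy_x = x + rng.normal(0.0, sigma, size=x.shape)
        counts[int(np.argmax(score_fn(noisy_x)))] += 1
    return counts

# Example with a toy linear scorer:
#   W = np.random.RandomState(1).randn(10, 784)
#   counts = noisy_vote_counts(lambda v: W.dot(v), np.zeros(784), 10, sigma=0.5)
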
Ejemplo n.º 28
0
def JSMA_FGSM_BIM(train_start=0,
                  train_end=60000,
                  test_start=0,
                  test_end=10000,
                  nb_epochs=6,
                  batch_size=128,
                  learning_rate=0.001,
                  clean_train=True,
                  testing=False,
                  backprop_through_attack=False,
                  nb_filters=64):
    """
    MNIST cleverhans tutorial
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param clean_train: if True, perform normal training on clean examples
                        before performing adversarial training.
    :param testing: if true, complete an AccuracyReport for unit tests
                    to verify that performance is adequate
    :param backprop_through_attack: If True, backprop through adversarial
                                    example construction process during
                                    adversarial training.
    :param nb_filters: number of convolutional filters in the model
    :return: an AccuracyReport object
    """

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Create TF session
    sess = tf.Session()

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)
    source_samples = batch_size
    # Use label smoothing
    # Hopefully this doesn't screw up JSMA...
    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    model_path = "models/mnist"
    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    eval_par = {'batch_size': batch_size}
    rng = np.random.RandomState([2017, 8, 30])

    if clean_train:
        model = make_basic_cnn(nb_filters=nb_filters)
        preds = model.get_probs(x)

        def evaluate():
            # Evaluate the accuracy of the MNIST model on legitimate test
            # examples
            eval_params = {'batch_size': batch_size}
            acc = model_eval(sess,
                             x,
                             y,
                             preds,
                             X_test,
                             Y_test,
                             args=eval_params)
            report.clean_train_clean_eval = acc
            assert X_test.shape[0] == test_end - test_start, X_test.shape
            print('Test accuracy on legitimate examples: %0.4f' % acc)

        model_train(sess,
                    x,
                    y,
                    preds,
                    X_train,
                    Y_train,
                    evaluate=evaluate,
                    args=train_params,
                    rng=rng)
        print("#####Starting attacks on clean model#####")
        #################################################################
        #Clean test against JSMA
        jsma_params = {
            'theta': 1.,
            'gamma': 0.1,
            'clip_min': 0.,
            'clip_max': 1.,
            'y_target': None
        }
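        # In SaliencyMapMethod, theta is the perturbation applied to each
        # selected feature and gamma caps the fraction of features that may be
        # modified; y_target=None means no explicit target class is supplied.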

        jsma = SaliencyMapMethod(model, back='tf', sess=sess)
        adv_x = jsma.generate(x, **jsma_params)
        preds_adv = model.get_probs(adv_x)

        # Evaluate the accuracy of the MNIST model on JSMA adversarial examples
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Clean test accuracy on JSMA adversarial examples: %0.4f' % acc)
        ################################################################
        #Clean test against FGSM
        fgsm_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.}

        fgsm = FastGradientMethod(model, sess=sess)
        adv_x = fgsm.generate(x, **fgsm_params)
        preds_adv = model.get_probs(adv_x)

        # Evaluate the accuracy of the MNIST model on FGSM adversarial examples
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Clean test accuracy on FGSM adversarial examples: %0.4f' % acc)
        ################################################################
        #Clean test against BIM
        bim_params = {
            'eps': 0.3,
            'eps_iter': 0.01,
            'nb_iter': 100,
            'clip_min': 0.,
            'clip_max': 1.
        }
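        # eps bounds the total L-infinity perturbation, eps_iter is the step
        # size for each of the nb_iter iterations; since eps_iter * nb_iter
        # exceeds eps, the overall perturbation is effectively capped at 0.3.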
        bim = BasicIterativeMethod(model, sess=sess)
        adv_x = bim.generate(x, **bim_params)
        preds_adv = model.get_probs(adv_x)

        # Evaluate the accuracy of the MNIST model on BIM adversarial examples
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Clean test accuracy on BIM adversarial examples: %0.4f' % acc)
        ################################################################
        #Clean test against EN
        en_params = {
            'binary_search_steps': 1,
            #'y': None,
            'max_iterations': 100,
            'learning_rate': 0.1,
            'batch_size': source_samples,
            'initial_const': 10
        }
        en = ElasticNetMethod(model, back='tf', sess=sess)
        adv_x = en.generate(x, **en_params)
        preds_adv = model.get_probs(adv_x)

        # Evaluate the accuracy of the MNIST model on Elastic Net adversarial examples
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Clean test accuracy on EN adversarial examples: %0.4f' % acc)
        ################################################################
        #Clean test against DF
        deepfool_params = {
            'nb_candidate': 10,
            'overshoot': 0.02,
            'max_iter': 50,
            'clip_min': 0.,
            'clip_max': 1.
        }
        deepfool = DeepFool(model, sess=sess)
        adv_x = deepfool.generate(x, **deepfool_params)
        preds_adv = model.get_probs(adv_x)

        # Evaluate the accuracy of the MNIST model on DeepFool adversarial examples
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Clean test accuracy on DF adversarial examples: %0.4f' % acc)
        ################################################################
        #Clean test against VAT
        vat_params = {
            'eps': 2.0,
            'num_iterations': 1,
            'xi': 1e-6,
            'clip_min': 0.,
            'clip_max': 1.
        }
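        # For VirtualAdversarialMethod, eps is the size of the final VAT
        # perturbation, num_iterations the number of power-iteration steps
        # used to estimate the adversarial direction, and xi the small
        # finite-difference scale used during that estimation.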
        vat = VirtualAdversarialMethod(model, sess=sess)
        adv_x = vat.generate(x, **vat_params)
        preds_adv = model.get_probs(adv_x)

        # Evaluate the accuracy of the MNIST model on VAT adversarial examples
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Clean test accuracy on VAT adversarial examples: %0.4f\n' % acc)
        ################################################################
        print("Repeating the process, using adversarial training\n")
    # Redefine TF model graph
    model_2 = make_basic_cnn(nb_filters=nb_filters)
    preds_2 = model_2(x)
    #################################################################
    #Adversarial test against JSMA
    jsma_params = {
        'theta': 1.,
        'gamma': 0.1,
        'clip_min': 0.,
        'clip_max': 1.,
        'y_target': None
    }

    # Build the attacks against model_2 (the model being adversarially trained)
    # so that the adversarial loss below has gradients w.r.t. its parameters.
    jsma = SaliencyMapMethod(model_2, back='tf', sess=sess)
    adv_x = jsma.generate(x, **jsma_params)
    preds_adv_jsma = model_2.get_probs(adv_x)
    ################################################################
    #Adversarial test against FGSM
    fgsm_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.}

    fgsm = FastGradientMethod(model_2, sess=sess)
    adv_x = fgsm.generate(x, **fgsm_params)
    preds_adv_fgsm = model_2.get_probs(adv_x)
    ################################################################
    #Adversarial test against BIM
    bim_params = {
        'eps': 0.3,
        'eps_iter': 0.01,
        'nb_iter': 100,
        'clip_min': 0.,
        'clip_max': 1.
    }
    bim = BasicIterativeMethod(model_2, sess=sess)
    adv_x = bim.generate(x, **bim_params)
    preds_adv_bim = model_2.get_probs(adv_x)
    ################################################################
    #Adversarial test against EN
    en_params = {
        'binary_search_steps': 5,
        #'y': None,
        'max_iterations': 100,
        'learning_rate': 0.1,
        'batch_size': source_samples,
        'initial_const': 10
    }
    en = ElasticNetMethod(model_2, back='tf', sess=sess)
    adv_x = en.generate(x, **en_params)
    preds_adv_en = model_2.get_probs(adv_x)
    ################################################################
    #Adversarial test against DF
    deepfool_params = {
        'nb_candidate': 10,
        'overshoot': 0.02,
        'max_iter': 200,
        'clip_min': 0.,
        'clip_max': 1.
    }
    deepfool = DeepFool(model_2, sess=sess)
    adv_x = deepfool.generate(x, **deepfool_params)
    preds_adv_df = model_2.get_probs(adv_x)
    ################################################################
    #Adversarial test against VAT
    vat_params = {
        'eps': 2.0,
        'num_iterations': 1,
        'xi': 1e-6,
        'clip_min': 0.,
        'clip_max': 1.
    }
    vat = VirtualAdversarialMethod(model_2, sess=sess)
    adv_x = vat.generate(x, **vat_params)
    preds_adv_vat = model_2.get_probs(adv_x)
    ################################################################
    print("#####Evaluate trained model#####")

    def evaluate_2():
        # Evaluate the accuracy of the MNIST model on JSMA adversarial examples
        acc = model_eval(sess,
                         x,
                         y,
                         preds_adv_jsma,
                         X_test,
                         Y_test,
                         args=eval_par)
        print('Test accuracy on JSMA adversarial examples: %0.4f' % acc)

        # Evaluate the accuracy of the MNIST model on FGSM adversarial examples
        acc = model_eval(sess,
                         x,
                         y,
                         preds_adv_fgsm,
                         X_test,
                         Y_test,
                         args=eval_par)
        print('Test accuracy on FGSM adversarial examples: %0.4f' % acc)

        # Evaluate the accuracy of the MNIST model on BIM adversarial examples
        acc = model_eval(sess,
                         x,
                         y,
                         preds_adv_bim,
                         X_test,
                         Y_test,
                         args=eval_par)
        print('Test accuracy on BIM adversarial examples: %0.4f' % acc)

        # Evaluate the accuracy of the MNIST model on EN adversarial examples
        acc = model_eval(sess,
                         x,
                         y,
                         preds_adv_en,
                         X_test,
                         Y_test,
                         args=eval_par)
        print('Test accuracy on EN adversarial examples: %0.4f' % acc)

        # Evaluate the accuracy of the MNIST model on DF adversarial examples
        acc = model_eval(sess,
                         x,
                         y,
                         preds_adv_df,
                         X_test,
                         Y_test,
                         args=eval_par)
        print('Test accuracy on DF adversarial examples: %0.4f' % acc)

        # Evaluate the accuracy of the MNIST model on VAT adversarial examples
        acc = model_eval(sess,
                         x,
                         y,
                         preds_adv_vat,
                         X_test,
                         Y_test,
                         args=eval_par)
        print('Test accuracy on VAT adversarial examples: %0.4f\n' % acc)

    preds_2_adv = [
        preds_adv_jsma, preds_adv_fgsm, preds_adv_bim
        # ,preds_adv_en
        # ,preds_adv_df
    ]

    model_train(sess,
                x,
                y,
                preds_2,
                X_train,
                Y_train,
                predictions_adv=preds_2_adv,
                evaluate=evaluate_2,
                args=train_params,
                rng=rng)

    return report
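
# A minimal usage sketch (assumption: nb_epochs=1 is only to keep a smoke test
# short; any of the keyword arguments above can be overridden the same way):
#
#   report = JSMA_FGSM_BIM(nb_epochs=1, batch_size=128, clean_train=True)
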
def mnist_tutorial(train_start=0,
                   train_end=60000,
                   test_start=0,
                   test_end=10000,
                   nb_epochs=6,
                   batch_size=128,
                   learning_rate=0.001,
                   clean_train=True,
                   testing=False,
                   backprop_through_attack=False,
                   nb_filters=64):
    """
    MNIST cleverhans tutorial
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param clean_train: if True, perform normal training on clean examples
                        before performing adversarial training.
    :param testing: if true, complete an AccuracyReport for unit tests
                    to verify that performance is adequate
    :param backprop_through_attack: If True, backprop through adversarial
                                    example construction process during
                                    adversarial training.
    :param nb_filters: number of convolutional filters in the model
    :return: an AccuracyReport object
    """

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Create TF session
    sess = tf.Session()

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)

    # Use label smoothing
    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    model_path = "models/mnist"
    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    fgsm_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.}
    bim_params = {
        'eps': 0.3,
        'eps_iter': 0.01,
        'nb_iter': 100,
        'clip_min': 0.,
        'clip_max': 1.
    }
    rng = np.random.RandomState([2017, 8, 30])

    if clean_train:
        model = make_basic_cnn(nb_filters=nb_filters)
        preds = model.get_probs(x)

        def evaluate():
            # Evaluate the accuracy of the MNIST model on legitimate test
            # examples
            eval_params = {'batch_size': batch_size}
            acc = model_eval(sess,
                             x,
                             y,
                             preds,
                             X_test,
                             Y_test,
                             args=eval_params)
            report.clean_train_clean_eval = acc
            assert X_test.shape[0] == test_end - test_start, X_test.shape
            print('Test accuracy on legitimate examples: %0.4f' % acc)

        model_train(sess,
                    x,
                    y,
                    preds,
                    X_train,
                    Y_train,
                    evaluate=evaluate,
                    args=train_params,
                    rng=rng)

        # Calculate training error
        if testing:
            eval_params = {'batch_size': batch_size}
            acc = model_eval(sess,
                             x,
                             y,
                             preds,
                             X_train,
                             Y_train,
                             args=eval_params)
            report.train_clean_train_clean_eval = acc

        # Initialize the Fast Gradient Sign Method (FGSM) attack object and
        # graph
        fgsm = FastGradientMethod(model, sess=sess)
        adv_x = fgsm.generate(x, **fgsm_params)
        preds_adv = model.get_probs(adv_x)

        # Evaluate the accuracy of the MNIST model on FGSM adversarial examples
        eval_par = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Test accuracy on FGSM adversarial examples: %0.4f\n' % acc)

        # Calculate training error
        if testing:
            eval_par = {'batch_size': batch_size}
            acc = model_eval(sess,
                             x,
                             y,
                             preds_adv,
                             X_train,
                             Y_train,
                             args=eval_par)
            report.train_clean_train_adv_eval = acc

        # Init the Basic Iterative Method attack object and graph
        bim = BasicIterativeMethod(model, sess=sess)
        adv_x_2 = bim.generate(x, **bim_params)
        preds_adv_2 = model.get_probs(adv_x_2)

        # Evaluate the accuracy of the MNIST model on Basic Iterative Method adversarial examples
        acc = model_eval(sess,
                         x,
                         y,
                         preds_adv_2,
                         X_test,
                         Y_test,
                         args=eval_par)
        print(
            'Test accuracy on Basic Iterative Method adversarial examples: %0.4f\n'
            % acc)

        # Calculate training error on BIM adversarial examples
        if testing:
            eval_par = {'batch_size': batch_size}
            acc = model_eval(sess,
                             x,
                             y,
                             preds_adv_2,
                             X_train,
                             Y_train,
                             args=eval_par)
            report.train_clean_train_adv_eval = acc

        print("Repeating the process, using adversarial training")
    # Redefine TF model graph
    model_2 = make_basic_cnn(nb_filters=nb_filters)
    preds_2 = model_2(x)
    fgsm2 = FastGradientMethod(model_2, sess=sess)
    adv_x_fgsm = fgsm2.generate(x, **fgsm_params)
    if not backprop_through_attack:
        # For the fgsm attack used in this tutorial, the attack has zero
        # gradient so enabling this flag does not change the gradient.
        # For some other attacks, enabling this flag increases the cost of
        # training, but gives the defender the ability to anticipate how
        # the attacker will change their strategy in response to updates to
        # the defender's parameters.
        adv_x_fgsm = tf.stop_gradient(adv_x_fgsm)
    preds_2_adv_fgsm = model_2(adv_x_fgsm)

    bim2 = BasicIterativeMethod(model_2, sess=sess)
    adv_x_bim = bim2.generate(x, **bim_params)
    preds_2_adv_bim = model_2(adv_x_bim)

    # X_train_rep = np.tile(X_train, [2, 1, 1, 1])
    # Y_train_rep = np.tile(Y_train, [2, 1])
    # preds_2_rep = tf.tile(preds_2, [2, 1])
    # preds_2_adv = model_2(tf.concat([adv_x_fgsm, adv_x_bim], 0))
    #
    # # Perform and evaluate adversarial training
    # model_train(sess, x, y, preds_2_rep, X_train_rep, Y_train_rep,
    #             predictions_adv=preds_2_adv,
    #             args=train_params, rng=rng)

    def evaluate_2():
        # evaluate the final result of the model
        eval_params = {'batch_size': batch_size}
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate examples: %0.4f' % accuracy)

        # Accuracy of the adversarially trained model on FGSM adversarial examples
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2_adv_fgsm,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on FGSM adversarial examples: %0.4f' % accuracy)

        # Accuracy of the adversarially trained model on Basic Iterative Method adversarial examples
        accuracy = model_eval(sess,
                              x,
                              y,
                              preds_2_adv_bim,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on BIM adversarial examples: %0.4f' % accuracy)

    preds_2_adv = [preds_2_adv_fgsm, preds_2_adv_bim]
    model_train(sess,
                x,
                y,
                preds_2,
                X_train,
                Y_train,
                predictions_adv=preds_2_adv,
                evaluate=evaluate_2,
                args=train_params,
                rng=rng)

    return report
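
# A minimal driver sketch for running the tutorial above directly; the
# single-epoch settings are an assumption meant only for a quick smoke test.
if __name__ == '__main__':
    report = mnist_tutorial(nb_epochs=1, batch_size=128, learning_rate=0.001)
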
def mnist_tutorial_jsma(train_start=0, train_end=5500, test_start=0,
                        test_end=1000, nb_epochs=8,
                        batch_size=100, nb_classes=10,
                        nb_filters=64,
                        learning_rate=0.001):
    """
    MNIST tutorial for the Jacobian-based saliency map approach (JSMA)
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param nb_classes: number of output classes
    :param nb_filters: number of convolutional filters in the model
    :param learning_rate: learning rate for training
    :return: an AccuracyReport object
    """
    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    print("Created TensorFlow session.")

    set_log_level(logging.DEBUG)

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)

    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    # Define TF model graph
    model = make_basic_cnn()
    preds = model(x)
    print("Defined TensorFlow model graph.")

    ###########################################################################
    # Training the model using TensorFlow
    ###########################################################################

    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    # sess.run(tf.global_variables_initializer())
    rng = np.random.RandomState([2017, 8, 30])

    print("x_train shape: ", X_train.shape)
    print("y_train shape: ", Y_train.shape)

    # Train without per-batch logging (verbose=False)
    model_train(sess, x, y, preds, X_train, Y_train, args=train_params,
                verbose=False, rng=rng)

    f_out_clean = open("Clean_jsma_elastic_against5.log", "w")

    # Evaluate the accuracy of the MNIST model on legitimate test examples
    eval_params = {'batch_size': batch_size}
    accuracy = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)
    assert X_test.shape[0] == test_end - test_start, X_test.shape
    print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
    f_out_clean.write('Test accuracy on legitimate test examples: ' + str(accuracy) + '\n')


    # Clean test against JSMA
    jsma_params = {'theta': 1., 'gamma': 0.1,
                   'clip_min': 0., 'clip_max': 1.,
                   'y_target': None}

    jsma = SaliencyMapMethod(model, back='tf', sess=sess)
    adv_x_jsma = jsma.generate(x, **jsma_params)
    preds_adv_jsma = model.get_probs(adv_x_jsma)

    # Evaluate the accuracy of the MNIST model on JSMA adversarial examples
    acc = model_eval(sess, x, y, preds_adv_jsma, X_test, Y_test, args=eval_params)
    print('Clean test accuracy on JSMA adversarial examples: %0.4f' % acc)
    f_out_clean.write('Clean test accuracy on JSMA adversarial examples: ' + str(acc) + '\n')

    ################################################################
    # Clean test against FGSM
    fgsm_params = {'eps': 0.3,
                   'clip_min': 0.,
                   'clip_max': 1.}

    fgsm = FastGradientMethod(model, sess=sess)
    adv_x_fgsm = fgsm.generate(x, **fgsm_params)
    preds_adv_fgsm = model.get_probs(adv_x_fgsm)

    # Evaluate the accuracy of the MNIST model on FGSM adversarial examples
    acc = model_eval(sess, x, y, preds_adv_fgsm, X_test, Y_test, args=eval_params)
    print('Clean test accuracy on FGSM adversarial examples: %0.4f' % acc)
    f_out_clean.write('Clean test accuracy on FGSM adversarial examples: ' + str(acc) + '\n')


    ################################################################
    # Clean test against BIM
    bim_params = {'eps': 0.3,
                  'eps_iter': 0.01,
                  'nb_iter': 100,
                  'clip_min': 0.,
                  'clip_max': 1.}
    bim = BasicIterativeMethod(model, sess=sess)
    adv_x_bim = bim.generate(x, **bim_params)
    preds_adv_bim = model.get_probs(adv_x_bim)

    # Evaluate the accuracy of the MNIST model on BIM adversarial examples
    acc = model_eval(sess, x, y, preds_adv_bim, X_test, Y_test, args=eval_params)
    print('Clean test accuracy on BIM adversarial examples: %0.4f' % acc)
    f_out_clean.write('Clean test accuracy on BIM adversarial examples: ' + str(acc) + '\n')

    ################################################################
    # Clean test against EN
    en_params = {'binary_search_steps': 1,
                 # 'y': None,
                 'max_iterations': 100,
                 'learning_rate': 0.1,
                 'batch_size': batch_size,
                 'initial_const': 10}
    en = ElasticNetMethod(model, back='tf', sess=sess)
    adv_x_en = en.generate(x, **en_params)
    preds_adv_en = model.get_probs(adv_x_en)

    # Evaluate the accuracy of the MNIST model on Elastic Net adversarial examples
    acc = model_eval(sess, x, y, preds_adv_en, X_test, Y_test, args=eval_params)
    print('Clean test accuracy on EN adversarial examples: %0.4f' % acc)
    f_out_clean.write('Clean test accuracy on EN adversarial examples: ' + str(acc) + '\n')
    ################################################################
    # Clean test against DF
    deepfool_params = {'nb_candidate': 10,
                       'overshoot': 0.02,
                       'max_iter': 50,
                       'clip_min': 0.,
                       'clip_max': 1.}
    deepfool = DeepFool(model, sess=sess)
    adv_x_df = deepfool.generate(x, **deepfool_params)
    preds_adv_df = model.get_probs(adv_x_df)

    # Evaluate the accuracy of the MNIST model on DeepFool adversarial examples
    acc = model_eval(sess, x, y, preds_adv_df, X_test, Y_test, args=eval_params)
    print('Clean test accuracy on DF adversarial examples: %0.4f' % acc)
    f_out_clean.write('Clean test accuracy on DF adversarial examples: ' + str(acc) + '\n')

    ################################################################
    # Clean test against VAT
    vat_params = {'eps': 2.0,
                  'num_iterations': 1,
                  'xi': 1e-6,
                  'clip_min': 0.,
                  'clip_max': 1.}
    vat = VirtualAdversarialMethod(model, sess=sess)
    adv_x_vat = vat.generate(x, **vat_params)
    preds_adv_vat = model.get_probs(adv_x_vat)

    # Evaluate the accuracy of the MNIST model on VAT adversarial examples
    acc = model_eval(sess, x, y, preds_adv_vat, X_test, Y_test, args=eval_params)
    print('Clean test accuracy on VAT adversarial examples: %0.4f\n' % acc)
    f_out_clean.write('Clean test accuracy on VAT adversarial examples: ' + str(acc) + '\n')

    f_out_clean.close()

    ###########################################################################
    # Craft adversarial examples using the Jacobian-based saliency map approach
    ###########################################################################
    print('Crafting ' + str(X_train.shape[0]) + ' * ' + str(nb_classes-1) +
          ' adversarial examples')


    model_2 = make_basic_cnn()
    preds_2 = model(x)

    # Initialize variables so that the numpy-based attacks below (generate_np)
    # can be run before the adversarial training step.
    sess.run(tf.global_variables_initializer())

    # 1. Instantiate a SaliencyMapMethod attack object
    jsma = SaliencyMapMethod(model_2, back='tf', sess=sess)
    jsma_params = {'theta': 1., 'gamma': 0.1,
                   'clip_min': 0., 'clip_max': 1.,
                   'y_target': None}
    adv_random = jsma.generate(x, **jsma_params)
    preds_adv_random = model_2.get_probs(adv_random)

    # 2. Instantiate FGSM attack
    fgsm_params = {'eps': 0.3,
                   'clip_min': 0.,
                   'clip_max': 1.}
    fgsm = FastGradientMethod(model_2, sess=sess)
    adv_x_fgsm = fgsm.generate(x, **fgsm_params)
    preds_adv_fgsm = model_2.get_probs(adv_x_fgsm)


    # 3. Instantiate Elastic net attack
    en_params = {'binary_search_steps': 5,
                 # 'y': None,
                 'max_iterations': 100,
                 'learning_rate': 0.1,
                 'batch_size': batch_size,
                 'initial_const': 10}
    enet = ElasticNetMethod(model_2, sess=sess)
    adv_x_en = enet.generate(x, **en_params)
    preds_adv_elastic_net = model_2.get_probs(adv_x_en)

    # 4. Deepfool
    deepfool_params = {'nb_candidate': 10,
                       'overshoot': 0.02,
                       'max_iter': 50,
                       'clip_min': 0.,
                       'clip_max': 1.}
    deepfool = DeepFool(model_2, sess=sess)
    adv_x_df = deepfool.generate(x, **deepfool_params)
    preds_adv_deepfool = model_2.get_probs(adv_x_df)

    # 5. Basic Iterative Method (BIM)
    bim_params = {'eps': 0.3,
                  'eps_iter': 0.01,
                  'nb_iter': 100,
                  'clip_min': 0.,
                  'clip_max': 1.}
    base_iter = BasicIterativeMethod(model_2, sess=sess)
    adv_x_bi = base_iter.generate(x, **bim_params)
    preds_adv_base_iter = model_2.get_probs(adv_x_bi)

    # 6. C & W Attack
    cw = CarliniWagnerL2(model_2, back='tf', sess=sess)
    cw_params = {'binary_search_steps': 1,
                 # 'y': None,
                 'max_iterations': 100,
                 'learning_rate': 0.1,
                 'batch_size': batch_size,
                 'initial_const': 10}
    adv_x_cw = cw.generate(x, **cw_params)
    preds_adv_cw = model_2.get_probs(adv_x_cw)
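    # Note: binary_search_steps=1 with 100 iterations is a deliberately cheap
    # C&W configuration, suited to a quick comparison; the attack is usually
    # run with more binary-search steps and iterations at full strength.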

    # 7. Virtual Adversarial Method (VAT)
    vat_params = {'eps': 2.0,
                  'num_iterations': 1,
                  'xi': 1e-6,
                  'clip_min': 0.,
                  'clip_max': 1.}
    vat = VirtualAdversarialMethod(model_2, sess=sess)
    adv_x = vat.generate(x, **vat_params)
    preds_adv_vat = model_2.get_probs(adv_x)


    # Generate one targeted adversarial example per non-true class
    # (nb_classes - 1 targets) for every training sample via the
    # Jacobian-based saliency map approach. Note that generate_np runs per
    # sample and per target, so this loop is expensive.
    # Loop over the samples we want to perturb into adversarial examples

    X_train_adv_set = []
    Y_train_adv_set = []
    for index in range(X_train.shape[0]):
        print('--------------------------------------')
        x_val = X_train[index:(index+1)]
        y_val = Y_train[index]


        # Also keep the clean (unperturbed) sample in the augmented training set
        X_train_adv_set.append(x_val)
        Y_train_adv_set.append(y_val)

        # We want to find an adversarial example for each possible target class
        # (i.e. all classes that differ from the label given in the dataset)
        current_class = int(np.argmax(y_val))
        target_classes = other_classes(nb_classes, current_class)
        # Loop over all target classes
        for target in target_classes:
            # print('Generating adv. example for target class %i' % target)
            # This call runs the Jacobian-based saliency map approach

            one_hot_target = np.zeros((1, nb_classes), dtype=np.float32)
            one_hot_target[0, target] = 1
            jsma_params['y_target'] = one_hot_target
            adv_x = jsma.generate_np(x_val, **jsma_params)

            # append to X_train_adv_set and Y_train_adv_set
            X_train_adv_set.append(adv_x)
            Y_train_adv_set.append(y_val)

            # shape is: (1, 28, 28, 1)
            # print("adv_x shape is: ", adv_x.shape)

            # check for success rate
            # res = int(model_argmax(sess, x, preds, adv_x) == target)

    print('-------------Finished Generating Np Adversarial Data-------------------------')

    X_train_data = np.concatenate(X_train_adv_set, axis=0)
    Y_train_data = np.stack(Y_train_adv_set, axis=0)
    print("X_train_data shape is: ", X_train_data.shape)
    print("Y_train_data shape is: ", Y_train_data.shape)

    # Save the augmented data so this crafting step does not have to be re-run
    np.savez("jsma_training_data.npz", x_train=X_train_data,
             y_train=Y_train_data)

    # To reload later:
    #   data = np.load("jsma_training_data.npz")
    #   X_train_data, Y_train_data = data["x_train"], data["y_train"]

    f_out = open("Adversarial_jsma_elastic_against5.log", "w")

    # Evaluate the model on clean data and against all seven attacks:
    # JSMA, FGSM, BIM, Elastic Net, DeepFool, C&W, and VAT.
    def evaluate_against_all():
            # 1 Clean Data
            eval_params = {'batch_size': batch_size}
            accuracy = model_eval(sess, x, y, preds, X_test, Y_test,
                                  args=eval_params)
            print('Legitimate accuracy: %0.4f' % accuracy)

            tmp = 'Legitimate accuracy: '+ str(accuracy) + "\n"
            f_out.write(tmp)


            # 2 JSMA
            accuracy = model_eval(sess, x, y, preds_adv_random, X_test,
                                  Y_test, args=eval_params)

            print('JSMA accuracy: %0.4f' % accuracy)
            tmp = 'JSMA accuracy:'+ str(accuracy) + "\n"
            f_out.write(tmp)


            # 3 FGSM
            accuracy = model_eval(sess, x, y, preds_adv_fgsm, X_test,
                                  Y_test, args=eval_params)

            print('FGSM accuracy: %0.4f' % accuracy)
            tmp = 'FGSM accuracy:' + str(accuracy) + "\n"
            f_out.write(tmp)

            # 4 Basic Iterative Method (BIM)
            accuracy = model_eval(sess, x, y, preds_adv_base_iter, X_test,
                                  Y_test, args=eval_params)

            print('Base Iterative accuracy: %0.4f' % accuracy)
            tmp = 'Base Iterative accuracy:' + str(accuracy) + "\n"
            f_out.write(tmp)

            # 5 Elastic Net
            accuracy = model_eval(sess, x, y, preds_adv_elastic_net, X_test,
                                  Y_test, args=eval_params)

            print('Elastic Net accuracy: %0.4f' % accuracy)
            tmp = 'Elastic Net accuracy:' + str(accuracy) + "\n"
            f_out.write(tmp)

            # 6 DeepFool
            accuracy = model_eval(sess, x, y, preds_adv_deepfool, X_test,
                                  Y_test, args=eval_params)
            print('DeepFool accuracy: %0.4f' % accuracy)
            tmp = 'DeepFool accuracy:' + str(accuracy) + "\n"
            f_out.write(tmp)

            # 7 C & W Attack
            accuracy = model_eval(sess, x, y, preds_adv_cw, X_test,
                                  Y_test, args=eval_params)
            print('C & W accuracy: %0.4f' % accuracy)
            tmp = 'C & W accuracy:' + str(accuracy) + "\n"
            f_out.write(tmp)

            # 8 Virtual Adversarial
            accuracy = model_eval(sess, x, y, preds_adv_vat, X_test,
                                  Y_test, args=eval_params)
            print('VAT accuracy: %0.4f' % accuracy)
            tmp = 'VAT accuracy:' + str(accuracy) + "\n"
            f_out.write(tmp)
            f_out.write("*******End of Epoch***********\n\n")

            print("*******End of Epoch***********\n\n")

        # report.adv_train_adv_eval = accuracy

    print("Now Adversarial Training with Elastic Net  + modified X_train and Y_train")
    # trained_model.out
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate,
        'train_dir': '/home/stephen/PycharmProjects/jsma-runall-mac/',
        'filename': 'trained_model.out'
    }
    model_train(sess, x, y, preds_2, X_train_data, Y_train_data,
                predictions_adv=preds_adv_elastic_net,
                evaluate=evaluate_against_all, verbose=False,
                args=train_params, rng=rng)


    # Close TF session
    sess.close()
    return report
Ejemplo n.º 31
0
def cifar10_tutorial(train_start=0,
                     train_end=60000,
                     test_start=0,
                     test_end=10000,
                     nb_epochs=NB_EPOCHS,
                     batch_size=BATCH_SIZE,
                     learning_rate=LEARNING_RATE,
                     clean_train=CLEAN_TRAIN,
                     testing=False,
                     backprop_through_attack=BACKPROP_THROUGH_ATTACK,
                     nb_filters=NB_FILTERS,
                     num_threads=None,
                     label_smoothing=0.1,
                     adversarial_training=ADVERSARIAL_TRAINING):
    """
  CIFAR10 cleverhans tutorial
  :param train_start: index of first training set example
  :param train_end: index of last training set example
  :param test_start: index of first test set example
  :param test_end: index of last test set example
  :param nb_epochs: number of epochs to train model
  :param batch_size: size of training batches
  :param learning_rate: learning rate for training
  :param clean_train: perform normal training on clean examples only
                      before performing adversarial training.
  :param testing: if true, complete an AccuracyReport for unit tests
                  to verify that performance is adequate
  :param backprop_through_attack: If True, backprop through adversarial
                                  example construction process during
                                  adversarial training.
  :param label_smoothing: float, amount of label smoothing for cross entropy
  :param adversarial_training: True means using adversarial training
  :return: an AccuracyReport object
  """

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Create TF session
    if num_threads:
        config_args = dict(intra_op_parallelism_threads=1)
    else:
        # allow TensorFlow to fall back to CPU when a GPU kernel is unavailable
        config_args = dict(allow_soft_placement=True)
    sess = tf.Session(config=tf.ConfigProto(**config_args))

    # Get CIFAR10 data
    data = CIFAR10(train_start=train_start,
                   train_end=train_end,
                   test_start=test_start,
                   test_end=test_end)
    dataset_size = data.x_train.shape[0]
    dataset_train = data.to_tensorflow()[0]
    dataset_train = dataset_train.map(
        lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)
    dataset_train = dataset_train.batch(batch_size)
    dataset_train = dataset_train.prefetch(16)
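    # Augmentation pipeline: random horizontal flips and shifts applied with 4
    # parallel map calls, followed by batching and prefetching of 16 batches.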
    x_train, y_train = data.get_set('train')
    x_test, y_test = data.get_set('test')

    # Use Image Parameters
    img_rows, img_cols, nchannels = x_test.shape[1:4]
    nb_classes = y_test.shape[1]

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))

    # Training parameters for the CIFAR10 model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    eval_params = {'batch_size': batch_size}
    bim_params = {
        'eps': 0.5,
        'clip_min': 0.,
        'eps_iter': 0.002,
        'nb_iter': 10,
        'clip_max': 1.,
        'ord': np.inf
    }
    rng = np.random.RandomState([2017, 8, 30])

    def do_eval(preds, x_set, y_set, report_key, is_adv=None):
        acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
        setattr(report, report_key, acc)
        if is_adv is None:
            report_text = None
        elif is_adv:
            report_text = 'adversarial'
        else:
            report_text = 'legitimate'
        if report_text:
            print('Test accuracy on %s examples: %0.4f' % (report_text, acc))

    if clean_train:
        model = ModelAllConvolutional('model1',
                                      nb_classes,
                                      nb_filters,
                                      input_shape=[32, 32, 3])

        preds = model.get_logits(x)
        loss = CrossEntropy(model, smoothing=label_smoothing)

        def evaluate():
            do_eval(preds, x_test, y_test, 'clean_train_clean_eval', False)

        """
    when training, evaluating can be happened
    """
        train(sess,
              loss,
              None,
              None,
              dataset_train=dataset_train,
              dataset_size=dataset_size,
              evaluate=evaluate,
              args=train_params,
              rng=rng,
              var_list=model.get_params())
        # save model
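        # A minimal checkpointing sketch (illustrative assumption only; the
        # checkpoint path is not part of the original tutorial):
        # saver = tf.train.Saver(var_list=model.get_params())
        # saver.save(sess, './model1_ckpt/model1')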

        # Calculate training error
        if testing:
            do_eval(preds, x_train, y_train, 'train_clean_train_clean_eval')
        # Sweep over increasingly strong BIM attacks: each iteration builds a
        # fresh attack graph with a larger eps_iter (the printed "eps" is the
        # nominal per-run budget eps_iter * nb_iter; the total perturbation is
        # still bounded by bim_params['eps']).
        for i in range(20):
            bim = BasicIterativeMethod(model, sess=sess)
            adv_x = bim.generate(x, **bim_params)
            preds_adv = model.get_logits(adv_x)
            # Evaluate the accuracy of the CIFAR10 model on adversarial examples
            print("eps:%0.2f" %
                  (bim_params["eps_iter"] * bim_params['nb_iter']))
            do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True)
            bim_params["eps_iter"] = bim_params["eps_iter"] + 0.002

        # Calculate training error
        if testing:
            do_eval(preds_adv, x_train, y_train, 'train_clean_train_adv_eval')

    if not adversarial_training:
        return report

    print('Repeating the process, using adversarial training')

    # Create a new model and train it to be robust to BasicIterativeMethod
    model2 = ModelAllConvolutional('model2',
                                   nb_classes,
                                   nb_filters,
                                   input_shape=[32, 32, 3])
    bim2 = BasicIterativeMethod(model2, sess=sess)

    def attack(x):
        return bim2.generate(x, **bim_params)

    # add attack to loss
    loss2 = CrossEntropy(model2, smoothing=label_smoothing, attack=attack)
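    # Passing attack= into CrossEntropy mixes the cross-entropy on clean inputs
    # with the cross-entropy on attack(x), so the train() call below performs
    # adversarial training for model2 (the exact clean/adversarial weighting is
    # left at the loss's default here).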
    preds2 = model2.get_logits(x)
    adv_x2 = attack(x)

    if not backprop_through_attack:
        # Stopping gradients here keeps the attack construction out of the
        # backward pass. For some attacks, enabling backprop_through_attack
        # increases the cost of training, but gives the defender the ability
        # to anticipate how the attacker will change their strategy in
        # response to updates to the defender's parameters.
        adv_x2 = tf.stop_gradient(adv_x2)
    preds2_adv = model2.get_logits(adv_x2)

    def evaluate2():
        # Accuracy of adversarially trained model on legitimate test inputs
        do_eval(preds2, x_test, y_test, 'adv_train_clean_eval', False)
        # Accuracy of the adversarially trained model on adversarial examples
        do_eval(preds2_adv, x_test, y_test, 'adv_train_adv_eval', True)

    # Perform and evaluate adversarial training
    train(sess,
          loss2,
          None,
          None,
          dataset_train=dataset_train,
          dataset_size=dataset_size,
          evaluate=evaluate2,
          args=train_params,
          rng=rng,
          var_list=model2.get_params())

    # Calculate training errors
    if testing:
        do_eval(preds2, x_train, y_train, 'train_adv_train_clean_eval')
        do_eval(preds2_adv, x_train, y_train, 'train_adv_train_adv_eval')

    return report