class DeepFool(Attack):
    """
    DeepFool is an untargeted, iterative attack that repeatedly linearizes
    the classifier to find the minimal perturbation that crosses a decision
    boundary. The implementation here is w.r.t. the L2 norm.
    Paper link: "https://arxiv.org/pdf/1511.04599.pdf"
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Create a DeepFool instance.
        """
        super(DeepFool, self).__init__(model, back, sess)

        if self.back == 'th':
            raise NotImplementedError('Theano version not implemented.')

        self.structural_kwargs = [
            'overshoot', 'max_iter', 'clip_max', 'clip_min', 'nb_candidate'
        ]

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'logits')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param nb_candidate: The number of classes to test against, i.e.,
                             DeepFool only considers nb_candidate classes when
                             attacking (which speeds up the attack). The
                             nb_candidate classes are chosen according to the
                             prediction confidence during implementation.
        :param overshoot: A termination criterion to prevent vanishing updates
        :param max_iter: Maximum number of iterations for DeepFool
        :param nb_classes: The number of model output classes
        :param clip_min: Minimum component value for clipping
        :param clip_max: Maximum component value for clipping
        """

        import tensorflow as tf
        from .attacks_tf import jacobian_graph, deepfool_batch

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        # Define graph wrt to this input placeholder
        logits = self.model.get_logits(x)
        self.nb_classes = logits.get_shape().as_list()[-1]
        assert self.nb_candidate <= self.nb_classes,\
            'nb_candidate should not be greater than nb_classes'
        preds = tf.reshape(
            tf.nn.top_k(logits, k=self.nb_candidate)[0],
            [-1, self.nb_candidate])
        # grads will be the shape [batch_size, nb_candidate, image_size]
        grads = tf.stack(jacobian_graph(preds, x, self.nb_candidate), axis=1)

        # Define graph
        def deepfool_wrap(x_val):
            return deepfool_batch(self.sess, x, preds, logits, grads, x_val,
                                  self.nb_candidate, self.overshoot,
                                  self.max_iter, self.clip_min, self.clip_max,
                                  self.nb_classes)

        # tf.py_func embeds the numpy-side DeepFool iteration into the graph
        return tf.py_func(deepfool_wrap, [x], tf.float32)

    def parse_params(self,
                     nb_candidate=10,
                     overshoot=0.02,
                     max_iter=50,
                     nb_classes=None,
                     clip_min=0.,
                     clip_max=1.,
                     **kwargs):
        """
        :param nb_candidate: The number of classes to test against, i.e.,
                             DeepFool only considers nb_candidate classes when
                             attacking (which speeds up the attack). The
                             nb_candidate classes are chosen according to the
                             prediction confidence during implementation.
        :param overshoot: A termination criterion to prevent vanishing updates
        :param max_iter: Maximum number of iterations for DeepFool
        :param nb_classes: The number of model output classes
        :param clip_min: Minimum component value for clipping
        :param clip_max: Maximum component value for clipping
        """
        if nb_classes is not None:
            warnings.warn("The nb_classes argument is depricated and will "
                          "be removed on 2018-02-11")
        self.nb_candidate = nb_candidate
        self.overshoot = overshoot
        self.max_iter = max_iter
        self.clip_min = clip_min
        self.clip_max = clip_max

        return True
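
# A hedged usage sketch (illustrative, not from the source): running DeepFool
# against a trained cleverhans.model.Model with an active tf.Session; the
# names model, sess, x, and x_batch are assumptions.
#
#   df = DeepFool(model, sess=sess)
#   adv_x = df.generate(x, nb_candidate=10, overshoot=0.02, max_iter=50,
#                       clip_min=0., clip_max=1.)
#   adv_np = sess.run(adv_x, feed_dict={x: x_batch})
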
class VirtualAdversarialMethod(Attack):
    """
    This attack was originally proposed by Miyato et al. (2016) and was used
    for virtual adversarial training.
    Paper link: https://arxiv.org/abs/1507.00677
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Note: the model parameter should be an instance of the
        cleverhans.model.Model abstraction provided by CleverHans.
        """
        super(VirtualAdversarialMethod, self).__init__(model, back, sess)

        if self.back == 'th':
            error = "For the Theano version of VAM please call vatm directly."
            raise NotImplementedError(error)

        import tensorflow as tf
        self.feedable_kwargs = {
            'eps': tf.float32,
            'xi': tf.float32,
            'clip_min': tf.float32,
            'clip_max': tf.float32
        }
        self.structural_kwargs = ['num_iterations']

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'logits')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param eps: (optional float) the epsilon (input variation parameter)
        :param num_iterations: (optional) the number of iterations
        :param xi: (optional float) the finite difference parameter
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        return vatm(self.model,
                    x,
                    self.model.get_logits(x),
                    eps=self.eps,
                    num_iterations=self.num_iterations,
                    xi=self.xi,
                    clip_min=self.clip_min,
                    clip_max=self.clip_max)

    def parse_params(self,
                     eps=2.0,
                     num_iterations=1,
                     xi=1e-6,
                     clip_min=None,
                     clip_max=None,
                     **kwargs):
        """
        Takes in a dictionary of parameters and applies attack-specific
        checks before saving them as attributes.

        Attack-specific parameters:
        :param eps: (optional float) the epsilon (input variation parameter)
        :param num_iterations: (optional) the number of iterations
        :param xi: (optional float) the finite difference parameter
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        # Save attack-specific parameters
        self.eps = eps
        self.num_iterations = num_iterations
        self.xi = xi
        self.clip_min = clip_min
        self.clip_max = clip_max
        return True
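
# A hedged usage sketch (illustrative): the virtual adversarial perturbation
# moves x along the direction that most changes the model's output
# distribution, estimated by finite differences with step xi.
#
#   vat = VirtualAdversarialMethod(model, sess=sess)
#   adv_x = vat.generate(x, eps=2.0, num_iterations=1, xi=1e-6,
#                        clip_min=0., clip_max=1.)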
Example 3
class SuperWhite(Attack):
    """
    The Projected Gradient Descent Attack (Madry et al. 2017).
    Paper link: https://arxiv.org/pdf/1706.06083.pdf
    To counter randomness and detection defenses, the loss
    function is modified in this attack.
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Create a SuperWhite instance.
        """
        super(SuperWhite, self).__init__(model, back, sess)
        self.feedable_kwargs = {
            'eps': np.float32,
            'eps_iter': np.float32,
            'y': np.float32,
            'y_target': np.float32,
            'clip_min': np.float32,
            'clip_max': np.float32
        }
        self.structural_kwargs = [
            'ord', 'nb_iter', 'rand_init', 'batch_size', 'delta_marginal',
            'delta_logit', 'delta_kl', 'detection_lambda', 'kl_prob_vec',
            'combine_logits'
        ]

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'logits')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (optional) A tensor with the model labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        :param rand_init: (optional bool) If True, an initial random
                    perturbation is added.
        """

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        labels, nb_classes = self.get_or_guess_labels(x, kwargs)
        self.targeted = self.y_target is not None

        # Initialize loop variables
        adv_x = self.attack(x, labels)

        return adv_x

    def parse_params(self,
                     eps=0.3,
                     eps_iter=0.01,
                     nb_iter=40,
                     y=None,
                     ord=np.inf,
                     clip_min=None,
                     clip_max=None,
                     y_target=None,
                     rand_init=True,
                     detection_lambda=1.0,
                     delta_marginal=-50,
                     delta_logit=-50,
                     delta_kl=0.0,
                     kl_prob_vec=None,
                     combine_logits='ensemble',
                     **kwargs):
        """
        Takes in a dictionary of parameters and applies attack-specific
        checks before saving them as attributes.

        Attack-specific parameters:
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (optional) A tensor with the model labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        :param rand_init: (optional bool) If True, an initial random
                    perturbation is added.
        """

        # Save attack-specific parameters
        self.eps = eps
        self.eps_iter = eps_iter
        self.nb_iter = nb_iter
        self.y = y
        self.y_target = y_target
        self.ord = ord
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.rand_init = rand_init

        # parameters in YL's 3 detection methods
        self.delta_marginal = delta_marginal  # a scalar
        self.delta_logit = delta_logit  # a vector of shape (n_class,)
        self.kl_prob_vec = kl_prob_vec  # shape (n_class, n_class)
        assert self.kl_prob_vec is not None
        self.delta_kl = delta_kl  # a vector of shape (n_class,)
        self.detection_lambda = detection_lambda
        self.combine_logits = combine_logits
        assert self.combine_logits in ['ensemble', 'bayes']
        print(self.detection_lambda, 'detection_lambda')

        if self.y is not None and self.y_target is not None:
            raise ValueError("Must not set both y and y_target")
        # Check if order of the norm is acceptable given current implementation
        if self.ord not in [np.inf, 1, 2]:
            raise ValueError("Norm order must be either np.inf, 1, or 2.")

        return True

    def model_loss(self, y, logits):
        # compute the cross-entropy loss for y of shape (N, dimY)
        # and logits of shape (N, dimY) or (K, N, dimY)
        import tensorflow as tf

        if len(logits.get_shape().as_list()) == 2:
            ce_loss = tf.nn.softmax_cross_entropy_with_logits(labels=y,
                                                              logits=logits)
            return tf.reduce_mean(ce_loss)
        else:
            print('attack snapshots...')
            # first normalise logits
            logits -= tf.reduce_max(logits, -1, keep_dims=True)
            logits -= tf.log(
                tf.reduce_sum(tf.exp(logits), -1, keep_dims=True) + 1e-9)

            # then compute cross entropy
            ce_loss = -tf.reduce_sum(logits * y, -1)
            ce_loss = tf.reduce_mean(ce_loss, 0)
            return tf.reduce_mean(ce_loss)
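
        # Note: the manual normalisation above is a numerically stable
        # log-softmax, i.e. for a 2-D tensor it matches
        # tf.nn.log_softmax(logits, -1) up to the 1e-9 guard inside the log.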

    def combine(self, logits):
        # combine logits of shape (K, N, dimY) to shape (N, dimY)
        import tensorflow as tf
        print('combine the logits from random network snapshots (%s)...' %
              self.combine_logits)
        if self.combine_logits == 'ensemble':
            results = tf.reduce_mean(tf.nn.softmax(logits), 0)  # (N, dimY)
            results = tf.log(tf.clip_by_value(results, 1e-20, np.inf))
        elif self.combine_logits == 'bayes':
            logits_max = tf.reduce_max(logits, 0)
            logits_ = logits - logits_max  # (K, N, dimY)
            results = tf.log(
                tf.clip_by_value(tf.reduce_mean(tf.exp(logits_), 0), 1e-20,
                                 np.inf))
            results += logits_max

        return results
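
        # In numpy terms (illustrative): 'ensemble' averages in probability
        # space,
        #     results = log(mean(softmax(logits), axis=0)),
        # while 'bayes' is a stabilised log-mean-exp of the raw logits,
        #     results = logsumexp(logits, axis=0) - log(K),
        # an average in log space; the log(K) constant does not affect argmax.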

    def attack_single_step(self, x, eta, y):
        """
        Given the original image and the perturbation computed so far, computes
        a new perturbation.

        :param x: A tensor with the original input.
        :param eta: A tensor the same shape as x that holds the perturbation.
        :param y: A tensor with the target labels or ground-truth labels.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import clip_eta

        adv_x = x + eta
        preds = self.model.get_logits(adv_x)  # shape (K, N, dimY)
        loss = self.model_loss(y, preds)  # see Carlini's recipe
        if self.targeted:
            loss = -loss

        # now forms the predicted output
        if len(preds.get_shape().as_list()) == 2:
            logits = preds
        else:
            logits = self.combine(preds)

        # loss to evade marginal detection
        def logsumexp(x):
            x_max = tf.expand_dims(tf.reduce_max(x, 1), 1)
            res = tf.log(
                tf.clip_by_value(tf.reduce_sum(tf.exp(x - x_max), 1), 1e-10,
                                 np.inf))
            return res + x_max[:, 0]

        logpx = logsumexp(logits)
        loss_detect_marginal = -tf.reduce_mean(
            tf.nn.relu(-logpx - self.delta_marginal))

        # loss to evade logit detection
        y_pred = tf.argmax(logits, 1)
        loss_detect_logit = tf.nn.relu(-logits - self.delta_logit)
        loss_detect_logit = -tf.reduce_mean(
            tf.gather(loss_detect_logit, y_pred, axis=1))

        # loss to evade kl detection
        N = logits.get_shape().as_list()[0]
        logits_normalised = logits - tf.expand_dims(logsumexp(logits), 1)
        kl = tf.reduce_sum(
            self.kl_prob_vec *
            (tf.log(self.kl_prob_vec) - tf.expand_dims(logits_normalised, 1)),
            2)
        loss_detect_kl = tf.nn.relu(kl - self.delta_kl)
        loss_detect_kl = -tf.reduce_mean(
            tf.gather(loss_detect_kl, y_pred, axis=1))

        # Select which detection loss to evade; the logit detector is active
        # here (swap in loss_detect_marginal or loss_detect_kl as needed).
        # loss_detect = loss_detect_marginal
        loss_detect = loss_detect_logit
        # loss_detect = loss_detect_kl

        # combine
        print('using lambda_detect = %.2f' % self.detection_lambda)
        loss += self.detection_lambda * loss_detect

        grad, = tf.gradients(loss, adv_x)
        scaled_signed_grad = self.eps_iter * tf.sign(grad)
        adv_x = adv_x + scaled_signed_grad
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
        eta = adv_x - x
        eta = clip_eta(eta, self.ord, self.eps)
        return eta

    def attack(self, x, y):
        """
        This method creates a symbolic graph that, given an input image,
        first randomly perturbs the image. The perturbation is bounded to
        an epsilon ball. Then multiple steps of gradient descent are
        performed to increase the probability of a target label or decrease
        the probability of the ground-truth label.

        :param x: A tensor with the input image.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import clip_eta

        if self.rand_init:
            eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)
            eta = clip_eta(eta, self.ord, self.eps)
        else:
            eta = tf.zeros_like(x)

        for i in range(self.nb_iter):
            eta = self.attack_single_step(x, eta, y)

        adv_x = x + eta
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

        return adv_x
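
# A hedged usage sketch (illustrative, not from the source); kl_prob_vec is
# required and is assumed to be an (n_class, n_class) matrix of reference
# class probability vectors (kl_mat below is a placeholder name):
#
#   attack = SuperWhite(model, sess=sess)
#   adv_x = attack.generate(x, eps=0.3, eps_iter=0.01, nb_iter=40,
#                           kl_prob_vec=kl_mat, detection_lambda=1.0,
#                           clip_min=0., clip_max=1.)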
Example 4
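# Assumed context for this snippet (defined elsewhere in the original script,
# not shown here): cf (a CIFAR-10 loader), deepnn (the network definition),
# FLAGS, run_log_dir, the os/tf/np imports, and the cleverhans imports
# FastGradientMethod and CallableModelWrapper.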
def main(_):
    tf.reset_default_graph()

    # Import data
    cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir)
    cifar.preprocess()  # necessary for adversarial attack to work well.

    with tf.variable_scope('inputs'):
        # Create the model
        x = tf.placeholder(
            tf.float32,
            [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels])
        # Define loss and optimizer
        y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes])

    is_training = tf.placeholder(tf.bool, [])

    # Build the graph for the deep net
    with tf.variable_scope('model'):
        y_conv = deepnn(x, is_training)
        model = CallableModelWrapper(lambda i: deepnn(i, is_training),
                                     'logits')

    # Define your loss function - softmax_cross_entropy
    with tf.variable_scope('x_entropy'):
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))

    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                               global_step, FLAGS.decay_steps,
                                               FLAGS.decay_rate)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimiser = tf.train.AdamOptimizer(
            learning_rate, name='test').minimize(cross_entropy,
                                                 global_step=global_step)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                              name='accuracy')

    loss_summary = tf.summary.scalar('Loss', cross_entropy)
    acc_summary = tf.summary.scalar('Accuracy', accuracy)

    # saver for checkpoints
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(run_log_dir + '_train',
                                               sess.graph,
                                               flush_secs=5)
        summary_writer_validation = tf.summary.FileWriter(run_log_dir +
                                                          '_validate',
                                                          sess.graph,
                                                          flush_secs=5)

        with tf.variable_scope('model', reuse=True):
            fgsm = FastGradientMethod(model, sess=sess)
            adversarial_x = fgsm.generate(x,
                                          eps=0.05,
                                          clip_min=0.0,
                                          clip_max=1.0)
            adversarial_predictions = model.get_logits(adversarial_x)

        adversarial_correct_prediction = tf.equal(
            tf.argmax(adversarial_predictions, 1), tf.argmax(y_, 1))
        adversarial_accuracy = tf.reduce_mean(tf.cast(
            adversarial_correct_prediction, tf.float32),
                                              name='adversarial_accuracy')

        sess.run(tf.global_variables_initializer())

        # Training and validation
        for step in range(FLAGS.max_steps):
            # Training: Backpropagation using train set
            (trainImages, trainLabels) = cifar.getTrainBatch()
            (testImages, testLabels) = cifar.getTestBatch()

            _, summary_str = sess.run([optimiser, loss_summary],
                                      feed_dict={
                                          x: trainImages,
                                          y_: trainLabels,
                                          is_training: True
                                      })

            if step % (FLAGS.log_frequency + 1) == 0:
                summary_writer.add_summary(summary_str, step)

            # Validation: Monitoring accuracy using validation set
            if step % FLAGS.log_frequency == 0:
                validation_accuracy, summary_str = sess.run(
                    [accuracy, acc_summary],
                    feed_dict={
                        x: testImages,
                        y_: testLabels,
                        is_training: False
                    })
                print('step %d, test_accuracy on validation batch: %g' %
                      (step, validation_accuracy))
                summary_writer_validation.add_summary(summary_str, step)

            # Save the model checkpoint periodically.
            if step % FLAGS.save_model == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(run_log_dir + '_train',
                                               'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        # Testing

        # resetting the internal batch indexes
        cifar.reset()
        evaluated_images = 0
        test_accuracy = 0
        adversarial_test_accuracy = 0
        batch_count = 0

        # don't loop back when we reach the end of the test set
        while evaluated_images != cifar.nTestSamples:
            (testImages,
             testLabels) = cifar.getTestBatch(allowSmallerBatches=True)
            test_accuracy_temp = sess.run(accuracy,
                                          feed_dict={
                                              x: testImages,
                                              y_: testLabels,
                                              is_training: False
                                          })
            adversarial_test_accuracy_temp = sess.run(adversarial_accuracy,
                                                      feed_dict={
                                                          x: testImages,
                                                          y_: testLabels,
                                                          is_training: False
                                                      })

            batch_count += 1
            test_accuracy += test_accuracy_temp
            adversarial_test_accuracy += adversarial_test_accuracy_temp
            evaluated_images += testLabels.shape[0]

        test_accuracy = test_accuracy / batch_count
        print('test_set: %0.3f' % test_accuracy)

        adversarial_test_accuracy = adversarial_test_accuracy / batch_count
        print('adversarial_test_set: %0.3f' % adversarial_test_accuracy)
Example 5
class DeepFool(Attack):
    """
    DeepFool is an untargeted, iterative attack which aims at finding the
    minimal adversarial perturbation in deep networks. The implementation
    here is w.r.t. the L2 norm.
    Paper link: "https://arxiv.org/pdf/1511.04599.pdf"
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Create a DeepFool instance.
        """
        super(DeepFool, self).__init__(model, back, sess)

        if self.back == 'th':
            raise NotImplementedError('Theano version not implemented.')

        self.structural_kwargs = [
            'overshoot', 'max_iter', 'clip_max', 'clip_min', 'nb_candidate',
            'nb_classes'
        ]

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'logits')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param nb_candidate: The number of classes to test against, i.e.,
                             DeepFool only considers nb_candidate classes when
                             attacking (which speeds up the attack)
        :param overshoot: A termination criterion to prevent vanishing updates
        :param max_iter: Maximum number of iterations for DeepFool
        :param nb_classes: The number of model output classes
        :param clip_min: Minimum component value for clipping
        :param clip_max: Maximum component value for clipping
        """

        import tensorflow as tf
        from .attacks_tf import deepfool_batch

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        assert self.nb_candidate <= self.nb_classes,\
            'nb_candidate should not be greater than nb_classes'

        # Define graph wrt to this input placeholder
        logits = self.model.get_logits(x)
        preds = tf.reshape(
            tf.nn.top_k(logits, k=self.nb_candidate)[0],
            [-1, self.nb_candidate])
        # gradient_graph is assumed to be provided elsewhere in this variant;
        # it plays the role of the stacked jacobian_graph in the first
        # DeepFool variant above.
        grads = gradient_graph(preds, x, self.nb_candidate)

        # Define graph
        def deepfool_wrap(x_val):
            return deepfool_batch(self.sess, x, preds, logits, grads, x_val,
                                  self.nb_candidate, self.overshoot,
                                  self.max_iter, self.clip_min, self.clip_max,
                                  self.nb_classes)

        return tf.py_func(deepfool_wrap, [x], tf.float32)

    def parse_params(self,
                     nb_candidate=10,
                     overshoot=0.02,
                     max_iter=50,
                     nb_classes=1001,
                     clip_min=0.,
                     clip_max=1.,
                     **kwargs):
        """
        :param nb_candidate: The number of classes to test against, i.e.,
                             DeepFool only considers nb_candidate classes when
                             attacking (which speeds up the attack)
        :param overshoot: A termination criterion to prevent vanishing updates
        :param max_iter: Maximum number of iterations for DeepFool
        :param nb_classes: The number of model output classes
        :param clip_min: Minimum component value for clipping
        :param clip_max: Maximum component value for clipping
        """
        self.nb_candidate = nb_candidate
        self.overshoot = overshoot
        self.max_iter = max_iter
        self.nb_classes = nb_classes
        self.clip_min = clip_min
        self.clip_max = clip_max

        return True

class KKTFun5(Attack):
    def __init__(self, model, sess=None, dtypestr='float32', **kwargs):
        super(KKTFun5, self).__init__(model, sess, dtypestr, **kwargs)

        self.feedable_kwargs = ('alp', 'eps', 'y', 'y_target',
                                'clip_min', 'clip_max')
        self.structural_kwargs = ['ord', 'nb_iter']

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')
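
    # Note: generate() below relies on helper functions assumed to be defined
    # in the authors' utility module (not shown in this snippet):
    # softmax_cross_entropy_better, norm_l2, psi_old, teddy_decay,
    # out_direction, estimate_beta_out_LS, out_p, estimate_beta_in_simple,
    # is_adversarial, and quantization.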

    def generate(self, x, y, **kwargs):
        import tensorflow as tf

        self.parse_params(**kwargs)
        flag_cross = tf.cast(tf.zeros(x.shape[0],), tf.bool)

        def cond(i, adv_x, best_x, flag_cross, log_step, log_suc):
            return tf.less(i, self.nb_iter)

        def body(i, adv_x, best_x, flag_cross, log_step, log_suc):
            targeted = (self.y_target is not None)
            logits = self.model.get_logits(adv_x)
            y_hot = tf.one_hot(y, logits.shape[1])
            loss = softmax_cross_entropy_better(logits, y_hot)
            grad, = tf.gradients(loss, adv_x)
            g = -grad

            d = x - adv_x
            snd = tf.tile(norm_l2(d), (1, d.shape[1], d.shape[2], d.shape[3]))
            sng = tf.tile(norm_l2(g), (1, g.shape[1], g.shape[2], g.shape[3]))
            nd = tf.div(d, snd)
            ng = tf.div(g, sng)
            tan_psi, sin_psi = psi_old(ng, nd)

            # cosine decay
            self.eps_all = teddy_decay(i, tf.ones(x.shape[0]) * self.nb_iter,
                                       self.eps)
            epsi = tf.tile(tf.reshape(self.eps_all, (d.shape[0], 1, 1, 1)),
                           (1, d.shape[1], d.shape[2], d.shape[3]))
            alpe = tf.multiply(self.alp * tf.ones_like(epsi), epsi)

            # search step along the normalised gradient
            p_search = tf.multiply(ng, alpe)

            # refine step
            # outward refinement, used while the current point is adversarial
            g_ort = out_direction(d, g, ng, sng)
            epsi_out = tf.multiply(snd, epsi)
            beta = estimate_beta_out_LS(d, g_ort, tf.multiply(snd, epsi), 7,
                                        self.levels)
            p_out = out_p(d, g, ng, snd, sng, sin_psi, beta, g_ort)
            # inward refinement, used once the boundary has been crossed but
            # the current point is no longer adversarial
            beta, nor = estimate_beta_in_simple(d, g, snd, tf.div(snd, epsi))
            p_in = tf.multiply(ng, beta)

            flag, pred_l1 = is_adversarial(logits, y)

            flag_cross = tf.logical_or(flag_cross, flag)
            delta = tf.where(flag_cross, p_in, p_search)
            delta = tf.where(flag, p_out, delta)

            adv_x = tf.clip_by_value(adv_x + delta, self.clip_min,
                                     self.clip_max)
            # Quantization
            adv_x = quantization(adv_x, self.levels)
            logits = self.model.get_logits(adv_x)
            flag, pred_l = is_adversarial(logits, y)

            # save the best (smallest-distortion) adversarial example so far
            a = norm_l2(best_x - x)
            b = norm_l2(adv_x - x)

            flag_save = tf.reshape(tf.greater(a, b), (x.shape[0],))
            nm_best_x = tf.where(flag_save, adv_x, best_x)
            best_x = tf.where(flag, nm_best_x, best_x)

            # log the per-iteration distortion and loss
            tmp = tf.one_hot(tf.cast(i, tf.int32), self.nb_iter)
            log_step = log_step + tf.reshape(norm_l2(adv_x - x),
                                             (delta.shape[0], 1)) * tmp
            log_suc = log_suc + tf.reshape(tf.cast(loss, tf.float32),
                                           (flag.shape[0], 1)) * tmp

            return i+1, adv_x, best_x, flag_cross, log_step, log_suc

        adv_x = x
        best_x = tf.ones_like(x)
        log_step = tf.zeros([x.shape[0], self.nb_iter], tf.float32)
        log_suc = tf.zeros([x.shape[0], self.nb_iter], tf.float32)
        _, adv_x, best_x, flag_cross, log_step, log_suc = tf.while_loop(
            cond, body,
            [tf.zeros([]), adv_x, best_x, flag_cross, log_step, log_suc])
        return best_x, log_step, log_suc

    def parse_params(self, eps=0.3, alp=0.4, nb_iter=10, y=None,
                     y_target=None, clip_min=None, clip_max=None, ord=2,
                     levels=256, **kwargs):
        """
        Takes in a dictionary of parameters and applies attack-specific
        checks before saving them as attributes.
        Attack-specific parameters:
        :param eps: (optional float) maximum distortion of adversarial example
                    compared to original input
        :param alp: (optional float) scaling factor applied to the decayed
                    step size in the search step
        :param nb_iter: (optional int) Number of attack iterations.
        :param y: (optional) A tensor with the true labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param levels: (optional int) Number of quantization levels.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """

        # Save attack-specific parameters
        self.nb_iter = nb_iter
        self.eps = eps
        self.alp = alp
        self.y = y
        self.y_target = y_target
        self.ord = ord
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.levels = levels

        if self.y is not None and self.y_target is not None:
            raise ValueError("Must not set both y and y_target")
        # Check if order of the norm is acceptable given current implementation
        if self.ord not in [np.inf, 1, 2]:
            raise ValueError("Norm order must be either np.inf, 1, or 2.")

        return True
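
# A hedged usage sketch (illustrative): generate() returns the best (smallest
# L2 distortion) adversarial batch found, plus per-iteration logs of the
# distortion and loss; model, sess, x, and y are assumptions.
#
#   attack = KKTFun5(model, sess=sess)
#   best_x, log_step, log_suc = attack.generate(x, y, eps=0.3, alp=0.4,
#                                               nb_iter=10, clip_min=0.,
#                                               clip_max=1., levels=256)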