Code Example #1
class EnsembleRNMethod(Attack):
    def __init__(self, model_list, back='tf', sess=None):
        """
        Create an EnsembleRNMethod instance.
        """
        super(EnsembleRNMethod, self).__init__(model_list, back, sess)
        self.feedable_kwargs = {
            'eps': np.float32,
            'y': np.float32,
            'y_target': np.float32,
            'clip_min': np.float32,
            'clip_max': np.float32
        }
        self.structural_kwargs = ['ord']
        """
        if isinstance(self.model, list):
            print("self.model is list")
            self.model = ModelListWrapper(self.model)
        elif not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')
        """
        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param eps: (optional float) attack step size (input variation)
        :param ord: (optional) Order of the norm (mimics NumPy).
                    Possible values: np.inf, 1 or 2.
        :param y: (optional) A tensor with the model labels. Only provide
                  this parameter if you'd like to use true labels when crafting
                  adversarial samples. Otherwise, model predictions are used as
                  labels to avoid the "label leaking" effect (explained in this
                  paper: https://arxiv.org/abs/1611.01236). Default is None.
                  Labels should be one-hot-encoded.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        # NOTE: this method is a stub; no parameters are parsed and no
        # attack graph is built, it simply returns the model's
        # probabilities on x
        return self.model.get_probs(x)
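The commented-out branch above refers to a ModelListWrapper that is not defined in any of these examples. Below is a minimal sketch of what such a wrapper might look like, assuming each member exposes get_probs(x) the way cleverhans.model.Model does; the real Model interface may require more methods (e.g. fprop), so treat this as illustrative only.

import tensorflow as tf

class ModelListWrapper(object):
    """Hypothetical ensemble wrapper: averages the probability outputs
    of several wrapped models."""
    def __init__(self, model_list):
        self.model_list = model_list

    def get_probs(self, x):
        # Average the members' predicted distributions
        probs = [m.get_probs(x) for m in self.model_list]
        return tf.add_n(probs) / float(len(probs))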
Code Example #2
class MultiModelIterativeMethod(MultipleModelAttack):
    """
    The Basic Iterative Method (Kurakin et al. 2016). The original paper used
    hard labels for this attack; no label smoothing.
    """
    def __init__(self, models, back='tf', sess=None):
        """
        Create a MultiModelIterativeMethod instance.
        """
        super(MultiModelIterativeMethod, self).__init__(models, back, sess)
        self.feedable_kwargs = {
            'eps': np.float32,
            'eps_iter': np.float32,
            'y': np.float32,
            'clip_min': np.float32,
            'clip_max': np.float32
        }
        self.structural_kwargs = ['ord', 'nb_iter']

        if not isinstance(self.model1, Model):
            self.model1 = CallableModelWrapper(self.model1, 'probs')

        if not isinstance(self.model2, Model):
            self.model2 = CallableModelWrapper(self.model2, 'probs')

        if not isinstance(self.model3, Model):
            self.model3 = CallableModelWrapper(self.model3, 'probs')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (required) A tensor with the model labels.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        import tensorflow as tf

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        # Initialize loop variables
        eta = 0

        # Fix labels to the third model's predictions on the clean input
        # for the loss computation
        model_preds = self.model3.get_probs(x)

        preds_max = tf.reduce_max(model_preds, 1, keep_dims=True)
        y = tf.to_float(tf.equal(model_preds, preds_max))
        fgsm_params = {'eps': self.eps_iter, 'y': y, 'ord': self.ord}

        # Build one single-step attack per model outside the loop;
        # generate() is called each iteration to extend the symbolic graph
        FGSM1 = FastGradientMethod(self.model1,
                                   back=self.back,
                                   sess=self.sess)
        FGSM2 = FastGradientMethod(self.model2,
                                   back=self.back,
                                   sess=self.sess)
        FGSM3 = FastGradientMethod(self.model3,
                                   back=self.back,
                                   sess=self.sess)

        for i in range(self.nb_iter):
            # Compute this step's perturbation as the average of the
            # three single-model FGSM perturbations
            eta1 = FGSM1.generate(x + eta, **fgsm_params) - x
            eta2 = FGSM2.generate(x + eta, **fgsm_params) - x
            eta3 = FGSM3.generate(x + eta, **fgsm_params) - x
            eta = (eta1 + eta2 + eta3) / 3.

            # Clipping perturbation eta to self.ord norm ball
            if self.ord == np.inf:
                eta = tf.clip_by_value(eta, -self.eps, self.eps)
            elif self.ord in [1, 2]:
                reduc_ind = list(range(1, len(eta.get_shape())))
                if self.ord == 1:
                    norm = tf.reduce_sum(tf.abs(eta),
                                         reduction_indices=reduc_ind,
                                         keep_dims=True)
                elif self.ord == 2:
                    norm = tf.sqrt(
                        tf.reduce_sum(tf.square(eta),
                                      reduction_indices=reduc_ind,
                                      keep_dims=True))
                # Project onto the eps-ball: shrink eta when its norm
                # exceeds eps, never scale small perturbations up
                eta = eta * tf.minimum(1., self.eps / tf.maximum(norm, 1e-12))

        # Define adversarial example (and clip if necessary)
        adv_x = x + eta
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

        return adv_x

    def parse_params(self,
                     eps=0.3,
                     eps_iter=0.05,
                     nb_iter=10,
                     y=None,
                     ord=np.inf,
                     clip_min=None,
                     clip_max=None,
                     **kwargs):
        """
        Take in a dictionary of parameters and apply attack-specific checks
        before saving them as attributes.

        Attack-specific parameters:
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (required) A tensor with the model labels.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """

        # Save attack-specific parameters
        self.eps = eps
        self.eps_iter = eps_iter
        self.nb_iter = nb_iter
        self.y = y
        self.ord = ord
        self.clip_min = clip_min
        self.clip_max = clip_max

        # Check if order of the norm is acceptable given current implementation
        if self.ord not in [np.inf, 1, 2]:
            raise ValueError("Norm order must be either np.inf, 1, or 2.")
        if self.back == 'th':
            error_string = "BasicIterativeMethod is not implemented in Theano"
            raise NotImplementedError(error_string)

        return True
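A usage sketch for this class, assuming model_a, model_b, model_c are CleverHans-compatible models and sess, x, X_test already exist; how the models argument maps onto the model1/model2/model3 attributes depends on the MultipleModelAttack base class, which is not shown here.

import numpy as np

attack = MultiModelIterativeMethod([model_a, model_b, model_c], sess=sess)
adv_x = attack.generate(x, eps=0.3, eps_iter=0.05, nb_iter=10,
                        ord=np.inf, clip_min=0., clip_max=1.)
# Materialize adversarial examples for a batch of test inputs
adv_examples = sess.run(adv_x, feed_dict={x: X_test[:128]})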
Code Example #3
class FastGradientMethod(Attack):
    """
    This attack was originally implemented by Goodfellow et al. (2015) with the
    infinity norm (and is known as the "Fast Gradient Sign Method"). This
    implementation extends the attack to other norms, and is therefore called
    the Fast Gradient Method.
    Paper link: https://arxiv.org/abs/1412.6572
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Create a FastGradientMethod instance.
        """
        super(FastGradientMethod, self).__init__(model, back, sess)
        self.feedable_kwargs = {
            'eps': np.float32,
            'y': np.float32,
            'clip_min': np.float32,
            'clip_max': np.float32
        }
        self.structural_kwargs = ['ord']

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param eps: (optional float) attack step size (input variation)
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param y: (optional) A tensor with the model labels. Only provide
                  this parameter if you'd like to use true labels when crafting
                  adversarial samples. Otherwise, model predictions are used as
                  labels to avoid the "label leaking" effect (explained in this
                  paper: https://arxiv.org/abs/1611.01236). Default is None.
                  Labels should be one-hot-encoded.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        if self.back == 'tf':
            from .attacks_tf import fgm
        else:
            from .attacks_th import fgm

        return fgm(x,
                   self.model.get_probs(x),
                   y=self.y,
                   eps=self.eps,
                   ord=self.ord,
                   clip_min=self.clip_min,
                   clip_max=self.clip_max)

    def parse_params(self,
                     eps=0.3,
                     ord=np.inf,
                     y=None,
                     clip_min=None,
                     clip_max=None,
                     **kwargs):
        """
        Take in a dictionary of parameters and apply attack-specific checks
        before saving them as attributes.

        Attack-specific parameters:
        :param eps: (optional float) attack step size (input variation)
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param y: (optional) A tensor with the model labels. Only provide
                  this parameter if you'd like to use true labels when crafting
                  adversarial samples. Otherwise, model predictions are used as
                  labels to avoid the "label leaking" effect (explained in this
                  paper: https://arxiv.org/abs/1611.01236). Default is None.
                  Labels should be one-hot-encoded.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        # Save attack-specific parameters
        self.eps = eps
        self.ord = ord
        self.y = y
        self.clip_min = clip_min
        self.clip_max = clip_max

        # Check if order of the norm is acceptable given current implementation
        if self.ord not in [np.inf, 1, 2]:
            raise ValueError("Norm order must be either np.inf, 1, or 2.")
        if self.back == 'th' and self.ord != np.inf:
            raise NotImplementedError("The only FastGradientMethod norm "
                                      "implemented for Theano is np.inf.")
        return True
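generate delegates the actual computation to fgm from attacks_tf. The following is a rough sketch of the infinity-norm case only, not the library's exact implementation:

import tensorflow as tf

def fgm_sketch(x, probs, eps=0.3, clip_min=None, clip_max=None):
    """Rough sketch of an infinity-norm fast gradient step."""
    # With no labels given, use the model's most likely class as the
    # label (avoids label leaking) and stop gradients through it
    preds_max = tf.reduce_max(probs, 1, keep_dims=True)
    y = tf.stop_gradient(tf.to_float(tf.equal(probs, preds_max)))
    y = y / tf.reduce_sum(y, 1, keep_dims=True)
    # Cross-entropy loss and its gradient with respect to the input
    loss = -tf.reduce_mean(tf.reduce_sum(y * tf.log(probs), 1))
    grad, = tf.gradients(loss, x)
    # Step in the direction of the sign of the gradient
    adv_x = x + eps * tf.sign(grad)
    if clip_min is not None and clip_max is not None:
        adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
    return adv_x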
Code Example #4
class MadryEtAl_WithRestarts(Attack):
    """
    The Projected Gradient Descent Attack (Madry et al. 2017).
    Paper link: https://arxiv.org/pdf/1706.06083.pdf
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Create a MadryEtAl instance.
        """
        super(MadryEtAl_WithRestarts, self).__init__(model, back, sess)
        self.feedable_kwargs = {
            'eps': np.float32,
            'eps_iter': np.float32,
            'y': np.float32,
            'y_target': np.float32,
            'clip_min': np.float32,
            'clip_max': np.float32
        }
        # nb_restarts drives a Python-level loop, so it must be a
        # structural kwarg rather than a feedable one
        self.structural_kwargs = ['ord', 'nb_iter', 'rand_init',
                                  'nb_restarts']

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (optional) A tensor with the model labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        :param rand_init: (optional bool) If True, an initial random
                    perturbation is added.
        """

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        labels, nb_classes = self.get_or_guess_labels(x, kwargs)
        self.targeted = self.y_target is not None

        print("targeted?", self.targeted)

        # Initialize loop variables
        adv_x = self.attack(x, labels)

        return adv_x

    def parse_params(self,
                     eps=0.3,
                     eps_iter=0.01,
                     nb_iter=40,
                     y=None,
                     ord=np.inf,
                     clip_min=None,
                     clip_max=None,
                     y_target=None,
                     rand_init=True,
                     nb_restarts=1,
                     **kwargs):
        """
        Take in a dictionary of parameters and apply attack-specific checks
        before saving them as attributes.

        Attack-specific parameters:
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (optional) A tensor with the model labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        :param rand_init: (optional bool) If True, an initial random
                    perturbation is added.
        """

        # Save attack-specific parameters
        self.eps = eps
        self.eps_iter = eps_iter
        self.nb_iter = nb_iter
        self.y = y
        self.y_target = y_target
        self.ord = ord
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.rand_init = rand_init
        self.nb_restarts = nb_restarts

        if self.y is not None and self.y_target is not None:
            raise ValueError("Must not set both y and y_target")
        # Check if order of the norm is acceptable given current implementation
        if self.ord not in [np.inf, 1, 2]:
            raise ValueError("Norm order must be either np.inf, 1, or 2.")

        return True

    def attack_single_step(self, x, eta, y):
        """
        Given the original image and the perturbation computed so far, computes
        a new perturbation.

        :param x: A tensor with the original input.
        :param eta: A tensor the same shape as x that holds the perturbation.
        :param y: A tensor with the target labels or ground-truth labels.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import model_loss, clip_eta

        adv_x = x + eta
        preds = self.model.get_probs(adv_x)
        loss = model_loss(y, preds)
        loss_vector = model_loss(y, preds, mean=False)
        if self.targeted:
            loss = -loss
        grad, = tf.gradients(loss, adv_x)
        scaled_signed_grad = self.eps_iter * tf.sign(grad)
        adv_x = adv_x + scaled_signed_grad
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
        eta = adv_x - x
        eta = clip_eta(eta, self.ord, self.eps)
        return eta, loss, loss_vector

    def attack(self, x, y):
        """
        This method creates a symbolic graph that given an input image,
        first randomly perturbs the image. The
        perturbation is bounded to an epsilon ball. Then multiple steps of
        gradient descent are performed to increase the probability of a target
        label or decrease the probability of the ground-truth label.

        :param x: A tensor with the input image.
        :param y: A tensor with the target or ground-truth labels.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import clip_eta

        best_loss = None
        best_eta = None

        # Run the attack nb_restarts + 1 times and keep, per example, the
        # perturbation that achieves the highest loss
        for restart_step in range(self.nb_restarts + 1):
            if self.rand_init:
                eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)
                eta = clip_eta(eta, self.ord, self.eps)
            else:
                eta = tf.zeros_like(x)

            for i in range(self.nb_iter):
                eta, loss, loss_vec = self.attack_single_step(x, eta, y)

            if best_loss is None:
                best_loss = loss_vec
                best_eta = eta
            else:
                switch_cond = tf.less(best_loss, loss_vec)
                best_loss = tf.where(switch_cond, loss_vec, best_loss)
                best_eta = tf.where(switch_cond, eta, best_eta)

        adv_x = x + best_eta
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

        return adv_x
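attack_single_step relies on clip_eta from cleverhans.utils_tf. A rough sketch of that projection for the infinity- and 2-norm cases (the real helper also covers the 1-norm):

import numpy as np
import tensorflow as tf

def clip_eta_sketch(eta, ord, eps):
    """Rough sketch of projecting eta onto an eps-ball."""
    if ord == np.inf:
        # Elementwise clipping keeps every component in [-eps, eps]
        return tf.clip_by_value(eta, -eps, eps)
    elif ord == 2:
        # Shrink (never grow) eta so each example's 2-norm is at most eps
        reduc_ind = list(range(1, len(eta.get_shape())))
        norm = tf.sqrt(tf.reduce_sum(tf.square(eta),
                                     reduction_indices=reduc_ind,
                                     keep_dims=True))
        return eta * tf.minimum(1., eps / tf.maximum(norm, 1e-12))
    raise NotImplementedError("sketch covers np.inf and 2 only")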
Code Example #5
class SaliencyMapMethod(Attack):
    """
    The Jacobian-based Saliency Map Method (Papernot et al. 2016).
    Paper link: https://arxiv.org/pdf/1511.07528.pdf
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Create a SaliencyMapMethod instance.
        Note: the model parameter should be an instance of the
        cleverhans.model.Model abstraction provided by CleverHans.
        """
        super(SaliencyMapMethod, self).__init__(model, back, sess)

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')

        if self.back == 'th':
            error = "Theano version of SaliencyMapMethod not implemented."
            raise NotImplementedError(error)

        import tensorflow as tf
        self.feedable_kwargs = {'y_target': tf.float32}
        self.structural_kwargs = ['theta', 'gamma', 'clip_max', 'clip_min']

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param theta: (optional float) Perturbation introduced to modified
                      components (can be positive or negative)
        :param gamma: (optional float) Maximum percentage of perturbed features
        :param clip_min: (optional float) Minimum component value for clipping
        :param clip_max: (optional float) Maximum component value for clipping
        :param y_target: (optional) Target tensor if the attack is targeted
        """
        import tensorflow as tf
        from .attacks_tf import jacobian_graph, jsma_batch

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        # Define the Jacobian graph w.r.t. this input placeholder
        preds = self.model.get_probs(x)
        nb_classes = preds.get_shape().as_list()[-1]
        grads = jacobian_graph(preds, x, nb_classes)

        # Define appropriate graph (targeted / random target labels)
        if self.y_target is not None:

            def jsma_wrap(x_val, y_target):
                return jsma_batch(self.sess,
                                  x,
                                  preds,
                                  grads,
                                  x_val,
                                  self.theta,
                                  self.gamma,
                                  self.clip_min,
                                  self.clip_max,
                                  nb_classes,
                                  y_target=y_target)

            # Attack is targeted, target placeholder will need to be fed
            wrap = tf.py_func(jsma_wrap, [x, self.y_target], tf.float32)
        else:

            def jsma_wrap(x_val):
                return jsma_batch(self.sess,
                                  x,
                                  preds,
                                  grads,
                                  x_val,
                                  self.theta,
                                  self.gamma,
                                  self.clip_min,
                                  self.clip_max,
                                  nb_classes,
                                  y_target=None)

            # Attack is untargeted, target values will be chosen at random
            wrap = tf.py_func(jsma_wrap, [x], tf.float32)

        return wrap

    def parse_params(self,
                     theta=1.,
                     gamma=np.inf,
                     nb_classes=None,
                     clip_min=0.,
                     clip_max=1.,
                     y_target=None,
                     **kwargs):
        """
        Take in a dictionary of parameters and apply attack-specific checks
        before saving them as attributes.

        Attack-specific parameters:
        :param theta: (optional float) Perturbation introduced to modified
                      components (can be positive or negative)
        :param gamma: (optional float) Maximum percentage of perturbed features
        :param nb_classes: (optional int) Number of model output classes
        :param clip_min: (optional float) Minimum component value for clipping
        :param clip_max: (optional float) Maximum component value for clipping
        :param y_target: (optional) Target tensor if the attack is targeted
        """

        if nb_classes is not None:
            warnings.warn("The nb_classes argument is depricated and will "
                          "be removed on 2018-02-11")
        self.theta = theta
        self.gamma = gamma
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.y_target = y_target

        return True
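A usage sketch for a targeted run, assuming model, sess, x and X_batch already exist; the 10-class shape and target class are illustrative only.

import numpy as np
import tensorflow as tf

jsma = SaliencyMapMethod(model, sess=sess)
y_target = tf.placeholder(tf.float32, shape=(None, 10))
adv_x = jsma.generate(x, theta=1., gamma=0.1,
                      clip_min=0., clip_max=1., y_target=y_target)
# Push every example in the batch toward class 3
one_hot = np.zeros((len(X_batch), 10), dtype=np.float32)
one_hot[:, 3] = 1.
adv = sess.run(adv_x, feed_dict={x: X_batch, y_target: one_hot})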
Code Example #6
class BasicIterativeMethod(Attack):
    """
    The Basic Iterative Method (Kurakin et al. 2016). The original paper used
    hard labels for this attack; no label smoothing.
    Paper link: https://arxiv.org/pdf/1607.02533.pdf
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Create a BasicIterativeMethod instance.
        Note: the model parameter should be an instance of the
        cleverhans.model.Model abstraction provided by CleverHans.
        """
        super(BasicIterativeMethod, self).__init__(model, back, sess)
        self.feedable_kwargs = {
            'eps': np.float32,
            'eps_iter': np.float32,
            'y': np.float32,
            'y_target': np.float32,
            'clip_min': np.float32,
            'clip_max': np.float32
        }
        self.structural_kwargs = ['ord', 'nb_iter']

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (optional) A tensor with the model labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        import tensorflow as tf

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        # Initialize loop variables
        eta = 0

        # Fix labels to the model's predictions on the clean input for
        # the loss computation
        model_preds = self.model.get_probs(x)
        preds_max = tf.reduce_max(model_preds, 1, keep_dims=True)
        if self.y_target is not None:
            y = self.y_target
            targeted = True
        elif self.y is not None:
            y = self.y
            targeted = False
        else:
            y = tf.to_float(tf.equal(model_preds, preds_max))
            y = tf.stop_gradient(y)
            targeted = False

        y_kwarg = 'y_target' if targeted else 'y'
        fgm_params = {
            'eps': self.eps_iter,
            y_kwarg: y,
            'ord': self.ord,
            'clip_min': self.clip_min,
            'clip_max': self.clip_max
        }

        # Build the single-step attack once; generate() is called each
        # iteration to extend the symbolic graph
        FGM = FastGradientMethod(self.model,
                                 back=self.back,
                                 sess=self.sess)

        for i in range(self.nb_iter):
            # Compute this step's perturbation
            eta = FGM.generate(x + eta, **fgm_params) - x

            # Clipping perturbation eta to self.ord norm ball
            if self.ord == np.inf:
                eta = tf.clip_by_value(eta, -self.eps, self.eps)
            elif self.ord in [1, 2]:
                reduc_ind = list(range(1, len(eta.get_shape())))
                if self.ord == 1:
                    norm = tf.reduce_sum(tf.abs(eta),
                                         reduction_indices=reduc_ind,
                                         keep_dims=True)
                elif self.ord == 2:
                    norm = tf.sqrt(
                        tf.reduce_sum(tf.square(eta),
                                      reduction_indices=reduc_ind,
                                      keep_dims=True))
                # Project onto the eps-ball: shrink eta when its norm
                # exceeds eps, never scale small perturbations up
                eta = eta * tf.minimum(1., self.eps / tf.maximum(norm, 1e-12))

        # Define adversarial example (and clip if necessary)
        adv_x = x + eta
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

        return adv_x

    def parse_params(self,
                     eps=0.3,
                     eps_iter=0.05,
                     nb_iter=10,
                     y=None,
                     ord=np.inf,
                     clip_min=None,
                     clip_max=None,
                     y_target=None,
                     **kwargs):
        """
        Take in a dictionary of parameters and apply attack-specific checks
        before saving them as attributes.

        Attack-specific parameters:
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (optional) A tensor with the model labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """

        # Save attack-specific parameters
        self.eps = eps
        self.eps_iter = eps_iter
        self.nb_iter = nb_iter
        self.y = y
        self.y_target = y_target
        self.ord = ord
        self.clip_min = clip_min
        self.clip_max = clip_max

        if self.y is not None and self.y_target is not None:
            raise ValueError("Must not set both y and y_target")
        # Check if order of the norm is acceptable given current implementation
        if self.ord not in [np.inf, 1, 2]:
            raise ValueError("Norm order must be either np.inf, 1, or 2.")
        if self.back == 'th':
            error_string = "BasicIterativeMethod is not implemented in Theano"
            raise NotImplementedError(error_string)

        return True
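A usage sketch, assuming model, sess, x, X_test and a one-hot y_target tensor already exist; the values are common MNIST-scale settings, not recommendations.

import numpy as np

bim = BasicIterativeMethod(model, sess=sess)
# Untargeted: labels default to the model's own predictions
adv_x = bim.generate(x, eps=0.3, eps_iter=0.05, nb_iter=10,
                     ord=np.inf, clip_min=0., clip_max=1.)
# Targeted variant: supply one-hot targets instead of y
adv_x_t = bim.generate(x, eps=0.3, eps_iter=0.05, nb_iter=10,
                       y_target=y_target, clip_min=0., clip_max=1.)
adv = sess.run(adv_x, feed_dict={x: X_test[:128]})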
Code Example #7
class MadryEtAl(Attack):
    """
    The Projected Gradient Descent Attack (Madry et al. 2017).
    Paper link: https://arxiv.org/pdf/1706.06083.pdf
    """
    def __init__(self, model, back='tf', sess=None):
        """
        Create a MadryEtAl instance.
        """
        super(MadryEtAl, self).__init__(model, back, sess)
        self.feedable_kwargs = {
            'eps': np.float32,
            'eps_iter': np.float32,
            'y': np.float32,
            'y_target': np.float32,
            'clip_min': np.float32,
            'clip_max': np.float32
        }
        self.structural_kwargs = ['ord', 'nb_iter']

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')

    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (optional) A tensor with the model labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        labels, nb_classes = self.get_or_guess_labels(x, kwargs)
        self.targeted = self.y_target is not None

        # Initialize loop variables
        adv_x = self.attack(x)

        return adv_x

    def parse_params(self,
                     eps=0.3,
                     eps_iter=0.01,
                     nb_iter=40,
                     y=None,
                     ord=np.inf,
                     clip_min=None,
                     clip_max=None,
                     y_target=None,
                     **kwargs):
        """
        Take in a dictionary of parameters and apply attack-specific checks
        before saving them as attributes.

        Attack-specific parameters:
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param y: (optional) A tensor with the model labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """

        # Save attack-specific parameters
        self.eps = eps
        self.eps_iter = eps_iter
        self.nb_iter = nb_iter
        self.y = y
        self.y_target = y_target
        self.ord = ord
        self.clip_min = clip_min
        self.clip_max = clip_max

        if self.y is not None and self.y_target is not None:
            raise ValueError("Must not set both y and y_target")
        # Check if order of the norm is acceptable given current implementation
        if self.ord not in [np.inf, 1, 2]:
            raise ValueError("Norm order must be either np.inf, 1, or 2.")
        if self.back == 'th':
            error_string = ("ProjectedGradientDescentMethod is"
                            " not implemented in Theano")
            raise NotImplementedError(error_string)

        return True

    def attack_single_step(self, x, eta, y):
        """
        Given the original image and the perturbation computed so far, computes
        a new perturbation.

        :param x: A tensor with the original input.
        :param eta: A tensor the same shape as x that holds the perturbation.
        :param y: A tensor with the target labels or ground-truth labels.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import model_loss, clip_eta

        adv_x = x + eta
        preds = self.model.get_probs(adv_x)
        loss = model_loss(y, preds)
        if self.targeted:
            loss = -loss
        grad, = tf.gradients(loss, adv_x)
        scaled_signed_grad = self.eps_iter * tf.sign(grad)
        adv_x = adv_x + scaled_signed_grad
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
        eta = adv_x - x
        eta = clip_eta(eta, self.ord, self.eps)
        return eta

    def attack(self, x, **kwargs):
        """
        This method creates a symbolic graph that given an input image,
        first randomly perturbs the image. The
        perturbation is bounded to an epsilon ball. Then multiple steps of
        gradient descent are performed to increase the probability of a target
        label or decrease the probability of the ground-truth label.

        :param x: A tensor with the input image.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import clip_eta

        eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)
        eta = clip_eta(eta, self.ord, self.eps)

        if self.y is not None:
            y = self.y
        else:
            preds = self.model.get_probs(x)
            preds_max = tf.reduce_max(preds, 1, keep_dims=True)
            y = tf.to_float(tf.equal(preds, preds_max))
            y = y / tf.reduce_sum(y, 1, keep_dims=True)
        y = tf.stop_gradient(y)

        for i in range(self.nb_iter):
            eta = self.attack_single_step(x, eta, y)

        adv_x = x + eta
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

        return adv_x
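attack_single_step imports model_loss from utils_tf. A rough sketch of a cross-entropy loss with the same mean/per-example switch; the real helper also accepts logits and options not shown here.

import tensorflow as tf

def model_loss_sketch(y, preds, mean=True):
    """Rough sketch of a cross-entropy loss over probabilities."""
    # Per-example cross-entropy between one-hot labels and probabilities
    out = -tf.reduce_sum(y * tf.log(tf.maximum(preds, 1e-12)), axis=1)
    return tf.reduce_mean(out) if mean else out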
Code Example #8
class SaliencyMapMethod(Attack):
    """
    The Jacobian-based Saliency Map Method (Papernot et al. 2016).
    Paper link: https://arxiv.org/pdf/1511.07528.pdf
    """

    def __init__(self, model, back='tf', sess=None):
        """
        Create a SaliencyMapMethod instance.
        """
        super(SaliencyMapMethod, self).__init__(model, back, sess)

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'probs')

        if self.back == 'th':
            error = "Theano version of SaliencyMapMethod not implemented."
            raise NotImplementedError(error)

        import tensorflow as tf
        self.feedable_kwargs = {'targets': tf.float32}
        self.structural_kwargs = ['theta', 'gamma', 'nb_classes',
                                  'clip_max', 'clip_min']

    def generate(self, x, **kwargs):
        """
        Attack-specific parameters:
        """
        import tensorflow as tf
        from .attacks_tf import jacobian_graph, jsma_batch

        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        # Define the Jacobian graph w.r.t. this input placeholder
        preds = self.model.get_probs(x)
        grads = jacobian_graph(preds, x, self.nb_classes)

        # Define appropriate graph (targeted / random target labels)
        if self.targets is not None:
            def jsma_wrap(x_val, targets):
                return jsma_batch(self.sess, x, preds, grads, x_val,
                                  self.theta, self.gamma, self.clip_min,
                                  self.clip_max, self.nb_classes,
                                  targets=targets)

            # Attack is targeted, target placeholder will need to be fed
            wrap = tf.py_func(jsma_wrap, [x, self.targets], tf.float32)
        else:
            def jsma_wrap(x_val):
                return jsma_batch(self.sess, x, preds, grads, x_val,
                                  self.theta, self.gamma, self.clip_min,
                                  self.clip_max, self.nb_classes,
                                  targets=None)

            # Attack is untargeted, target values will be chosen at random
            wrap = tf.py_func(jsma_wrap, [x], tf.float32)

        return wrap

    def parse_params(self, theta=1., gamma=np.inf, nb_classes=10, clip_min=0.,
                     clip_max=1., targets=None, **kwargs):
        """
        Take in a dictionary of parameters and apply attack-specific checks
        before saving them as attributes.

        Attack-specific parameters:
        :param theta: (optional float) Perturbation introduced to modified
                      components (can be positive or negative)
        :param gamma: (optional float) Maximum percentage of perturbed features
        :param nb_classes: (optional int) Number of model output classes
        :param clip_min: (optional float) Minimum component value for clipping
        :param clip_max: (optional float) Maximum component value for clipping
        :param targets: (optional) Target placeholder if the attack is targeted
        """

        self.theta = theta
        self.gamma = gamma
        self.nb_classes = nb_classes
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.targets = targets

        return True
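Both SaliencyMapMethod variants rely on jacobian_graph from attacks_tf. A rough sketch of that helper, which builds one input-gradient tensor per output class:

import tensorflow as tf

def jacobian_graph_sketch(preds, x, nb_classes):
    """Rough sketch: derivative of each class probability w.r.t. x."""
    grads = []
    for class_ind in range(nb_classes):
        grad, = tf.gradients(preds[:, class_ind], x)
        grads.append(grad)
    return grads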
Code Example #9
def attack(eps=FLAGS.epsilon):
    X_train, valid_set, X_test, Y_train, valid_targets, Y_test = dataset_gen()
    report = AccuracyReport()
    config_args = {}
    sess = tf.Session(config=tf.ConfigProto(**config_args))
    
    # Now we set up the attack: wrap the attacked white-box model with
    # CleverHans' CallableModelWrapper, treating attack_dnn as a
    # callable that returns logits
    attacked_model = CallableModelWrapper(attack_dnn, 'logits')
    x = tf.placeholder(tf.float32, shape=(1, 2))
    y = tf.placeholder(tf.float32, shape=(1, 2))
    fgsm_params = {'eps': eps,
                   'clip_min': 0.,
                   'clip_max': 3.}
    train_params = {
        'nb_epochs': 10,
        'batch_size': 1,
        'learning_rate': 0.02
    }
    
    preds = attacked_model.get_probs(x)
    fgsm = FastGradientMethod(attacked_model)
    adv_x = fgsm.generate(x, **fgsm_params)
    preds_adv = attacked_model.get_probs(adv_x)
    eval_params = {'batch_size': 1}
    
    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test
        # examples
        
        acc = model_eval(
            sess, x, y, preds, X_test, Y_test, args=eval_params)
        report.clean_train_clean_eval = acc
        print('Test accuracy on legitimate examples (training) : %0.4f' % acc)
    # mode_setter appears to be a module-level flag consumed by attack_dnn
    global mode_setter
    mode_setter = tf.estimator.ModeKeys.TRAIN
    model_train(sess, x, y, preds, X_train, Y_train, evaluate=evaluate,
                args=train_params)
    mode_setter = tf.estimator.ModeKeys.EVAL
    acc = model_eval(
        sess, x, y, preds, X_test, Y_test, args=eval_params)
    print('Test accuracy on legitimate examples (test) : %0.4f' % acc)
    print("Precision on Adversarial Examples below.")
    eval_par = {'batch_size': 1}
    # Use the session created above (it was never made the default session)
    acc = model_eval(sess=sess, x=x, y=y, predictions=preds_adv,
                     X_test=X_test, Y_test=Y_test, args=eval_par)
    print('Test accuracy on adversarial examples: %0.4f\n' % acc)
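attack_dnn is referenced above but never defined in this example. A hypothetical stand-in (assuming TensorFlow 1.4+ for tf.AUTO_REUSE): any callable mapping the (1, 2) input placeholder to logits works with CallableModelWrapper(..., 'logits'). AUTO_REUSE matters because the wrapper calls the function for both x and adv_x, and both calls must share weights.

import tensorflow as tf

def attack_dnn(x):
    # Tiny two-layer network; reuse lets repeated calls share variables
    hidden = tf.layers.dense(x, 8, activation=tf.nn.relu,
                             name='hidden', reuse=tf.AUTO_REUSE)
    logits = tf.layers.dense(hidden, 2, name='logits',
                             reuse=tf.AUTO_REUSE)
    return logits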
Code Example #10
File: attacks.py | Project: xiaottang2/cleverhans
class CarliniWagnerL2(Attack):
    """
    This attack was originally proposed by Carlini and Wagner. It is an
    iterative attack that finds adversarial examples on many defenses that
    are robust to other attacks.
    Paper link: https://arxiv.org/abs/1608.04644

    At a high level, this attack is an iterative attack using Adam and
    a specially-chosen loss function to find adversarial examples with
    lower distortion than other attacks. This comes at the cost of speed,
    as this attack is often much slower than others.
    """
    def __init__(self, model, back='tf', sess=None):
        super(CarliniWagnerL2, self).__init__(model, back, sess)

        if self.back == 'th':
            raise NotImplementedError('Theano version not implemented.')

        import tensorflow as tf
        self.feedable_kwargs = {'y': tf.float32, 'y_target': tf.float32}

        self.structural_kwargs = [
            'nb_classes', 'batch_size', 'confidence', 'targeted',
            'learning_rate', 'binary_search_steps', 'max_iterations',
            'abort_early', 'initial_const', 'clip_min', 'clip_max'
        ]

        if not isinstance(self.model, Model):
            self.model = CallableModelWrapper(self.model, 'logits')

    def generate(self, x, **kwargs):
        """
        Return a tensor that constructs adversarial examples for the given
        input. Generate uses tf.py_func in order to operate over tensors.

        :param x: (required) A tensor with the inputs.
        :param y: (optional) A tensor with the true labels for an untargeted
                  attack. If None (and y_target is None) then use the
                  original labels the classifier assigns.
        :param y_target: (optional) A tensor with the target labels for a
                  targeted attack.
        :param nb_classes: The number of classes the model has.
        :param confidence: Confidence of adversarial examples: higher produces
                           examples with larger l2 distortion, but more
                           strongly classified as adversarial.
        :param batch_size: Number of attacks to run simultaneously.
        :param learning_rate: The learning rate for the attack algorithm.
                              Smaller values produce better results but are
                              slower to converge.
        :param binary_search_steps: The number of times we perform binary
                                    search to find the optimal tradeoff-
                                    constant between the norm of the perturbation
                                    and confidence of the classification.
        :param max_iterations: The maximum number of iterations. Setting this
                               to a larger value will produce lower distortion
                               results. Using only a few iterations requires
                               a larger learning rate, and will produce larger
                               distortion results.
        :param abort_early: If true, allows early aborts if gradient descent
                            is unable to make progress (i.e., gets stuck in
                            a local minimum).
        :param initial_const: The initial tradeoff-constant to use to tune the
                              relative importance of the size of the perturbation
                              and confidence of classification.
                              If binary_search_steps is large, the initial
                              constant is not important. A smaller value of
                              this constant gives lower distortion results.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        import tensorflow as tf
        from .attacks_tf import CarliniWagnerL2 as CWL2
        self.parse_params(**kwargs)

        attack = CWL2(self.sess, self.model, self.batch_size, self.confidence,
                      'y_target' in kwargs, self.learning_rate,
                      self.binary_search_steps, self.max_iterations,
                      self.abort_early, self.initial_const, self.clip_min,
                      self.clip_max, self.nb_classes,
                      x.get_shape().as_list()[1:])

        if 'y' in kwargs and 'y_target' in kwargs:
            raise ValueError("Can not set both 'y' and 'y_target'.")
        elif 'y' in kwargs:
            labels = kwargs['y']
        elif 'y_target' in kwargs:
            labels = kwargs['y_target']
        else:
            preds = self.model.get_probs(x)
            preds_max = tf.reduce_max(preds, 1, keep_dims=True)
            original_predictions = tf.to_float(tf.equal(preds, preds_max))
            labels = original_predictions

        def cw_wrap(x_val, y_val):
            return np.array(attack.attack(x_val, y_val), dtype=np.float32)

        wrap = tf.py_func(cw_wrap, [x, labels], tf.float32)

        return wrap

    def parse_params(self,
                     y=None,
                     y_target=None,
                     nb_classes=10,
                     batch_size=1,
                     confidence=0,
                     learning_rate=5e-3,
                     binary_search_steps=5,
                     max_iterations=1000,
                     abort_early=True,
                     initial_const=1e-2,
                     clip_min=0,
                     clip_max=1):

        # ignore the y and y_target argument
        self.nb_classes = nb_classes
        self.batch_size = batch_size
        self.confidence = confidence
        self.learning_rate = learning_rate
        self.binary_search_steps = binary_search_steps
        self.max_iterations = max_iterations
        self.abort_early = abort_early
        self.initial_const = initial_const
        self.clip_min = clip_min
        self.clip_max = clip_max
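A usage sketch, assuming model, sess, x and X_test already exist. Carlini-Wagner L2 is far slower than the gradient-sign attacks, so modest batch sizes and iteration counts help when exploring.

cw = CarliniWagnerL2(model, sess=sess)
adv_x = cw.generate(x, batch_size=32, confidence=0,
                    learning_rate=5e-3, binary_search_steps=5,
                    max_iterations=1000, initial_const=1e-2,
                    clip_min=0., clip_max=1.)
adv = sess.run(adv_x, feed_dict={x: X_test[:32]})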