Example #1
    def __init__(
        self, classifier, attacker="deepfool", attacker_params=None, delta=0.2, max_iter=20, eps=10.0, norm=np.inf
    ):
        """
        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param attacker: Adversarial attack name. Default is 'deepfool'. Supported names: 'carlini', 'carlini_inf',
                         'deepfool', 'fgsm', 'bim', 'pgd', 'margin', 'ead', 'newtonfool', 'jsma', 'vat'.
        :type attacker: `str`
        :param attacker_params: Parameters specific to the adversarial attack. If this parameter is not specified,
                                the default parameters of the chosen attack will be used.
        :type attacker_params: `dict`
        :param delta: Desired accuracy; the attack iterates until the fooling rate exceeds `1 - delta` or
                      `max_iter` is reached.
        :type delta: `float`
        :param max_iter: The maximum number of iterations for computing the universal perturbation.
        :type max_iter: `int`
        :param eps: Attack step size (input variation).
        :type eps: `float`
        :param norm: The norm of the adversarial perturbation. Possible values: np.inf, 2.
        :type norm: `int`
        """
        super(UniversalPerturbation, self).__init__(classifier)
        if not isinstance(classifier, ClassifierNeuralNetwork) or not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierNeuralNetwork, ClassifierGradients], classifier)

        kwargs = {
            "attacker": attacker,
            "attacker_params": attacker_params,
            "delta": delta,
            "max_iter": max_iter,
            "eps": eps,
            "norm": norm,
        }
        self.set_params(**kwargs)
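
A minimal usage sketch for this constructor, assuming a trained ART `classifier` and a NumPy batch `x_train` (both placeholders) and the library's standard `generate` interface:

    attack = UniversalPerturbation(classifier, attacker="deepfool", delta=0.2, max_iter=20)
    x_adv = attack.generate(x_train)  # every sample perturbed by one shared noise vector
    noise = attack.noise              # the universal perturbation computed by generate()
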
    def __init__(self,
                 classifier,
                 max_iter=10,
                 finite_diff=1e-6,
                 eps=0.1,
                 batch_size=1):
        """
        Create a VirtualAdversarialMethod instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param eps: Attack step (max input variation).
        :type eps: `float`
        :param finite_diff: The finite difference parameter.
        :type finite_diff: `float`
        :param max_iter: The maximum number of iterations.
        :type max_iter: `int`
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :type batch_size: `int`
        """
        super(VirtualAdversarialMethod, self).__init__(classifier)
        if not isinstance(classifier, ClassifierNeuralNetwork) or not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients, ClassifierNeuralNetwork], classifier)

        kwargs = {
            "finite_diff": finite_diff,
            "eps": eps,
            "max_iter": max_iter,
            "batch_size": batch_size
        }
        self.set_params(**kwargs)
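
A similar hedged sketch, with `x_test` as a placeholder input batch:

    vat = VirtualAdversarialMethod(classifier, max_iter=10, eps=0.1, batch_size=32)
    x_adv = vat.generate(x_test)  # perturbs along the virtual adversarial direction
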
    def __init__(
        self,
        classifier,
        norm=np.inf,
        eps=0.3,
        eps_step=0.1,
        max_iter=100,
        targeted=False,
        num_random_init=0,
        batch_size=1,
        random_eps=False,
    ):
        """
        Create a :class:`.ProjectedGradientDescent` instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param norm: The norm of the adversarial perturbation. Possible values: np.inf, 1 or 2.
        :type norm: `int`
        :param eps: Maximum perturbation that the attacker can introduce.
        :type eps: `float`
        :param eps_step: Attack step size (input variation) at each iteration.
        :type eps_step: `float`
        :param random_eps: When True, epsilon is drawn randomly from a truncated normal distribution. The literature
                           suggests this for FGSM-based training to generalize across different epsilons. `eps_step`
                           is modified to preserve the ratio of `eps / eps_step`. The effectiveness of this method
                           with PGD is untested (https://arxiv.org/pdf/1611.01236.pdf).
        :type random_eps: `bool`
        :param max_iter: The maximum number of iterations.
        :type max_iter: `int`
        :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
        :type targeted: `bool`
        :param num_random_init: Number of random initialisations within the epsilon ball. For `num_random_init=0`
            the attack starts at the original input.
        :type num_random_init: `int`
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :type batch_size: `int`
        """
        super(ProjectedGradientDescent, self).__init__(
            classifier,
            norm=norm,
            eps=eps,
            eps_step=eps_step,
            targeted=targeted,
            num_random_init=num_random_init,
            batch_size=batch_size,
            minimal=False,
        )
        if not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients], classifier)

        kwargs = {"max_iter": max_iter, "random_eps": random_eps}
        ProjectedGradientDescent.set_params(self, **kwargs)

        if self.random_eps:
            lower, upper = 0, eps
            mu, sigma = 0, (eps / 2)
            self.norm_dist = truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)

        self._project = True
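
A usage sketch with placeholder inputs; the one-hot `y_target` is only needed for the targeted variant:

    pgd = ProjectedGradientDescent(classifier, norm=np.inf, eps=0.3, eps_step=0.1, max_iter=100)
    x_adv = pgd.generate(x_test)
    # targeted variant, steered towards y_target:
    pgd_t = ProjectedGradientDescent(classifier, eps=0.3, eps_step=0.1, targeted=True)
    x_adv_t = pgd_t.generate(x_test, y=y_target)
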
    def __init__(self,
                 classifier,
                 max_iter=100,
                 epsilon=1e-6,
                 nb_grads=10,
                 batch_size=1):
        """
        Create a DeepFool attack instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param max_iter: The maximum number of iterations.
        :type max_iter: `int`
        :param epsilon: Overshoot parameter.
        :type epsilon: `float`
        :param nb_grads: The number of class gradients (top nb_grads w.r.t. prediction) to compute. This way only the
                         most likely classes are considered, speeding up the computation.
        :type nb_grads: `int`
        :param batch_size: Batch size.
        :type batch_size: `int`
        """
        super(DeepFool, self).__init__(classifier=classifier)
        if not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients],
                                  classifier)

        params = {
            "max_iter": max_iter,
            "epsilon": epsilon,
            "nb_grads": nb_grads,
            "batch_size": batch_size
        }
        self.set_params(**params)
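
A short sketch; keeping `nb_grads` below the number of classes trades attack strength for speed, as the docstring notes:

    attack = DeepFool(classifier, max_iter=100, epsilon=1e-6, nb_grads=10)
    x_adv = attack.generate(x_test)  # x_test is a placeholder NumPy batch
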
Example #5
    def __init__(
        self,
        classifier,
        aux_classifier=None,
        confidence=0.0,
        targeted=False,
        learning_rate=0.01,
        max_iter=10,
        max_halving=5,
        max_doubling=5,
        eps=0.3,
        batch_size=128,
    ):
        """
        Create a Carlini L_Inf attack instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther away
                from the original input, but classified with higher confidence as the target class.
        :type confidence: `float`
        :param targeted: Should the attack target one specific class.
        :type targeted: `bool`
        :param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better
                results but are slower to converge.
        :type learning_rate: `float`
        :param max_iter: The maximum number of iterations.
        :type max_iter: `int`
        :param max_halving: Maximum number of halving steps in the line search optimization.
        :type max_halving: `int`
        :param max_doubling: Maximum number of doubling steps in the line search optimization.
        :type max_doubling: `int`
        :param eps: An upper bound for the L_inf norm of the adversarial perturbation.
        :type eps: `float`
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :type batch_size: `int`
        """
        super(CarliniLInfMethod, self).__init__(classifier)
        if not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients],
                                  classifier)

        kwargs = {
            "confidence": confidence,
            "targeted": targeted,
            "learning_rate": learning_rate,
            "max_iter": max_iter,
            "max_halving": max_halving,
            "max_doubling": max_doubling,
            "eps": eps,
            "batch_size": batch_size,
        }
        assert self.set_params(**kwargs)

        # There is one internal hyperparameter:
        # Smooth arguments of arctanh by multiplying with this constant to avoid division by zero:
        self._tanh_smoother = 0.999999
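
A hedged sketch of targeted use, assuming one-hot target labels `y_target`:

    cw_inf = CarliniLInfMethod(classifier, targeted=True, eps=0.3, batch_size=128)
    x_adv = cw_inf.generate(x_test, y=y_target)
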
    def __init__(
        self,
        classifier,
        confidence=0.0,
        targeted=False,
        learning_rate=1e-2,
        binary_search_steps=9,
        max_iter=100,
        beta=1e-3,
        initial_const=1e-3,
        batch_size=1,
        decision_rule="EN",
    ):
        """
        Create an ElasticNet attack instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther
               away from the original input, but classified with higher confidence as the target class.
        :type confidence: `float`
        :param targeted: Should the attack target one specific class.
        :type targeted: `bool`
        :param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better
               results but are slower to converge.
        :type learning_rate: `float`
        :param binary_search_steps: Number of times to adjust constant with binary search (positive value).
        :type binary_search_steps: `int`
        :param max_iter: The maximum number of iterations.
        :type max_iter: `int`
        :param beta: Hyperparameter trading off L2 minimization for L1 minimization.
        :type beta: `float`
        :param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance
               and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in
               Carlini and Wagner (2016).
        :type initial_const: `float`
        :param batch_size: Internal size of batches on which adversarial samples are generated.
        :type batch_size: `int`
        :param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.
        :type decision_rule: `str`
        """
        super(ElasticNet, self).__init__(classifier)
        if not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients],
                                  classifier)

        kwargs = {
            "confidence": confidence,
            "targeted": targeted,
            "learning_rate": learning_rate,
            "binary_search_steps": binary_search_steps,
            "max_iter": max_iter,
            "beta": beta,
            "initial_const": initial_const,
            "batch_size": batch_size,
            "decision_rule": decision_rule,
        }
        assert self.set_params(**kwargs)
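
A sketch with placeholder data; `decision_rule` picks the distance (EN, L1 or L2) by which the best adversarial candidate is selected:

    ead = ElasticNet(classifier, beta=1e-3, decision_rule="EN")
    x_adv = ead.generate(x_test)
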
Example #7
    def __init__(self, classifier):
        """
        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        """
        if not isinstance(classifier, Classifier) and classifier is not None:
            raise ClassifierError(self.__class__, [Classifier], classifier)

        self.classifier = classifier
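
This is the base-class constructor (in ART, the `Attack` base class); concrete attacks subclass it and implement `generate`. A minimal sketch of the assumed interface:

    class NoOpAttack(Attack):
        def generate(self, x, y=None, **kwargs):
            return x  # placeholder: a real attack returns perturbed copies of x
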
    def __init__(
        self,
        classifier,
        target=0,
        rotation_max=22.5,
        scale_min=0.1,
        scale_max=1.0,
        learning_rate=5.0,
        max_iter=500,
        clip_patch=None,
        batch_size=16,
    ):
        """
        Create an instance of the :class:`.AdversarialPatch`.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param target: The target label for the created patch.
        :type target: `int`
        :param rotation_max: The maximum rotation applied to random patches. The value is expected to be in the
               range `[0, 180]`.
        :type rotation_max: `float`
        :param scale_min: The minimum scaling applied to random patches. The value should be in the range `[0, 1]`,
               but less than `scale_max`.
        :type scale_min: `float`
        :param scale_max: The maximum scaling applied to random patches. The value should be in the range `[0, 1]`, but
               larger than `scale_min`.
        :type scale_max: `float`
        :param learning_rate: The learning rate of the optimization.
        :type learning_rate: `float`
        :param max_iter: The number of optimization steps.
        :type max_iter: `int`
        :param clip_patch: The minimum and maximum values for each channel.
        :type clip_patch: [(float, float), (float, float), (float, float)]
        :param batch_size: The size of the training batch.
        :type batch_size: `int`
        """
        super(AdversarialPatch, self).__init__(classifier=classifier)
        if not isinstance(classifier, ClassifierNeuralNetwork) or not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierNeuralNetwork, ClassifierGradients], classifier)

        kwargs = {
            "target": target,
            "rotation_max": rotation_max,
            "scale_min": scale_min,
            "scale_max": scale_max,
            "learning_rate": learning_rate,
            "max_iter": max_iter,
            "batch_size": batch_size,
            "clip_patch": clip_patch,
        }
        self.set_params(**kwargs)
        self.patch = None
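
A hedged sketch: unlike the other attacks here, ART's AdversarialPatch produces a patch (plus mask) rather than per-sample adversarial inputs, and the patch is applied in a second step:

    ap = AdversarialPatch(classifier, target=0, max_iter=500, batch_size=16)
    patch, patch_mask = ap.generate(x_train)        # return values as in ART, assumed here
    x_patched = ap.apply_patch(x_train, scale=0.5)  # apply_patch is ART's helper, assumed here
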
Example #9
    def __init__(
        self,
        classifier,
        norm=np.inf,
        eps=0.3,
        eps_step=0.1,
        targeted=False,
        num_random_init=0,
        batch_size=1,
        minimal=False,
        distribution=None,
    ):
        """
        Create a :class:`.FastGradientMethod` instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param norm: The norm of the adversarial perturbation. Possible values: np.inf, 1 or 2.
        :type norm: `int`
        :param eps: Attack step size (input variation).
        :type eps: `float`
        :param eps_step: Step size of input variation for minimal perturbation computation.
        :type eps_step: `float`
        :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
        :type targeted: `bool`
        :param num_random_init: Number of random initialisations within the epsilon ball. For `num_random_init=0`
            the attack starts at the original input.
        :type num_random_init: `int`
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :type batch_size: `int`
        :param minimal: Indicates if computing the minimal perturbation (True). If True, also define `eps_step` for
                        the step size and eps for the maximum perturbation.
        :type minimal: `bool`
        :param distribution: Configuration of the distribution to sample transformations from.
        :type distribution: `dict`
        """
        super(FastGradientMethod, self).__init__(classifier)

        if not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients],
                                  classifier)

        kwargs = {
            "norm": norm,
            "eps": eps,
            "eps_step": eps_step,
            "targeted": targeted,
            "num_random_init": num_random_init,
            "batch_size": batch_size,
            "minimal": minimal,
            "distribution": distribution,
        }

        FastGradientMethod.set_params(self, **kwargs)

        self._project = True
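
A sketch of both modes; with `minimal=True` the attack searches in `eps_step` increments, up to `eps`, for the smallest perturbation that changes each prediction:

    fgm = FastGradientMethod(classifier, eps=0.3)
    x_adv = fgm.generate(x_test)
    fgm_min = FastGradientMethod(classifier, eps=0.3, eps_step=0.01, minimal=True)
    x_adv_min = fgm_min.generate(x_test)
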
    def __init__(self, classifier, theta=0.1, gamma=1.0, batch_size=1):
        """
        Create a SaliencyMapMethod instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param theta: Amount of perturbation introduced to each modified feature per step (can be positive or negative).
        :type theta: `float`
        :param gamma: Maximum fraction of features being perturbed (between 0 and 1).
        :type gamma: `float`
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :type batch_size: `int`
        """
        super(SaliencyMapMethod, self).__init__(classifier)
        if not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients],
                                  classifier)

        kwargs = {"theta": theta, "gamma": gamma, "batch_size": batch_size}
        self.set_params(**kwargs)
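
A sketch with placeholder data; `gamma=1.0` allows every feature to be modified:

    jsma = SaliencyMapMethod(classifier, theta=0.1, gamma=1.0)
    x_adv = jsma.generate(x_test)
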
    def __init__(
        self,
        classifier,
        norm=np.inf,
        eps=1e-1,
        targeted=False,
        batch_size=1,
    ):
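        """
        Create a NoiseAttack instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param norm: The norm of the adversarial perturbation. Possible values: np.inf, 1 or 2.
        :type norm: `int`
        :param eps: Attack step size (input variation).
        :type eps: `float`
        :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
        :type targeted: `bool`
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :type batch_size: `int`
        """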
        super().__init__(classifier)
        if not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients],
                                  classifier)

        kwargs = {
            "norm": norm,
            "eps": eps,
            "targeted": targeted,
            "batch_size": batch_size,
        }

        NoiseAttack.set_params(self, **kwargs)

        self._project = True
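
NoiseAttack does not appear in stock ART; assuming it follows the same `generate` interface as the other attacks on this page:

    noise_attack = NoiseAttack(classifier, norm=np.inf, eps=1e-1)
    x_adv = noise_attack.generate(x_test)  # interface assumed; x_test is a placeholder
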
    def __init__(
        self,
        classifier,
        confidence=0.0,
        targeted=False,
        learning_rate=0.01,
        binary_search_steps=10,
        max_iter=10,
        initial_const=0.01,
        max_halving=5,
        max_doubling=5,
        batch_size=1,
    ):
        """
        Create a Carlini L_2 attack instance.

        :param classifier: A trained classifier.
        :type classifier: :class:`.Classifier`
        :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther away,
                from the original input, but classified with higher confidence as the target class.
        :type confidence: `float`
        :param targeted: Should the attack target one specific class.
        :type targeted: `bool`
        :param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better results
                but are slower to converge.
        :type learning_rate: `float`
        :param binary_search_steps: Number of times to adjust constant with binary search (positive value). If
                                    `binary_search_steps` is large, then the algorithm is not very sensitive to the
                                    value of `initial_const`. Note that the values gamma=0.999999 and c_upper=10e10 are
                                    hardcoded with the same values used by the authors of the method.
        :type binary_search_steps: `int`
        :param max_iter: The maximum number of iterations.
        :type max_iter: `int`
        :param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance and
                confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in
                Carlini and Wagner (2016).
        :type initial_const: `float`
        :param max_halving: Maximum number of halving steps in the line search optimization.
        :type max_halving: `int`
        :param max_doubling: Maximum number of doubling steps in the line search optimization.
        :type max_doubling: `int`
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :type batch_size: `int`
        """
        super(CarliniL2Method, self).__init__(classifier)
        if not isinstance(classifier, ClassifierGradients):
            raise ClassifierError(self.__class__, [ClassifierGradients],
                                  classifier)

        kwargs = {
            "confidence": confidence,
            "targeted": targeted,
            "learning_rate": learning_rate,
            "binary_search_steps": binary_search_steps,
            "max_iter": max_iter,
            "initial_const": initial_const,
            "max_halving": max_halving,
            "max_doubling": max_doubling,
            "batch_size": batch_size,
        }
        assert self.set_params(**kwargs)

        # There are internal hyperparameters:
        # Abort binary search for c if it exceeds this threshold (suggested in Carlini and Wagner (2016)):
        self._c_upper_bound = 10e10

        # Smooth arguments of arctanh by multiplying with this constant to avoid division by zero.
        # It appears this is what Carlini and Wagner (2016) are alluding to in their footnote 8. However, it is not
        # clear how their proposed trick ("instead of scaling by 1/2 we scale by 1/2 + eps") works in detail.
        self._tanh_smoother = 0.999999
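
A final sketch; per the docstring, with enough `binary_search_steps` the result is largely insensitive to `initial_const`:

    cw2 = CarliniL2Method(classifier, confidence=0.0, binary_search_steps=10, max_iter=10)
    x_adv = cw2.generate(x_test)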