Example no. 1
0
def _check_first_dimension(x, tensor_name):
    """Validate that tensor `x` has a batch dimension of exactly 1.

    :param x: a tf.Tensor whose leading (batch) dimension is checked.
    :param tensor_name: name of the tensor, used in the error message.
    :return: `x`, possibly wrapped with a control dependency that enforces
             the batch-size check at graph execution time.
    :raises ValueError: if the static batch size is known and is not 1.
    """
    message = "Tensor {} should have batch_size of 1.".format(tensor_name)
    if x.get_shape().as_list()[0] is None:
        # Batch size is not known statically: attach a runtime assertion.
        check_batch = utils_tf.assert_equal(tf.shape(x)[0], 1, message=message)
        with tf.control_dependencies([check_batch]):
            x = tf.identity(x)
        # BUG FIX: return the tensor that carries the control dependency.
        # The original dropped it, so the runtime check could never fire
        # unless callers happened to re-derive it.
        return x
    elif x.get_shape().as_list()[0] != 1:
        raise ValueError(message)
    return x
Example no. 2
0
    def generate(self,
                 x,
                 y=None,
                 y_target=None,
                 eps=None,
                 clip_min=None,
                 clip_max=None,
                 nb_iter=None,
                 is_targeted=None,
                 early_stop_loss_threshold=None,
                 learning_rate=SPSA.DEFAULT_LEARNING_RATE,
                 delta=0.1,
                 spsa_samples=128,
                 batch_size=None,
                 spsa_iters=SPSA.DEFAULT_SPSA_ITERS,
                 is_debug=False,
                 epsilon=None,
                 num_steps=None):
        """
        Generate symbolic graph for adversarial examples.

        :param x: The model's symbolic inputs. Must be a batch of size 1.
        :param y: A Tensor or None. The index of the correct label.
        :param y_target: A Tensor or None. The index of the target label in a
                         targeted attack.
        :param eps: The size of the maximum perturbation, measured in the
                    L-infinity norm.
        :param clip_min: If specified, the minimum input value
        :param clip_max: If specified, the maximum input value
        :param nb_iter: The number of optimization steps.
        :param early_stop_loss_threshold: A float or None. If specified, the
                                          attack will end as soon as the loss
                                          is below `early_stop_loss_threshold`.
        :param learning_rate: Learning rate of ADAM optimizer.
        :param delta: Perturbation size used for SPSA approximation.
        :param spsa_samples: Number of inputs to evaluate at a single time.
                           The true batch size (the number of evaluated
                           inputs for each update) is `spsa_samples *
                           spsa_iters`
        :param batch_size: Deprecated param that is an alias for spsa_samples
        :param spsa_iters: Number of model evaluations before performing an
                           update, where each evaluation is on `spsa_samples`
                           different inputs.
        :param is_debug: If True, print the adversarial loss after each update.
        :param epsilon: Deprecated alias for `eps`
        :param num_steps: Deprecated alias for `nb_iter`.
        :param is_targeted: Deprecated argument. Ignored.
        """

        # Resolve the deprecated `epsilon` alias; specifying both is an error.
        if epsilon is not None:
            if eps is not None:
                raise ValueError(
                    "Should not specify both eps and its deprecated "
                    "alias, epsilon")
            warnings.warn(
                "`epsilon` is deprecated. Switch to `eps`. `epsilon` may "
                "be removed on or after 2019-04-15.")
            eps = epsilon
        del epsilon

        # Resolve the deprecated `num_steps` alias; specifying both is an error.
        if num_steps is not None:
            if nb_iter is not None:
                raise ValueError(
                    "Should not specify both nb_iter and its deprecated "
                    "alias, num_steps")
            warnings.warn("`num_steps` is deprecated. Switch to `nb_iter`. "
                          "`num_steps` may be removed on or after 2019-04-15.")
            nb_iter = num_steps
        del num_steps
        assert nb_iter is not None

        # Exactly one of y / y_target selects untargeted vs. targeted mode.
        if (y is not None) + (y_target is not None) != 1:
            raise ValueError(
                "Must specify exactly one of y (untargeted attack, "
                "cause the input not to be classified as this true "
                "label) and y_target (targeted attack, cause the "
                "input to be classified as this target label).")

        if is_targeted is not None:
            warnings.warn(
                "`is_targeted` is deprecated. Simply do not specify it."
                " It may become an error to specify it on or after "
                "2019-04-15.")
            # BUG FIX: the original `assert is_targeted == y_target is not None`
            # is a chained comparison, meaning
            # `(is_targeted == y_target) and (y_target is not None)`,
            # which is not the intended consistency check. Parenthesize so
            # the deprecated flag must agree with whether y_target was given.
            assert is_targeted == (y_target is not None)

        is_targeted = y_target is not None

        # SPSA operates on a single example; enforce batch_size == 1 either
        # statically (shape known) or at runtime via a control dependency.
        if x.get_shape().as_list()[0] is None:
            check_batch = utils_tf.assert_equal(tf.shape(x)[0], 1)
            with tf.control_dependencies([check_batch]):
                x = tf.identity(x)
        elif x.get_shape().as_list()[0] != 1:
            raise ValueError(
                "For SPSA, input tensor x must have batch_size of 1.")

        # Resolve the deprecated `batch_size` alias for spsa_samples.
        if batch_size is not None:
            warnings.warn(
                'The "batch_size" argument to SPSA is deprecated, and will '
                'be removed on 2019-03-17. '
                'Please use spsa_samples instead.')
            spsa_samples = batch_size

        optimizer = SPSAAdam(lr=learning_rate,
                             delta=delta,
                             num_samples=spsa_samples,
                             num_iters=spsa_iters)

        def loss_fn(x, label):
            """
            Margin logit loss, with correct sign for targeted vs untargeted loss.
            """
            logits = self.model.get_logits(x)
            # Targeted attacks minimize the margin loss (drive toward the
            # target class); untargeted attacks maximize it, hence the sign.
            loss_multiplier = 1 if is_targeted else -1
            return loss_multiplier * margin_logit_loss(
                logits,
                label,
                nb_classes=self.model.nb_classes or logits.get_shape()[-1])

        y_attack = y_target if is_targeted else y
        adv_x = projected_optimization(
            loss_fn,
            x,
            y_attack,
            eps,
            num_steps=nb_iter,
            optimizer=optimizer,
            early_stop_loss_threshold=early_stop_loss_threshold,
            is_debug=is_debug,
            clip_min=clip_min,
            clip_max=clip_max)
        return adv_x