def clip_eta(eta, ord, eps):
    """
    Helper function to clip the perturbation to the epsilon norm ball.
    :param eta: A tensor with the current perturbation.
    :param ord: Order of the norm (mimics NumPy).
                Possible values: np.inf, 1 or 2.
    :param eps: Epsilon, bound of the perturbation.
    """

    # Clipping perturbation eta to self.ord norm ball
    if ord not in [np.inf, 1, 2]:
        raise ValueError('ord must be np.inf, 1, or 2.')
    reduc_ind = list(range(1, len(eta.get_shape())))
    avoid_zero_div = 1e-12
    if ord == np.inf:
        eta = clip_by_value(eta, -eps, eps)
    else:
        if ord == 1:
            raise NotImplementedError("The expression below is not the correct way"
                                      " to project onto the L1 norm ball.")
            norm = tf.maximum(avoid_zero_div,
                              reduce_sum(tf.abs(eta), reduc_ind, keepdims=True))
        elif ord == 2:
            # avoid_zero_div must go inside sqrt to avoid a divide by zero
            # in the gradient through this operation
            norm = tf.sqrt(tf.maximum(avoid_zero_div,
                                      reduce_sum(tf.square(eta), reduc_ind,
                                                 keepdims=True)))
        # We must *clip* to within the norm ball, not *normalize* onto the
        # surface of the ball
        factor = tf.minimum(1., div(eps, norm))
        eta = eta * factor
    return eta
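# A minimal NumPy sketch (illustration only, not part of the library) of the L2
# branch above: the perturbation is *clipped* into the eps ball, i.e. scaled down
# only when its norm exceeds eps, rather than normalized onto the sphere.
import numpy as np

def clip_eta_l2_np(eta, eps, avoid_zero_div=1e-12):
    # Per-example L2 norms over all non-batch dimensions
    norms = np.sqrt(np.maximum(avoid_zero_div,
                               np.sum(np.square(eta),
                                      axis=tuple(range(1, eta.ndim)),
                                      keepdims=True)))
    factor = np.minimum(1., eps / norms)
    return eta * factor

eta_demo = np.array([[3., 4.], [0.1, 0.1]])    # norms: 5.0 and ~0.14
print(clip_eta_l2_np(eta_demo, eps=1.0))       # first row rescaled to norm 1, second unchanged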
def __init__(self, sess, x, logits, targeted_label, binary_search_steps,
             max_iterations, initial_const, clip_min, clip_max, nb_classes,
             batch_size):
    """
    Return a tensor that constructs adversarial examples for the given input.
    Generate uses tf.py_func in order to operate over tensors.
    :param sess: a TF session.
    :param x: A tensor with the inputs.
    :param logits: A tensor with the model's output logits.
    :param targeted_label: A tensor with the target labels.
    :param binary_search_steps: The number of times we perform binary search
                                to find the optimal trade-off constant between
                                the norm of the perturbation and the
                                cross-entropy loss of the classification.
    :param max_iterations: The maximum number of iterations.
    :param initial_const: The initial trade-off constant to use to tune the
                          relative importance of the size of the perturbation
                          and the cross-entropy loss of the classification.
    :param clip_min: Minimum input component value
    :param clip_max: Maximum input component value
    :param nb_classes: The number of classes in the model's output.
    :param batch_size: Number of attacks to run simultaneously.
    """
    self.sess = sess
    self.x = x
    self.logits = logits
    assert logits.op.type != 'Softmax'
    self.targeted_label = targeted_label
    self.binary_search_steps = binary_search_steps
    self.max_iterations = max_iterations
    self.initial_const = initial_const
    self.clip_min = clip_min
    self.clip_max = clip_max
    self.batch_size = batch_size

    self.repeat = self.binary_search_steps >= 10
    self.shape = tuple([self.batch_size] +
                       list(self.x.get_shape().as_list()[1:]))
    self.ori_img = tf.Variable(
        np.zeros(self.shape), dtype=tf_dtype, name='ori_img')
    self.const = tf.Variable(
        np.zeros(self.batch_size), dtype=tf_dtype, name='const')

    self.score = softmax_cross_entropy_with_logits(
        labels=self.targeted_label, logits=self.logits)
    self.l2dist = reduce_sum(tf.square(self.x - self.ori_img))
    # A small self.const results in a small adversarial perturbation
    self.loss = reduce_sum(self.score * self.const) + self.l2dist
    self.grad, = tf.gradients(self.loss, self.x)
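# Illustrative NumPy version (not library code) of the objective assembled above:
# loss = sum(const * cross_entropy(target, logits)) + ||x - ori_img||_2^2.
# The binary search over const trades off misclassification against the size of
# the perturbation; a smaller const weights the L2 term more heavily.
import numpy as np

def attack_objective_np(logits, target_onehot, x, x_orig, const):
    # Per-example softmax cross-entropy against the target labels
    log_probs = logits - np.log(np.sum(np.exp(logits), axis=1, keepdims=True))
    xent = -np.sum(target_onehot * log_probs, axis=1)
    # L2 distance to the original images, summed over the whole batch
    l2dist = np.sum(np.square(x - x_orig))
    return np.sum(const * xent) + l2dist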
def kl_with_logits(p_logits, q_logits, scope=None,
                   loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES):
    """Helper function to compute kl-divergence KL(p || q)"""
    with tf.name_scope(scope, "kl_divergence") as name:
        p = tf.nn.softmax(p_logits)
        p_log = tf.nn.log_softmax(p_logits)
        q_log = tf.nn.log_softmax(q_logits)
        loss = reduce_mean(reduce_sum(p * (p_log - q_log), axis=1), name=name)
        tf.losses.add_loss(loss, loss_collection)
        return loss
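# NumPy sketch (illustration only) of the same quantity:
# KL(p || q) = mean_over_batch( sum_i p_i * (log p_i - log q_i) ),
# where p and q are the softmax distributions over the two sets of logits.
import numpy as np

def kl_with_logits_np(p_logits, q_logits):
    p_log = p_logits - np.log(np.sum(np.exp(p_logits), axis=1, keepdims=True))
    q_log = q_logits - np.log(np.sum(np.exp(q_logits), axis=1, keepdims=True))
    p = np.exp(p_log)
    return np.mean(np.sum(p * (p_log - q_log), axis=1))

demo_logits = np.array([[2.0, 0.0, -1.0]])
print(kl_with_logits_np(demo_logits, demo_logits))   # 0.0: identical distributions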
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
    """
    Helper function to normalize a batch of vectors.
    :param x: the input placeholder
    :param epsilon: stabilizes division
    :return: the batch of l2 normalized vectors
    """
    with tf.name_scope(scope, "l2_batch_normalize") as name_scope:
        x_shape = tf.shape(x)
        x = tf.contrib.layers.flatten(x)
        x /= (epsilon + reduce_max(tf.abs(x), 1, keepdims=True))
        square_sum = reduce_sum(tf.square(x), 1, keepdims=True)
        x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
        x_norm = tf.multiply(x, x_inv_norm)
        return tf.reshape(x_norm, x_shape, name_scope)
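# NumPy sketch (illustration only) of the normalization above: each example is
# first scaled by its max absolute value for numerical stability and then divided
# by its L2 norm, so every output row has (approximately) unit L2 norm.
import numpy as np

def l2_batch_normalize_np(x, epsilon=1e-12):
    flat = x.reshape(x.shape[0], -1)
    flat = flat / (epsilon + np.max(np.abs(flat), axis=1, keepdims=True))
    square_sum = np.sum(np.square(flat), axis=1, keepdims=True)
    x_norm = flat / np.sqrt(np.sqrt(epsilon) + square_sum)
    return x_norm.reshape(x.shape)

v = np.array([[3., 4.], [1., 0.]])
print(np.linalg.norm(l2_batch_normalize_np(v), axis=1))   # ~[1. 1.]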
def optimize_linear(grad, eps, ord=np.inf):
    """
    Solves for the optimal input to a linear function under a norm constraint.

    Optimal_perturbation = argmax_{eta, ||eta||_{ord} < eps} dot(eta, grad)

    :param grad: tf tensor containing a batch of gradients
    :param eps: float scalar specifying size of constraint region
    :param ord: int specifying order of norm
    :returns: tf tensor containing optimal perturbation
    """

    # In Python 2, the `list` call in the following line is redundant / harmless.
    # In Python 3, the `list` call is needed to convert the iterator returned by
    # `range` into a list.
    red_ind = list(range(1, len(grad.get_shape())))
    avoid_zero_div = 1e-12
    if ord == np.inf:
        # Take sign of gradient
        optimal_perturbation = tf.sign(grad)
        # The following line should not change the numerical results.
        # It applies only because `optimal_perturbation` is the output of
        # a `sign` op, which has zero derivative anyway.
        # It should not be applied for the other norms, where the
        # perturbation has a non-zero derivative.
        optimal_perturbation = tf.stop_gradient(optimal_perturbation)
    elif ord == 1:
        abs_grad = tf.abs(grad)
        sign = tf.sign(grad)
        max_abs_grad = tf.reduce_max(abs_grad, red_ind, keepdims=True)
        tied_for_max = tf.to_float(tf.equal(abs_grad, max_abs_grad))
        num_ties = tf.reduce_sum(tied_for_max, red_ind, keepdims=True)
        optimal_perturbation = sign * tied_for_max / num_ties
    elif ord == 2:
        square = tf.maximum(avoid_zero_div,
                            reduce_sum(tf.square(grad),
                                       reduction_indices=red_ind,
                                       keepdims=True))
        optimal_perturbation = grad / tf.sqrt(square)
    else:
        raise NotImplementedError("Only L-inf, L1 and L2 norms are "
                                  "currently implemented.")

    # Scale perturbation to be the solution for the norm=eps rather than
    # norm=1 problem
    scaled_perturbation = utils_tf.mul(eps, optimal_perturbation)
    return scaled_perturbation
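# Worked NumPy example (illustration only) of the closed forms above for a single
# gradient row: L-inf gives eps * sign(grad); L1 puts all of eps on the
# largest-magnitude coordinate (split evenly across ties); L2 gives
# eps * grad / ||grad||_2.
import numpy as np

grad_demo = np.array([[0.5, -2.0, 1.0]])
eps_demo = 0.3

linf = eps_demo * np.sign(grad_demo)
tied = (np.abs(grad_demo) == np.max(np.abs(grad_demo), axis=1, keepdims=True)).astype(float)
l1 = eps_demo * np.sign(grad_demo) * tied / np.sum(tied, axis=1, keepdims=True)
l2 = eps_demo * grad_demo / np.sqrt(np.sum(np.square(grad_demo), axis=1, keepdims=True))
print(linf)   # [[ 0.3 -0.3  0.3]]
print(l1)     # [[ 0.  -0.3  0. ]]
print(l2)     # eps-scaled, unit-L2-norm gradient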
def attack_single_step(self, x, eta, g_feat):
    """
    TensorFlow implementation of the Fast Feature Gradient. This is a single
    step attack similar to Fast Gradient Method that attacks an internal
    representation.

    :param x: the input placeholder
    :param eta: A tensor the same shape as x that holds the perturbation.
    :param g_feat: model's internal tensor for guide
    :return: a tensor for the adversarial example
    """

    adv_x = x + eta
    a_feat = self.model.fprop(adv_x)[self.layer]

    # feat.shape = (batch, c) or (batch, w, h, c)
    axis = list(range(1, len(a_feat.shape)))

    # Compute loss
    # This is a targeted attack, hence the negative sign
    loss = -reduce_sum(tf.square(a_feat - g_feat), axis)

    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, adv_x)

    # Multiply by constant epsilon
    scaled_signed_grad = self.eps_iter * tf.sign(grad)

    # Add perturbation to original example to obtain adversarial example
    adv_x = adv_x + scaled_signed_grad

    # If clipping is needed,
    # reset all values outside of [clip_min, clip_max]
    if (self.clip_min is not None) and (self.clip_max is not None):
        adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

    adv_x = tf.stop_gradient(adv_x)

    eta = adv_x - x
    eta = clip_eta(eta, self.ord, self.eps)

    return eta
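# NumPy sketch (illustration only) of the objective above: the negated squared
# distance between the adversarial example's internal features and the guide's
# features. Ascending this loss (the step adds eps_iter * sign(grad)) pulls the
# internal representation toward the guide image's representation.
import numpy as np

a_feat_demo = np.array([[1.0, 2.0], [0.0, -1.0]])   # features of the current adversarial example
g_feat_demo = np.array([[0.5, 2.5], [1.0, -1.0]])   # features of the guide example
loss_demo = -np.sum(np.square(a_feat_demo - g_feat_demo), axis=1)
print(loss_demo)   # [-0.5 -1. ]; the loss reaches its maximum of 0 when the features match exactly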
def fgm(x, logits, y=None, eps=0.3, ord=np.inf, clip_min=None, clip_max=None,
        targeted=False, sanity_checks=True):
    """
    TensorFlow implementation of the Fast Gradient Method.
    :param x: the input placeholder
    :param logits: output of model.get_logits
    :param y: (optional) A placeholder for the true labels. If targeted is
              true, then provide the target label. Otherwise, only provide
              this parameter if you'd like to use true labels when crafting
              adversarial samples; if it is not provided, model predictions
              are used as labels to avoid the "label leaking" effect
              (explained in this paper: https://arxiv.org/abs/1611.01236).
              Default is None. Labels should be one-hot-encoded.
    :param eps: the epsilon (input variation parameter)
    :param ord: (optional) Order of the norm (mimics NumPy).
                Possible values: np.inf, 1 or 2.
    :param clip_min: Minimum float value for adversarial example components
    :param clip_max: Maximum float value for adversarial example components
    :param targeted: Is the attack targeted or untargeted? Untargeted, the
                     default, will try to make the label incorrect. Targeted
                     will instead try to move in the direction of being more
                     like y.
    :param sanity_checks: bool, if True, include asserts checking that the
                          input is within the specified [clip_min, clip_max]
                          range.
    :return: a tensor for the adversarial example
    """

    asserts = []

    # If a data range was specified, check that the input was in that range
    if clip_min is not None:
        asserts.append(
            utils_tf.assert_greater_equal(x, tf.cast(clip_min, x.dtype)))

    if clip_max is not None:
        asserts.append(
            utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))

    # Make sure the caller has not passed probs by accident
    assert logits.op.type != 'Softmax'

    if y is None:
        # Using model predictions as ground truth to avoid label leaking
        preds_max = reduce_max(logits, 1, keepdims=True)
        y = tf.to_float(tf.equal(logits, preds_max))
        y = tf.stop_gradient(y)
    y = y / reduce_sum(y, 1, keepdims=True)

    # Compute loss
    loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
    if targeted:
        loss = -loss

    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, x)

    optimal_perturbation = optimize_linear(grad, eps, ord)

    # Add perturbation to original example to obtain adversarial example
    adv_x = x + optimal_perturbation

    # If clipping is needed, reset all values outside of [clip_min, clip_max]
    if (clip_min is not None) or (clip_max is not None):
        # We don't currently support one-sided clipping
        assert clip_min is not None and clip_max is not None
        adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

    if sanity_checks:
        with tf.control_dependencies(asserts):
            adv_x = tf.identity(adv_x)

    return adv_x
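# Minimal usage sketch, assuming TensorFlow 1.x and that fgm (above) and the
# helpers it relies on are in scope. The one-layer dense "model" is only a
# stand-in for illustration, not part of the library.
import numpy as np
import tensorflow as tf

x_in = tf.placeholder(tf.float32, shape=(None, 784))
demo_logits = tf.layers.dense(x_in, 10)   # stand-in model producing pre-softmax logits
adv_x_op = fgm(x_in, demo_logits, eps=0.3, ord=np.inf, clip_min=0., clip_max=1.)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x_val = np.random.rand(5, 784).astype(np.float32)
    adv_val = sess.run(adv_x_op, feed_dict={x_in: x_val})
    # FGM perturbs each component by at most eps under the L-inf norm
    assert np.all(np.abs(adv_val - x_val) <= 0.3 + 1e-6)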
def generate(self, x, **kwargs):
    """
    Generate symbolic graph for adversarial examples and return.

    :param x: The model's symbolic inputs.
    :param kwargs: Keyword arguments. See `parse_params` for documentation.
    """
    # Parse and save attack-specific parameters
    assert self.parse_params(**kwargs)

    asserts = []

    # If a data range was specified, check that the input was in that range
    if self.clip_min is not None:
        asserts.append(
            utils_tf.assert_greater_equal(x, tf.cast(self.clip_min, x.dtype)))

    if self.clip_max is not None:
        asserts.append(
            utils_tf.assert_less_equal(x, tf.cast(self.clip_max, x.dtype)))

    # Initialize loop variables
    momentum = tf.zeros_like(x)
    adv_x = x

    # Fix labels to the first model predictions for loss computation
    y, _nb_classes = self.get_or_guess_labels(x, kwargs)
    y = y / reduce_sum(y, 1, keepdims=True)
    targeted = (self.y_target is not None)

    def cond(i, _, __):
        return tf.less(i, self.nb_iter)

    def body(i, ax, m):
        logits = self.model.get_logits(ax)
        loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
        if targeted:
            loss = -loss

        # Define gradient of loss wrt input
        grad, = tf.gradients(loss, ax)

        # Normalize current gradient and add it to the accumulated gradient
        red_ind = list(range(1, len(grad.get_shape())))
        avoid_zero_div = tf.cast(1e-12, grad.dtype)
        grad = grad / tf.maximum(
            avoid_zero_div,
            reduce_mean(tf.abs(grad), red_ind, keepdims=True))
        m = self.decay_factor * m + grad

        optimal_perturbation = optimize_linear(m, self.eps_iter, self.ord)
        if self.ord == 1:
            raise NotImplementedError("This attack hasn't been tested for ord=1. "
                                      "It's not clear that FGM makes a good inner "
                                      "loop step for iterative optimization since "
                                      "it updates just one coordinate at a time.")

        # Update and clip adversarial example in current iteration
        ax = ax + optimal_perturbation
        ax = x + utils_tf.clip_eta(ax - x, self.ord, self.eps)

        if self.clip_min is not None and self.clip_max is not None:
            ax = utils_tf.clip_by_value(ax, self.clip_min, self.clip_max)

        ax = tf.stop_gradient(ax)

        return i + 1, ax, m

    _, adv_x, _ = tf.while_loop(cond, body, (tf.zeros([]), adv_x, momentum),
                                back_prop=True,
                                maximum_iterations=self.nb_iter)

    if self.sanity_checks:
        with tf.control_dependencies(asserts):
            adv_x = tf.identity(adv_x)

    return adv_x
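# NumPy sketch (illustration only) of one iteration of the momentum update in
# body() for ord=np.inf: normalize the gradient by its mean absolute value,
# accumulate it with decay, step by eps_iter * sign(momentum), then project the
# total perturbation back into the eps ball around the original input.
import numpy as np

def mim_step_linf_np(x_orig, ax, m, grad, eps, eps_iter, decay_factor):
    red_axes = tuple(range(1, grad.ndim))
    grad = grad / np.maximum(1e-12,
                             np.mean(np.abs(grad), axis=red_axes, keepdims=True))
    m = decay_factor * m + grad
    ax = ax + eps_iter * np.sign(m)
    ax = x_orig + np.clip(ax - x_orig, -eps, eps)
    return ax, m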