Example #1
  def compute_sanitized_gradients(self, loss, var_list=None,
                                  add_noise=True):
    """Compute the sanitized gradients.

    Args:
      loss: the loss tensor.
      var_list: the optional list of variables; defaults to all trainable
        variables.
      add_noise: if True, add noise after clipping; gradients are always
        clipped.
    Returns:
      a list of sanitized (clipped, and optionally noised) gradients, one per
      variable.
    Raises:
      TypeError: if var_list contains a non-variable.
    """

    self._assert_valid_dtypes([loss])

    # Default to all trainable variables so the var_list=None default works.
    if var_list is None:
      var_list = tf.trainable_variables()
    xs = [tf.convert_to_tensor(x) for x in var_list]
    # Compute per-example gradients so each example's contribution can be
    # clipped individually before aggregation.
    px_grads = per_example_gradients.PerExampleGradients(loss, xs)
    sanitized_grads = []
    for px_grad, v in zip(px_grads, var_list):
      tensor_name = utils.GetTensorOpName(v)
      # Clip the per-example gradients (and optionally add noise); the
      # num_examples argument scales privacy accounting to the full lot,
      # i.e. batches_per_lot times the current batch size.
      sanitized_grad = self._sanitizer.sanitize(
          px_grad, self._eps_delta, sigma=self._sigma,
          tensor_name=tensor_name, add_noise=add_noise,
          num_examples=self._batches_per_lot * tf.slice(
              tf.shape(px_grad), [0], [1]))
      sanitized_grads.append(sanitized_grad)

    return sanitized_grads
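
A hedged usage sketch of this method follows. The surrounding names (dp_opt, loss) are assumptions for illustration, and apply_gradients is inherited from the standard tf.train.Optimizer base class rather than defined in the snippet above.

# Assumed setup: dp_opt is an already-constructed DPGradientDescentOptimizer
# and loss is the loss tensor to differentiate; both names are hypothetical.
params = tf.trainable_variables()
sanitized_grads = dp_opt.compute_sanitized_gradients(
    loss, var_list=params, add_noise=True)
# Pair each sanitized gradient with its variable and build the update op via
# the inherited apply_gradients.
train_op = dp_opt.apply_gradients(list(zip(sanitized_grads, params)))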
Example #2
    def __init__(self,
                 learning_rate,
                 eps_delta,
                 sanitizer,
                 sigma=None,
                 use_locking=False,
                 name="DPGradientDescent",
                 batches_per_lot=1):
        """Construct a differentially private gradient descent optimizer.

    The optimizer uses fixed privacy budget for each batch of training.

    Args:
      learning_rate: for GradientDescentOptimizer.
      eps_delta: EpsDelta pair for each epoch.
      sanitizer: for sanitizing the graident.
      sigma: noise sigma. If None, use eps_delta pair to compute sigma;
        otherwise use supplied sigma directly.
      use_locking: use locking.
      name: name for the object.
      batches_per_lot: Number of batches in a lot.
    """

        super(DPGradientDescentOptimizer,
              self).__init__(learning_rate, use_locking, name)

        # When a lot spans multiple batches, track the batch count and create
        # one non-trainable accumulator per variable to sum the sanitized
        # gradients over the batches of a lot.
        self._batches_per_lot = batches_per_lot
        self._grad_accum_dict = {}
        if batches_per_lot > 1:
            self._batch_count = tf.Variable(1,
                                            dtype=tf.int32,
                                            trainable=False,
                                            name="batch_count")
            var_list = tf.trainable_variables()
            with tf.variable_scope("grad_acc_for"):
                for var in var_list:
                    v_grad_accum = tf.Variable(tf.zeros_like(var),
                                               trainable=False,
                                               name=utils.GetTensorOpName(var))
                    self._grad_accum_dict[var.name] = v_grad_accum

        self._eps_delta = eps_delta
        self._sanitizer = sanitizer
        self._sigma = sigma
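
For context, here is a hedged construction sketch modeled on the companion dp_sgd example code. The module names (accountant, sanitizer, dp_optimizer) follow that repository, while the numeric values and the names num_training_examples, batch_size, and loss are illustrative assumptions, not recommended settings.

# The moments accountant tracks cumulative privacy spending over training.
priv_accountant = accountant.GaussianMomentsAccountant(num_training_examples)
# The sanitizer clips per-example gradients to an L2 bound and adds Gaussian
# noise; the [bound, clip] argument pair follows the repository's convention.
gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(
    priv_accountant, [4.0 / batch_size, True])
dp_opt = dp_optimizer.DPGradientDescentOptimizer(
    learning_rate=0.05,
    eps_delta=[1.0, 1e-4],  # per-epoch (eps, delta) budget
    sanitizer=gaussian_sanitizer,
    sigma=4.0,              # supplied directly, so not derived from eps_delta
    batches_per_lot=10)
train_op = dp_opt.minimize(
    loss, global_step=tf.train.get_or_create_global_step())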