Example #1
    def aggregate_gradients(self, grads_and_vars):
        """Aggregate gradients on all devices.

        By default we will perform reduce_sum of gradients across devices.
        Users can implement their own aggregation logic by overriding this
        method.

        Args:
          grads_and_vars: List of (gradient, variable) pairs.

        Returns:
          List of (gradient, variable) pairs.
        """
        return optimizer_utils.all_reduce_sum_gradients(grads_and_vars)
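Since the docstring says users can override this method to supply their own aggregation logic, the sketch below shows one way that might look. It is a minimal, hypothetical example, assuming a TF 2.x Keras optimizer subclass where the overridden method receives and returns (gradient, variable) pairs as in the snippet above; the class name `GlobalNormClippingSGD` and the `clip_norm` parameter are illustrative, not part of the original code, and the exact name/signature of the overridable method varies across TensorFlow versions.

```python
import tensorflow as tf


class GlobalNormClippingSGD(tf.keras.optimizers.SGD):
    """Hypothetical SGD variant that clips gradients after cross-device aggregation."""

    def __init__(self, clip_norm=1.0, **kwargs):
        super().__init__(**kwargs)
        self.clip_norm = clip_norm

    def _aggregate_gradients(self, grads_and_vars):
        # Let the base class perform the default all-reduce sum across devices
        # (assumed to return (gradient, variable) pairs, per the docstring above).
        aggregated = super()._aggregate_gradients(grads_and_vars)
        grads, variables = zip(*aggregated)
        # Custom aggregation logic: clip the aggregated gradients by global norm.
        clipped, _ = tf.clip_by_global_norm(list(grads), self.clip_norm)
        return list(zip(clipped, variables))
```

The key design point is to call the parent implementation first so the cross-device reduction still happens, then post-process the aggregated gradients before they reach the apply step.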
Example #2
    def _aggregate_gradients(self, grads_and_vars):
        return optimizer_utils.all_reduce_sum_gradients(grads_and_vars)