def testDivideIndexedSlices(self):
   t = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   n = 2
   expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])
   result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)
   self.assertIsInstance(result, ops.IndexedSlices)
   self._assert_values_equal(expected, result)
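What this test exercises: dividing an IndexedSlices "by n" only divides its values field, leaving indices and dense_shape untouched, so a sparse gradient stays sparse. Below is a minimal self-contained sketch of that behavior, assuming TensorFlow 2.x eager execution and building the IndexedSlices by hand instead of going through the private math_ops._as_indexed_slices helper used in the test.

import tensorflow as tf

# A hand-built IndexedSlices equal to the dense tensor
# [[1., 2.], [0., 0.], [3., 4.]]: only rows 0 and 2 are stored.
sparse = tf.IndexedSlices(
    values=tf.constant([[1., 2.], [3., 4.]]),
    indices=tf.constant([0, 2], dtype=tf.int64),
    dense_shape=tf.constant([3, 2], dtype=tf.int64))

# Dividing "by n" only touches the stored values; indices and
# dense_shape are unchanged, so the result is still sparse.
halved = tf.IndexedSlices(sparse.values / 2, sparse.indices, sparse.dense_shape)

print(tf.convert_to_tensor(halved))  # [[0.5 1.], [0. 0.], [1.5 2.]]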
Example No. 3
def _simple_reduce(per_replica_value, reduce_to_device, accumulation_fn,
                   reduce_op):
  # pylint: disable=g-missing-docstring
  # Sum every per-replica value on `reduce_to_device`, then divide by the
  # replica count when a MEAN reduction is requested.
  all_values = per_replica_value.values
  if not all_values:
    raise ValueError("`per_replica_value` must be non-empty")
  count = len(all_values)

  with ops.device(reduce_to_device):
    with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
      reduced = cross_device_utils.aggregate_tensors_or_indexed_slices(
          all_values, accumulation_fn)
      if reduce_op == reduce_util.ReduceOp.MEAN:
        reduced = cross_device_utils.divide_by_n_tensors_or_indexed_slices(
            reduced, count)
      elif reduce_op != reduce_util.ReduceOp.SUM:
        raise ValueError("`reduce_op` must be Reduce.SUM or Reduce.MEAN.")
  return reduced
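The flow above is: gather every per-replica value, sum them on reduce_to_device with accumulation_fn (typically math_ops.add_n), and divide by the replica count only when a MEAN reduction is requested; anything other than SUM or MEAN is rejected. Below is a stripped-down sketch of the same control flow with a hypothetical function name and without the tf.distribute device-placement plumbing.

import tensorflow as tf

def simple_reduce_sketch(values, reduce_op="sum"):
  # Mirrors the control flow of _simple_reduce: add everything up first,
  # then divide by the number of contributions when a mean is requested.
  if not values:
    raise ValueError("`values` must be non-empty")
  reduced = tf.math.add_n(values)      # stand-in for accumulation_fn
  if reduce_op == "mean":
    reduced = reduced / len(values)
  elif reduce_op != "sum":
    raise ValueError("`reduce_op` must be 'sum' or 'mean'.")
  return reduced

# Two "replicas" each contribute a gradient; MEAN averages them.
print(simple_reduce_sketch([tf.constant([2., 4.]), tf.constant([4., 8.])],
                           reduce_op="mean"))  # [3. 6.]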
 def testDivideTensor(self):
   t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
   n = 2
   expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])
   result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)
   self._assert_values_equal(expected, result)
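For a plain dense Tensor the division path is ordinary elementwise division by n. A quick standalone check of the expected values in this test, using plain TensorFlow rather than cross_device_utils:

import tensorflow as tf

t = tf.constant([[1., 2.], [0., 0.], [3., 4.]])
# Dense path: dividing "by n" is just elementwise division.
print(t / 2)  # [[0.5 1.], [0. 0.], [1.5 2.]]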