def testContainsIndexedSlices_PerReplica(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_replica = value_lib.PerReplica((t0, t1))
   self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica))
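These tests build IndexedSlices from dense tensors via the private helper math_ops._as_indexed_slices. As a rough public-API sketch of what that conversion yields (not the helper's actual implementation), wrapping a dense tensor amounts to selecting every row:

import tensorflow as tf

dense = tf.constant([[1., 2.], [0., 0.], [3., 4.]])
# An IndexedSlices is a (values, indices, dense_shape) triple; converting a
# dense tensor "selects" every row, so indices is simply 0..n-1.
sparse = tf.IndexedSlices(
    values=dense,
    indices=tf.range(tf.shape(dense)[0]),
    dense_shape=tf.shape(dense))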
 def testContainsIndexedSlices_PerDevice(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_device = value_lib.PerDevice({"/gpu:0": t0, "/cpu:0": t1})
   self.assertTrue(cross_tower_utils.contains_indexed_slices(per_device))
 def testContainsIndexedSlices_PerReplica(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_replica = value_lib.PerReplica({"/gpu:0": t0, "/cpu:0": t1})
   self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica))
Example #6
 def testMultipleGradients(self):
   t0 = math_ops._as_indexed_slices(constant_op.constant(
       [[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(constant_op.constant(
       [[0., 0.], [5, 6], [7., 8.]]))
   total = constant_op.constant(
       [[1., 2.], [5, 6], [10., 12.]])
   result = gradients_impl._AggregateIndexedSlicesGradients([t0, t1])
   self._assert_indexed_slices_equal(total, result)
 def testAggregateIndexedSlices(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
   result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])
   self.assertIsInstance(result, ops.IndexedSlices)
   self._assert_values_equal(total, result)
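For reference, summing IndexedSlices without densifying them, which is the strategy these aggregation helpers rely on, can be sketched by concatenating values and indices; rows sharing an index are summed implicitly whenever the result is converted to a dense tensor. The helper name below is hypothetical:

import tensorflow as tf

def aggregate_indexed_slices(a, b):
  # Concatenation keeps the result sparse; duplicate indices are summed
  # later, when (and if) the slices are densified.
  return tf.IndexedSlices(
      values=tf.concat([a.values, b.values], axis=0),
      indices=tf.concat([a.indices, b.indices], axis=0),
      dense_shape=a.dense_shape)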
 def testContainsIndexedSlices_PerDeviceMapOutput(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_device = value_lib.PerDevice({
       "/gpu:0": value_lib.MapOutput([t0]),
       "/cpu:0": value_lib.MapOutput([t1])})
   self.assertTrue(cross_tower_utils.contains_indexed_slices(per_device))
Example #9
 def testContainsIndexedSlices_PerReplicaMapOutput(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_replica = value_lib.PerReplica({
       "/gpu:0": value_lib.MapOutput([t0]),
       "/cpu:0": value_lib.MapOutput([t1])})
   self.assertTrue(cross_tower_utils.contains_indexed_slices(per_replica))
 def testDivideIndexedSlices(self):
   t = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   n = 2
   expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])
   result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)
   self.assertIsInstance(result, ops.IndexedSlices)
   self._assert_values_equal(expected, result)
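Dividing an IndexedSlices by a scalar only needs to touch the values; indices and dense_shape are unchanged. A minimal sketch (divide_indexed_slices is a hypothetical name, not the function under test):

import tensorflow as tf

def divide_indexed_slices(t, n):
  # Scaling the stored rows scales the whole implicit dense tensor.
  return tf.IndexedSlices(t.values / n, t.indices, t.dense_shape)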
Example #12
 def testIndexedSlicesToTensor(self):
   with self.test_session():
     np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
     c = constant_op.constant(np_val)
     c_sparse = math_ops._as_indexed_slices(c)
     self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
     c_dense = math_ops.mul(c_sparse, 1.0)
     self.assertAllClose(np_val, c_dense.eval())
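The implicit conversion exercised here (multiplying an IndexedSlices by 1.0 yields a dense tensor) amounts to scattering the stored rows back into a zero tensor of the dense shape. A sketch of that densification using the public scatter_nd op (densify is a hypothetical name):

import tensorflow as tf

def densify(s):
  # scatter_nd sums updates that share an index, which matches the
  # additive semantics of IndexedSlices gradients.
  return tf.scatter_nd(
      indices=tf.expand_dims(tf.cast(s.indices, tf.int64), axis=1),
      updates=s.values,
      shape=tf.cast(s.dense_shape, tf.int64))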
Example #13
def _make_output_composite_tensors_match(true_graph, false_graph):
    """Modifies true_graph and false_graph so they have the same output signature.

  Currently the only transformation implemented is turning a Tensor into an
  equivalent IndexedSlices if the other branch returns an IndexedSlices.
  Updates {true,false}_graph.{outputs,structured_outputs}.

  Args:
    true_graph: FuncGraph
    false_graph: FuncGraph

  Raises:
    TypeError: if a pair of outputs cannot be rewritten.
  """
    # Note: since this is only used for gradient graphs, we do not expect the
    # outputs to be structured (e.g. nested lists), and thus do not need to use
    # nest.flatten, etc.
    true_outputs = list(true_graph.structured_outputs)
    false_outputs = list(false_graph.structured_outputs)
    assert len(true_outputs) == len(false_outputs)

    for idx, (true_out,
              false_out) in enumerate(zip(true_outputs, false_outputs)):
        if type(true_out) == type(false_out):  # pylint: disable=unidiomatic-typecheck
            continue
        if (isinstance(true_out, ops.IndexedSlices)
                and isinstance(false_out, ops.Tensor)):
            with false_graph.as_default():
                false_outputs[idx] = math_ops._as_indexed_slices(false_out)
        elif (isinstance(true_out, ops.Tensor)
              and isinstance(false_out, ops.IndexedSlices)):
            with true_graph.as_default():
                true_outputs[idx] = math_ops._as_indexed_slices(true_out)
        else:
            raise TypeError("Cannot reconcile tf.cond %i-th outputs:\n"
                            "  true_fn returned:  %s\n"
                            "  false_fn returned: %s" %
                            (idx, true_out, false_out))

    true_graph.structured_outputs = true_outputs
    true_graph.outputs = func_graph_module.flatten(true_outputs)
    false_graph.structured_outputs = false_outputs
    false_graph.outputs = func_graph_module.flatten(false_outputs)
Example #14
def _make_output_composite_tensors_match(true_graph, false_graph):
  """Rewrites {true,false}_graph's outputs to use the same _TensorLike classes.

  Currently the only transformation implemented is turning a Tensor into an
  equivalent IndexedSlices if the other branch returns an IndexedSlices.
  Updates {true,false}_graph.{outputs,structured_outputs}.

  Args:
    true_graph: FuncGraph
    false_graph: FuncGraph

  Raises:
    TypeError: if a pair of outputs cannot be rewritten.
  """
  # Note: since this is only used for gradient graphs, we do not expect the
  # outputs to be structured (e.g. nested lists), and thus do not need to use
  # nest.flatten, etc.
  true_outputs = list(true_graph.structured_outputs)
  false_outputs = list(false_graph.structured_outputs)
  assert len(true_outputs) == len(false_outputs)

  for idx, (true_out, false_out) in enumerate(zip(true_outputs, false_outputs)):
    if type(true_out) == type(false_out):  # pylint: disable=unidiomatic-typecheck
      continue
    if (isinstance(true_out, ops.IndexedSlices) and
        isinstance(false_out, ops.Tensor)):
      with false_graph.as_default():
        false_outputs[idx] = math_ops._as_indexed_slices(false_out)
    elif (isinstance(true_out, ops.Tensor) and
          isinstance(false_out, ops.IndexedSlices)):
      with true_graph.as_default():
        true_outputs[idx] = math_ops._as_indexed_slices(true_out)
    else:
      raise TypeError(
          "Cannot reconcile tf.cond %i-th outputs:\n"
          "  true_fn returned:  %s\n"
          "  false_fn returned: %s" % (idx, true_out, false_out))

  true_graph.structured_outputs = true_outputs
  true_graph.outputs = func_graph_module.flatten(true_outputs)
  false_graph.structured_outputs = false_outputs
  false_graph.outputs = func_graph_module.flatten(false_outputs)
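The Tensor/IndexedSlices mismatch this function reconciles arises naturally when differentiating through tf.cond: tf.gather produces an IndexedSlices gradient while ordinary dense ops do not. A toy reproduction, assuming TF 2.x (grad_through_cond is an illustrative name):

import tensorflow as tf

params = tf.Variable([[1., 2.], [3., 4.]])

@tf.function
def grad_through_cond(pred):
  with tf.GradientTape() as tape:
    out = tf.cond(
        pred,
        lambda: tf.reduce_sum(tf.gather(params, [0])),  # grad: IndexedSlices
        lambda: tf.reduce_sum(params * 2.0))            # grad: dense Tensor
  # Building this gradient constructs gradient graphs for both branches,
  # whose outputs must be rewritten to a common signature.
  return tape.gradient(out, params)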
Example #15
 def testInt64Indices(self):
   with self.test_session():
     np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
     c = constant_op.constant(np_val)
     c_sparse = math_ops._as_indexed_slices(c)
     c_sparse = ops.IndexedSlices(
         c_sparse.values, math_ops.cast(c_sparse.indices, dtypes.int64),
         c_sparse.dense_shape)
     self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
     c_dense = math_ops.mul(c_sparse, 1.0)
     self.assertAllClose(np_val, c_dense.eval())
  def testCopyIndexedSlices(self):
    with ops.device("/cpu:0"):
      t = math_ops._as_indexed_slices(
          constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    destination = "/gpu:0"
    result = cross_device_utils.copy_tensor_or_indexed_slices_to_device(
        t, destination)

    self.assertIsInstance(result, ops.IndexedSlices)
    self._assert_values_equal(t, result)
    self.assertEqual(device_util.resolve(destination),
                     device_util.resolve(result.device))
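Copying an IndexedSlices to another device can be done component-wise; a minimal sketch (copy_indexed_slices_to is a hypothetical helper, not the cross_device_utils function under test):

import tensorflow as tf

def copy_indexed_slices_to(s, device):
  # tf.identity under a device scope materializes a copy on that device.
  with tf.device(device):
    return tf.IndexedSlices(
        tf.identity(s.values),
        tf.identity(s.indices),
        tf.identity(s.dense_shape))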
Example #18
def _make_output_composite_tensors_match(op_type, branch_graphs):
    """Modifies each branch_graph's outputs to have the same output signature.

  Currently the only transformation implemented is turning a Tensor into an
  equivalent IndexedSlices if the other branch returns an IndexedSlices.
  Updates branch_graph.{outputs,structured_outputs} for each branch_graph in
  branch_graphs.

  Args:
    op_type: _COND or _CASE
    branch_graphs: `list` of `FuncGraph`

  Raises:
    TypeError: if a set of outputs cannot be rewritten.
  """
    # Note: since this is only used for gradient graphs, we do not expect the
    # outputs to be structured (e.g. nested lists), and thus do not need to use
    # nest.flatten, etc.
    assert branch_graphs
    branch_outputs = [g.structured_outputs for g in branch_graphs]
    outputs_per_branch = list(len(outs) for outs in branch_outputs)
    assert len(set(outputs_per_branch)) == 1, outputs_per_branch

    for output_idx, branch_outs in enumerate(zip(*branch_outputs)):
        if len(set(type(out) for out in branch_outs)) == 1:
            continue
        if not any(isinstance(out, ops.IndexedSlices) for out in branch_outs):
            continue
        for branch_idx, branch_out in enumerate(branch_outs):
            if isinstance(branch_out, ops.IndexedSlices):
                continue
            elif isinstance(branch_out, ops.Tensor):
                with branch_graphs[branch_idx].as_default():
                    branch_outputs[branch_idx][
                        output_idx] = math_ops._as_indexed_slices(branch_out)
            else:
                raise TypeError(
                    "Cannot reconcile {op_name} {output_idx}-th outputs:\n"
                    "  outputs from all branches: {outputs}".format(
                        op_name="tf.cond"
                        if op_type == _COND else "tf.switch_case",
                        output_idx=output_idx,
                        outputs=branch_outs))

    for branch_graph, branch_outs in zip(branch_graphs, branch_outputs):
        branch_graph.structured_outputs = branch_outs
        branch_graph.outputs = [
            t for t in func_graph_module.flatten(branch_outs) if t is not None
        ]
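A toy case that exercises the multi-branch path, again assuming TF 2.x: one tf.switch_case branch yields an IndexedSlices gradient and the others dense gradients, so the dense ones get promoted:

import tensorflow as tf

params = tf.Variable([[1., 2.], [3., 4.]])

@tf.function
def grad_through_case(branch_index):
  with tf.GradientTape() as tape:
    out = tf.switch_case(branch_index, [
        lambda: tf.reduce_sum(tf.gather(params, [0])),  # IndexedSlices grad
        lambda: tf.reduce_sum(params * 2.0),            # dense grad
    ])
  return tape.gradient(out, params)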
Example #19
 def testIndexedSlicesToTensorList(self):
   with self.test_session():
     numpy_list = []
     dense_list = []
     sparse_list = []
     for _ in range(3):
       np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
       c = constant_op.constant(np_val)
       c_sparse = math_ops._as_indexed_slices(c)
       numpy_list.append(np_val)
       dense_list.append(c)
       sparse_list.append(c_sparse)
     packed_dense = array_ops.pack(dense_list)
     packed_sparse = array_ops.pack(sparse_list)
     self.assertAllClose(packed_dense.eval(), packed_sparse.eval())
 def testContainsIndexedSlices_List(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   self.assertTrue(cross_tower_utils.contains_indexed_slices([t0, t1]))
Example #23
 def false_fn():
   return (None, None, None,
           math_ops._as_indexed_slices(constant_op.constant([2.])))
Example #24
 def true_fn():
   return (None, None, None,
           math_ops._as_indexed_slices(constant_op.constant([1.])))
 def testIsIndexedSlices(self):
   t = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   self.assertTrue(cross_device_utils.contains_indexed_slices(t))
 def testContainsIndexedSlices_Tuple(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   self.assertTrue(cross_device_utils.contains_indexed_slices((t0, t1)))
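Conceptually, contains_indexed_slices is a recursive search over nested structures. A simplified sketch covering the cases in these tests (the real utility also understands distribution-strategy containers such as PerReplica):

import tensorflow as tf

def contains_indexed_slices(value):
  # Recurse through lists and tuples looking for any IndexedSlices.
  if isinstance(value, tf.IndexedSlices):
    return True
  if isinstance(value, (list, tuple)):
    return any(contains_indexed_slices(v) for v in value)
  return False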
Example #29
 def testOneGradient(self):
   t = math_ops._as_indexed_slices(constant_op.constant(
       [[1., 2.], [0, 0], [3., 4.]]))
   result = gradients_impl._AggregateIndexedSlicesGradients([t])
   self._assert_indexed_slices_equal(t, result)