Code Example #1
 def testContainsIndexedSlices_PerReplica(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_replica = value_lib.PerReplica((t0, t1))
   self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica))
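Note: math_ops._as_indexed_slices is a private TensorFlow helper that wraps a dense Tensor as an equivalent ops.IndexedSlices covering every row. A rough public-API sketch of that conversion (an illustration only, not the helper's actual implementation; details such as the index dtype may differ):

import tensorflow as tf

# Wrap a dense tensor as an IndexedSlices whose values are the full tensor
# and whose indices enumerate every row (illustrative approximation).
dense = tf.constant([[1., 2.], [0., 0.], [3., 4.]])
sparse = tf.IndexedSlices(
    values=dense,
    indices=tf.range(tf.shape(dense)[0]),
    dense_shape=tf.shape(dense))
print(sparse.indices.numpy())  # [0 1 2]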
Code Example #2
 def testContainsIndexedSlices_PerDevice(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_device = value_lib.PerDevice({"/gpu:0": t0, "/cpu:0": t1})
   self.assertTrue(cross_tower_utils.contains_indexed_slices(per_device))
Code Example #3
 def testContainsIndexedSlices_PerReplica(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_replica = value_lib.PerReplica({"/gpu:0": t0, "/cpu:0": t1})
   self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica))
Code Example #4
 def testAggregateIndexedSlices(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
   result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])
   self.assertIsInstance(result, ops.IndexedSlices)
   self._assert_values_equal(total, result)
Code Example #5
 def testMultipleGradients(self):
   t0 = math_ops._as_indexed_slices(constant_op.constant(
       [[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(constant_op.constant(
       [[0., 0.], [5, 6], [7., 8.]]))
   total = constant_op.constant(
       [[1., 2.], [5, 6], [10., 12.]])
   result = gradients_impl._AggregateIndexedSlicesGradients([t0, t1])
   self._assert_indexed_slices_equal(total, result)
Code Example #6
 def testContainsIndexedSlices_PerDeviceMapOutput(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_device = value_lib.PerDevice({
       "/gpu:0": value_lib.MapOutput([t0]),
       "/cpu:0": value_lib.MapOutput([t1])})
   self.assertTrue(cross_tower_utils.contains_indexed_slices(per_device))
Code Example #7
 def testContainsIndexedSlices_PerReplicaMapOutput(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_replica = value_lib.PerReplica({
       "/gpu:0": value_lib.MapOutput([t0]),
       "/cpu:0": value_lib.MapOutput([t1])})
   self.assertTrue(cross_tower_utils.contains_indexed_slices(per_replica))
Code Example #8
 def testDivideIndexedSlices(self):
   t = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   n = 2
   expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])
   result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)
   self.assertIsInstance(result, ops.IndexedSlices)
   self._assert_values_equal(expected, result)
Code Example #9
 def testIndexedSlicesToTensor(self):
   with self.test_session():
     np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
     c = constant_op.constant(np_val)
     c_sparse = math_ops._as_indexed_slices(c)
     self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
     c_dense = math_ops.multiply(c_sparse, 1.0)  # mul was renamed to multiply
     self.assertAllClose(np_val, c_dense.eval())
Code Example #10
def _make_output_composite_tensors_match(true_graph, false_graph):
    """Modifies true_graph and false_graph so they have the same output signature.

    Currently the only transformation implemented is turning a Tensor into an
    equivalent IndexedSlices if the other branch returns an IndexedSlices.
    Updates {true,false}_graph.{outputs,structured_outputs}.

    Args:
      true_graph: FuncGraph
      false_graph: FuncGraph

    Raises:
      TypeError: if a pair of outputs cannot be rewritten.
    """
    # Note: since this is only used for gradient graphs, we do not expect the
    # outputs to be structured (e.g. nested lists), and thus do not need to use
    # nest.flatten, etc.
    true_outputs = list(true_graph.structured_outputs)
    false_outputs = list(false_graph.structured_outputs)
    assert len(true_outputs) == len(false_outputs)

    for idx, (true_out,
              false_out) in enumerate(zip(true_outputs, false_outputs)):
        if type(true_out) == type(false_out):  # pylint: disable=unidiomatic-typecheck
            continue
        if (isinstance(true_out, ops.IndexedSlices)
                and isinstance(false_out, ops.Tensor)):
            with false_graph.as_default():
                false_outputs[idx] = math_ops._as_indexed_slices(false_out)
        elif (isinstance(true_out, ops.Tensor)
              and isinstance(false_out, ops.IndexedSlices)):
            with true_graph.as_default():
                true_outputs[idx] = math_ops._as_indexed_slices(true_out)
        else:
            raise TypeError("Cannot reconcile tf.cond %i-th outputs:\n"
                            "  true_fn returned:  %s\n"
                            "  false_fn returned: %s" %
                            (idx, true_out, false_out))

    true_graph.structured_outputs = true_outputs
    true_graph.outputs = func_graph_module.flatten(true_outputs)
    false_graph.structured_outputs = false_outputs
    false_graph.outputs = func_graph_module.flatten(false_outputs)
Code Example #11
File: cond_v2.py Project: terrytangyuan/tensorflow
def _make_output_composite_tensors_match(true_graph, false_graph):
  """Rewrites {true,false}_graph's outputs to use the same _TensorLike classes.

  Currently the only transformation implemented is turning a Tensor into an
  equivalent IndexedSlices if the other branch returns an IndexedSlices.
  Updates {true,false}_graph.{outputs,structured_outputs}.

  Args:
    true_graph: FuncGraph
    false_graph: FuncGraph

  Raises:
    TypeError: if a pair of outputs cannot be rewritten.
  """
  # Note: since this is only used for gradient graphs, we do not expect the
  # outputs to be structured (e.g. nested lists), and thus do not need to use
  # nest.flatten, etc.
  true_outputs = list(true_graph.structured_outputs)
  false_outputs = list(false_graph.structured_outputs)
  assert len(true_outputs) == len(false_outputs)

  for idx, (true_out, false_out) in enumerate(zip(true_outputs, false_outputs)):
    if type(true_out) == type(false_out):  # pylint: disable=unidiomatic-typecheck
      continue
    if (isinstance(true_out, ops.IndexedSlices) and
        isinstance(false_out, ops.Tensor)):
      with false_graph.as_default():
        false_outputs[idx] = math_ops._as_indexed_slices(false_out)
    elif (isinstance(true_out, ops.Tensor) and
          isinstance(false_out, ops.IndexedSlices)):
      with true_graph.as_default():
        true_outputs[idx] = math_ops._as_indexed_slices(true_out)
    else:
      raise TypeError(
          "Cannot reconcile tf.cond %i-th outputs:\n"
          "  true_fn returned:  %s\n"
          "  false_fn returned: %s" % (idx, true_out, false_out))

  true_graph.structured_outputs = true_outputs
  true_graph.outputs = func_graph_module.flatten(true_outputs)
  false_graph.structured_outputs = false_outputs
  false_graph.outputs = func_graph_module.flatten(false_outputs)
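The docstring above notes that this rewrite only runs on gradient graphs. As an illustrative sketch of the mismatch it reconciles (written against the public TF 2.x API; not part of the original listing), the gradient of a tf.gather branch under tf.cond is an IndexedSlices while the other branch's gradient is a dense Tensor:

import tensorflow as tf

v = tf.Variable([[1., 2.], [3., 4.], [5., 6.]])

@tf.function
def cond_grad(pred):
  with tf.GradientTape() as tape:
    y = tf.cond(pred,
                lambda: tf.reduce_sum(tf.gather(v, [0, 2])),  # IndexedSlices grad
                lambda: tf.reduce_sum(v))                     # dense Tensor grad
  # Densify so the traced function returns one fixed output type.
  return tf.convert_to_tensor(tape.gradient(y, v))

print(cond_grad(tf.constant(True)))  # [[1. 1.] [0. 0.] [1. 1.]]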
Code Example #12
 def testInt64Indices(self):
   with self.test_session():
     np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
     c = constant_op.constant(np_val)
     c_sparse = math_ops._as_indexed_slices(c)
     c_sparse = ops.IndexedSlices(
         c_sparse.values, math_ops.cast(c_sparse.indices, dtypes.int64),
         c_sparse.dense_shape)
     self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
     c_dense = math_ops.multiply(c_sparse, 1.0)  # mul was renamed to multiply
     self.assertAllClose(np_val, c_dense.eval())
Code Example #13
  def testCopyIndexedSlices(self):
    with ops.device("/cpu:0"):
      t = math_ops._as_indexed_slices(
          constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    destination = "/gpu:0"
    result = cross_device_utils.copy_tensor_or_indexed_slices_to_device(
        t, destination)

    self.assertIsInstance(result, ops.IndexedSlices)
    self._assert_values_equal(t, result)
    self.assertEqual(device_util.resolve(destination),
                     device_util.resolve(result.device))
Code Example #14
File: cond_v2.py Project: wangjunbo2000/wangjb
def _make_output_composite_tensors_match(op_type, branch_graphs):
    """Modifies each branch_graph's outputs to have the same output signature.

    Currently the only transformation implemented is turning a Tensor into an
    equivalent IndexedSlices if the other branch returns an IndexedSlices.
    Updates branch_graph.{outputs,structured_outputs} for each branch_graph in
    branch_graphs.

    Args:
      op_type: _COND or _CASE
      branch_graphs: `list` of `FuncGraph`

    Raises:
      TypeError: if a set of outputs cannot be rewritten.
    """
    # Note: since this is only used for gradient graphs, we do not expect the
    # outputs to be structured (e.g. nested lists), and thus do not need to use
    # nest.flatten, etc.
    assert branch_graphs
    branch_outputs = [g.structured_outputs for g in branch_graphs]
    outputs_per_branch = list(len(outs) for outs in branch_outputs)
    assert len(set(outputs_per_branch)) == 1, outputs_per_branch

    for output_idx, branch_outs in enumerate(zip(*branch_outputs)):
        if len(set(type(out) for out in branch_outs)) == 1:
            continue
        if not any(isinstance(out, ops.IndexedSlices) for out in branch_outs):
            continue
        for branch_idx, branch_out in enumerate(branch_outs):
            if isinstance(branch_out, ops.IndexedSlices):
                continue
            elif isinstance(branch_out, ops.Tensor):
                with branch_graphs[branch_idx].as_default():
                    branch_outputs[branch_idx][
                        output_idx] = math_ops._as_indexed_slices(branch_out)
            else:
                raise TypeError(
                    "Cannot reconcile {op_name} {output_idx}-th outputs:\n"
                    "  outputs from all branches: {outputs}".format(
                        op_name="tf.cond"
                        if op_type == _COND else "tf.switch_case",
                        output_idx=output_idx,
                        outputs=branch_outs))

    for branch_graph, branch_outs in zip(branch_graphs, branch_outputs):
        branch_graph.structured_outputs = branch_outs
        branch_graph.outputs = [
            t for t in func_graph_module.flatten(branch_outs) if t is not None
        ]
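Analogously, a minimal sketch (again public TF 2.x API, not from the listing) of the multi-branch case this version additionally covers, where tf.switch_case branches disagree on Tensor vs. IndexedSlices gradients:

import tensorflow as tf

v = tf.Variable([[1., 2.], [3., 4.], [5., 6.]])

@tf.function
def case_grad(branch_index):
  with tf.GradientTape() as tape:
    y = tf.switch_case(branch_index, [
        lambda: tf.reduce_sum(tf.gather(v, [0])),  # IndexedSlices grad
        lambda: tf.reduce_sum(v),                  # dense Tensor grad
        lambda: tf.reduce_sum(tf.gather(v, [2])),  # IndexedSlices grad
    ])
  return tf.convert_to_tensor(tape.gradient(y, v))

print(case_grad(tf.constant(1)))  # all ones: the dense branch's gradient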
Code Example #15
 def testIndexedSlicesToTensorList(self):
   with self.test_session():
     numpy_list = []
     dense_list = []
     sparse_list = []
     for _ in range(3):
       np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
       c = constant_op.constant(np_val)
       c_sparse = math_ops._as_indexed_slices(c)
       numpy_list.append(np_val)
       dense_list.append(c)
       sparse_list.append(c_sparse)
     packed_dense = array_ops.stack(dense_list)  # pack was renamed to stack
     packed_sparse = array_ops.stack(sparse_list)
     self.assertAllClose(packed_dense.eval(), packed_sparse.eval())
Code Example #16
 def testContainsIndexedSlices_List(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   self.assertTrue(cross_tower_utils.contains_indexed_slices([t0, t1]))
Code Example #17
 def false_fn():
   return (None, None, None,
           math_ops._as_indexed_slices(constant_op.constant([2.])))
Code Example #18
 def true_fn():
   return (None, None, None,
           math_ops._as_indexed_slices(constant_op.constant([1.])))
Code Example #19
 def testIsIndexedSlices(self):
   t = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   self.assertTrue(cross_device_utils.contains_indexed_slices(t))
Code Example #20
 def testContainsIndexedSlices_Tuple(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   self.assertTrue(cross_device_utils.contains_indexed_slices((t0, t1)))
Code Example #21
 def testOneGradient(self):
   t = math_ops._as_indexed_slices(constant_op.constant(
       [[1., 2.], [0, 0], [3., 4.]]))
   result = gradients_impl._AggregateIndexedSlicesGradients([t])
   self._assert_indexed_slices_equal(t, result)