Code Example #1
  def testFromTensorsMixed(self):
    """Test an dataset that represents a single tuple of tensors."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
                  sparse_tensor.SparseTensorValue(
                      indices=np.array([[0]]),
                      values=np.array([0]),
                      dense_shape=np.array([1])),
                  sparse_tensor.SparseTensorValue(
                      indices=np.array([[0, 0], [1, 1]]),
                      values=np.array([-1, 1]),
                      dense_shape=np.array([2, 2])))

    iterator = (
        dataset_ops.Dataset.from_tensors(components)
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([
        tensor_shape.TensorShape(c.dense_shape)
        if sparse_tensor.is_sparse(c) else c.shape for c in components
    ], [shape for shape in iterator.output_shapes])

    with self.test_session() as sess:
      sess.run(init_op)
      results = sess.run(get_next)
      for component, result_component in zip(components, results):
        if sparse_tensor.is_sparse(component):
          self.assertSparseValuesEqual(component, result_component)
        else:
          self.assertAllEqual(component, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
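
A quick aside on the shape assertion above: each dense component contributes its own ndarray shape, while each sparse component contributes the shape described by its dense_shape vector. A plain-Python restatement (NumPy only, no TensorFlow session needed; the variable names are illustrative):

import numpy as np

# Dense components report their ndarray shape.
dense_shapes = [np.array(1).shape, np.array([1, 2, 3]).shape, np.array(37.0).shape]
# Sparse components report the shape given by their dense_shape vector.
sparse_shapes = [(1,), (2, 2)]
print(dense_shapes + sparse_shapes)  # [(), (3,), (), (1,), (2, 2)]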
Code Example #2
  def testFromTensorSlicesMixed(self):
    """Test a dataset that represents the slices from a tuple of tensors."""
    components = (np.tile(np.array([[1], [2], [3]]), 20),
                  np.tile(np.array([[12], [13], [14]]), 22),
                  np.array([37.0, 38.0, 39.0]),
                  sparse_tensor.SparseTensorValue(
                      indices=np.array([[0, 0], [1, 0], [2, 0]]),
                      values=np.array([0, 0, 0]),
                      dense_shape=np.array([3, 1])),
                  sparse_tensor.SparseTensorValue(
                      indices=np.array([[0, 0], [1, 1], [2, 2]]),
                      values=np.array([1, 2, 3]),
                      dense_shape=np.array([3, 3])))

    dataset = dataset_ops.Dataset.from_tensor_slices(components)
    get_next = self.getNext(dataset)
    self.assertEqual([
        tensor_shape.TensorShape(c.dense_shape[1:])
        if sparse_tensor.is_sparse(c) else c.shape[1:] for c in components
    ], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])

    expected = [
        (sparse_tensor.SparseTensorValue(
            indices=np.array([[0]]),
            values=np.array([0]),
            dense_shape=np.array([1])),
         sparse_tensor.SparseTensorValue(
             indices=np.array([[0]]),
             values=np.array([1]),
             dense_shape=np.array([3]))),
        (sparse_tensor.SparseTensorValue(
            indices=np.array([[0]]),
            values=np.array([0]),
            dense_shape=np.array([1])),
         sparse_tensor.SparseTensorValue(
             indices=np.array([[1]]),
             values=np.array([2]),
             dense_shape=np.array([3]))),
        (sparse_tensor.SparseTensorValue(
            indices=np.array([[0]]),
            values=np.array([0]),
            dense_shape=np.array([1])),
         sparse_tensor.SparseTensorValue(
             indices=np.array([[2]]),
             values=np.array([3]),
             dense_shape=np.array([3]))),
    ]
    for i in range(3):
      results = self.evaluate(get_next())
      for component, result_component in zip(
          (list(zip(*components[:3]))[i] + expected[i]), results):
        if sparse_tensor.is_sparse(component):
          self.assertSparseValuesEqual(component, result_component)
        else:
          self.assertAllEqual(component, result_component)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
Code Example #3
 def testIsSparse(self):
   self.assertFalse(sparse_tensor.is_sparse(3))
   self.assertFalse(sparse_tensor.is_sparse("foo"))
   self.assertFalse(sparse_tensor.is_sparse(np.array(3)))
   self.assertTrue(
       sparse_tensor.is_sparse(sparse_tensor.SparseTensor([[0]], [0], [1])))
   self.assertTrue(
       sparse_tensor.is_sparse(
           sparse_tensor.SparseTensorValue([[0]], [0], [1])))
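
The test above pins down the contract of sparse_tensor.is_sparse: it returns True for both SparseTensor and SparseTensorValue, and False for anything else, including NumPy arrays. A minimal standalone sketch of that behavior (it assumes a TF build where the internal module tensorflow.python.framework.sparse_tensor is importable under that path):

import numpy as np
from tensorflow.python.framework import sparse_tensor

st = sparse_tensor.SparseTensor(indices=[[0]], values=[0], dense_shape=[1])
stv = sparse_tensor.SparseTensorValue(indices=[[0]], values=[0], dense_shape=[1])

print(sparse_tensor.is_sparse(st))             # True
print(sparse_tensor.is_sparse(stv))            # True
print(sparse_tensor.is_sparse(np.array([0])))  # False: a dense ndarray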
Code Example #4
    def testFromTensorSlicesMixed(self):
        """Test a dataset that represents the slices from a tuple of tensors."""
        components = (np.tile(np.array([[1], [2], [3]]),
                              20), np.tile(np.array([[12], [13], [14]]),
                                           22), np.array([37.0, 38.0, 39.0]),
                      sparse_tensor.SparseTensorValue(
                          indices=np.array([[0, 0], [1, 0], [2, 0]]),
                          values=np.array([0, 0, 0]),
                          dense_shape=np.array([3, 1])),
                      sparse_tensor.SparseTensorValue(
                          indices=np.array([[0, 0], [1, 1], [2, 2]]),
                          values=np.array([1, 2, 3]),
                          dense_shape=np.array([3, 3])))

        iterator = (dataset_ops.Dataset.from_tensor_slices(
            components).make_initializable_iterator())
        init_op = iterator.initializer
        get_next = iterator.get_next()

        self.assertEqual([
            tensor_shape.TensorShape(c.dense_shape[1:])
            if sparse_tensor.is_sparse(c) else c.shape[1:] for c in components
        ], [shape for shape in iterator.output_shapes])

        with self.test_session() as sess:
            sess.run(init_op)
            expected = [
                (sparse_tensor.SparseTensorValue(indices=np.array([[0]]),
                                                 values=np.array([0]),
                                                 dense_shape=np.array([1])),
                 sparse_tensor.SparseTensorValue(indices=np.array([[0]]),
                                                 values=np.array([1]),
                                                 dense_shape=np.array([3]))),
                (sparse_tensor.SparseTensorValue(indices=np.array([[0]]),
                                                 values=np.array([0]),
                                                 dense_shape=np.array([1])),
                 sparse_tensor.SparseTensorValue(indices=np.array([[1]]),
                                                 values=np.array([2]),
                                                 dense_shape=np.array([3]))),
                (sparse_tensor.SparseTensorValue(indices=np.array([[0]]),
                                                 values=np.array([0]),
                                                 dense_shape=np.array([1])),
                 sparse_tensor.SparseTensorValue(indices=np.array([[2]]),
                                                 values=np.array([3]),
                                                 dense_shape=np.array([3]))),
            ]
            for i in range(3):
                results = sess.run(get_next)
                for component, result_component in zip(
                    (list(zip(*components[:3]))[i] + expected[i]), results):
                    if sparse_tensor.is_sparse(component):
                        self.assertSparseValuesEqual(component,
                                                     result_component)
                    else:
                        self.assertAllEqual(component, result_component)
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
Code Example #5
  def testBatchSparseWithDifferentDenseShapes(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=array_ops.expand_dims(
              math_ops.range(i, dtype=dtypes.int64), 1),
          values=array_ops.fill([math_ops.to_int32(i)], i),
          dense_shape=[i])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).batch(
        5).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(2):
        actual = sess.run(get_next)
        expected_indices = []
        expected_values = []
        for j in range(5):
          for k in range(i * 5 + j):
            expected_indices.append([j, k])
            expected_values.append(i * 5 + j)
        expected = sparse_tensor.SparseTensorValue(
            indices=expected_indices,
            values=expected_values,
            dense_shape=[5, (i + 1) * 5 - 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
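
For context on the expected values built in the loop above: batch i stacks the five elements i*5 .. i*5+4, whose individual dense shapes are [i*5] .. [i*5+4], so the batched dense_shape widens to the longest row, (i + 1)*5 - 1. A plain-Python sketch of that bookkeeping (no TensorFlow required; expected_batch is an illustrative helper, not part of the test):

def expected_batch(i, batch_size=5):
    # Row j of batch i comes from element i*batch_size + j, which has that many entries.
    indices = [[j, k] for j in range(batch_size) for k in range(i * batch_size + j)]
    values = [i * batch_size + j for j in range(batch_size) for k in range(i * batch_size + j)]
    dense_shape = [batch_size, (i + 1) * batch_size - 1]
    return indices, values, dense_shape

print(expected_batch(0)[2])  # [5, 4]: rows hold 0..4 entries, the widest row has 4 columns
print(expected_batch(1)[2])  # [5, 9]: rows hold 5..9 entries, the widest row has 9 columns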
Code Example #6
File: test_base.py Project: qwerzou1/shibie
 def _compareOutputToExpected(self, result_values, expected_values):
     for i in range(len(result_values)):
         if sparse_tensor.is_sparse(result_values[i]):
             self.assertSparseValuesEqual(result_values[i],
                                          expected_values[i])
         else:
             self.assertAllEqual(result_values[i], expected_values[i])
Code Example #7
        def tf_finalize_func(*args):
            """A wrapper for Defun that facilitates shape inference."""
            for arg, shape in zip(
                    args,
                    nest.flatten(
                        sparse.as_dense_shapes(self._state_shapes,
                                               self._state_classes))):
                arg.set_shape(shape)

            nested_args = nest.pack_sequence_as(self._state_types, args)
            nested_args = sparse.deserialize_sparse_tensors(
                nested_args, self._state_types, self._state_shapes,
                self._state_classes)

            ret = finalize_func(nested_args)

            # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
            # values to tensors.
            ret = nest.pack_sequence_as(ret, [
                sparse_tensor.SparseTensor.from_value(t)
                if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(t)
                for t in nest.flatten(ret)
            ])

            self._output_classes = sparse.get_classes(ret)
            self._output_shapes = nest.pack_sequence_as(
                ret, [t.get_shape() for t in nest.flatten(ret)])
            self._output_types = nest.pack_sequence_as(
                ret, [t.dtype for t in nest.flatten(ret)])

            # Serialize any sparse tensors.
            ret = nest.pack_sequence_as(ret, [
                t for t in nest.flatten(sparse.serialize_sparse_tensors(ret))
            ])
            return nest.flatten(ret)
Code Example #8
File: structure.py Project: aritratony/tensorflow
def normalize_tensors(tensors):
  """Converts a nested structure of tensor-like objects to tensors.

  * `SparseTensor`-like inputs are converted to `SparseTensor`.
  * `TensorArray` inputs are passed through.
  * Everything else is converted to a dense `Tensor`.

  Args:
    tensors: A nested structure of tensor-like, list,
      `SparseTensor`, `SparseTensorValue`, or `TensorArray` objects.

  Returns:
    A nested structure of tensor, `SparseTensor`, or `TensorArray` objects.
  """
  flat_tensors = nest.flatten(tensors)
  prepared = []
  with ops.name_scope("normalize_tensors"):
    for i, t in enumerate(flat_tensors):
      if sparse_tensor_lib.is_sparse(t):
        prepared.append(sparse_tensor_lib.SparseTensor.from_value(t))
      elif ragged_tensor.is_ragged(t):
        prepared.append(
            ragged_tensor.convert_to_tensor_or_ragged_tensor(
                t, name="component_%d" % i))
      elif isinstance(t, tensor_array_ops.TensorArray):
        prepared.append(t)
      else:
        prepared.append(ops.convert_to_tensor(t, name="component_%d" % i))
  return nest.pack_sequence_as(tensors, prepared)
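
A hypothetical usage sketch of this helper follows; the import path tensorflow.python.data.util.structure and the presence of normalize_tensors there are assumptions tied to the TF revision the snippet was taken from, and both modules are internal rather than public API:

import numpy as np
from tensorflow.python.data.util import structure      # assumed internal path
from tensorflow.python.framework import sparse_tensor  # assumed internal path

mixed = (np.array([1, 2, 3]),
         sparse_tensor.SparseTensorValue(
             indices=[[0, 0]], values=[1], dense_shape=[2, 2]))
normalized = structure.normalize_tensors(mixed)
print(sparse_tensor.is_sparse(normalized[0]))  # False: converted to a dense Tensor
print(sparse_tensor.is_sparse(normalized[1]))  # True: now a SparseTensor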
Code Example #9
  def testSlideSparseWithDifferentDenseShapes(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=array_ops.expand_dims(
              math_ops.range(i, dtype=dtypes.int64), 1),
          values=array_ops.fill([math_ops.to_int32(i)], i),
          dense_shape=[i])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
        sliding.sliding_window_batch(
            window_size=5, window_shift=3)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(init_op)
      num_batches = (10 - 5) // 3 + 1
      for i in range(num_batches):
        actual = sess.run(get_next)
        expected_indices = []
        expected_values = []
        for j in range(5):
          for k in range(i * 3 + j):
            expected_indices.append([j, k])
            expected_values.append(i * 3 + j)
        expected = sparse_tensor.SparseTensorValue(
            indices=expected_indices,
            values=expected_values,
            dense_shape=[5, i * 3 + 5 - 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Code Example #10
    def testWindowSparse(self):
        def _sparse(i):
            return sparse_tensor.SparseTensorValue(indices=[[0]],
                                                   values=(i * [1]),
                                                   dense_shape=[1])

        iterator = dataset_ops.Dataset.range(10).map(_sparse).window(
            size=5, shift=3, drop_remainder=True).flat_map(
                lambda x: x.batch(batch_size=5)).make_initializable_iterator()
        init_op = iterator.initializer
        get_next = iterator.get_next()

        with self.cached_session() as sess:
            self.evaluate(init_op)
            num_batches = (10 - 5) // 3 + 1
            for i in range(num_batches):
                actual = self.evaluate(get_next)
                expected = sparse_tensor.SparseTensorValue(
                    indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
                    values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
                    dense_shape=[5, 1])
                self.assertTrue(sparse_tensor.is_sparse(actual))
                self.assertSparseValuesEqual(actual, expected)
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
Code Example #11
File: grouping.py Project: Jackiefan/tensorflow
    def tf_finalize_func(*args):
      """A wrapper for Defun that facilitates shape inference."""
      for arg, shape in zip(
          args,
          nest.flatten(
              sparse.as_dense_shapes(self._state_shapes, self._state_classes))):
        arg.set_shape(shape)

      nested_args = nest.pack_sequence_as(self._state_types, args)
      nested_args = sparse.deserialize_sparse_tensors(
          nested_args, self._state_types, self._state_shapes,
          self._state_classes)

      ret = finalize_func(nested_args)

      # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
      # values to tensors.
      ret = nest.pack_sequence_as(ret, [
          sparse_tensor.SparseTensor.from_value(t)
          if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(t)
          for t in nest.flatten(ret)
      ])

      self._output_classes = sparse.get_classes(ret)
      self._output_shapes = nest.pack_sequence_as(
          ret, [t.get_shape() for t in nest.flatten(ret)])
      self._output_types = nest.pack_sequence_as(
          ret, [t.dtype for t in nest.flatten(ret)])

      # Serialize any sparse tensors.
      ret = nest.pack_sequence_as(
          ret, [t for t in nest.flatten(sparse.serialize_sparse_tensors(ret))])
      return nest.flatten(ret)
Code Example #12
File: test_base.py Project: wang002/tensorflow_w
    def assertDatasetsEqual(self, dataset1, dataset2):
        """Checks that datasets are equal. Supports both graph and eager mode."""
        self.assertTrue(
            structure.are_compatible(dataset_ops.get_structure(dataset1),
                                     dataset_ops.get_structure(dataset2)))

        flattened_types = nest.flatten(
            dataset_ops.get_legacy_output_types(dataset1))

        next1 = self.getNext(dataset1)
        next2 = self.getNext(dataset2)

        while True:
            try:
                op1 = self.evaluate(next1())
            except errors.OutOfRangeError:
                with self.assertRaises(errors.OutOfRangeError):
                    self.evaluate(next2())
                break
            op2 = self.evaluate(next2())

            op1 = nest.flatten(op1)
            op2 = nest.flatten(op2)
            assert len(op1) == len(op2)
            for i in range(len(op1)):
                if sparse_tensor.is_sparse(op1[i]) or ragged_tensor.is_ragged(
                        op1[i]):
                    self.assertValuesEqual(op1[i], op2[i])
                elif flattened_types[i] == dtypes.string:
                    self.assertAllEqual(op1[i], op2[i])
                else:
                    self.assertAllClose(op1[i], op2[i])
Code Example #13
File: test_base.py Project: adit-chandra/tensorflow
  def assertDatasetsEqual(self, dataset1, dataset2):
    """Checks that datasets are equal. Supports both graph and eager mode."""
    self.assertTrue(dataset_ops.get_structure(dataset1).is_compatible_with(
        dataset_ops.get_structure(dataset2)))
    self.assertTrue(dataset_ops.get_structure(dataset2).is_compatible_with(
        dataset_ops.get_structure(dataset1)))
    flattened_types = nest.flatten(
        dataset_ops.get_legacy_output_types(dataset1))

    next1 = self.getNext(dataset1)
    next2 = self.getNext(dataset2)

    while True:
      try:
        op1 = self.evaluate(next1())
      except errors.OutOfRangeError:
        with self.assertRaises(errors.OutOfRangeError):
          self.evaluate(next2())
        break
      op2 = self.evaluate(next2())

      op1 = nest.flatten(op1)
      op2 = nest.flatten(op2)
      assert len(op1) == len(op2)
      for i in range(len(op1)):
        if sparse_tensor.is_sparse(op1[i]):
          self.assertSparseValuesEqual(op1[i], op2[i])
        elif flattened_types[i] == dtypes.string:
          self.assertAllEqual(op1[i], op2[i])
        else:
          self.assertAllClose(op1[i], op2[i])
Code Example #14
File: grouping.py Project: zhuyangda/tensorflow
        def tf_init_func(key):
            """A wrapper for Defun that facilitates shape inference."""
            key.set_shape([])
            ret = init_func(key)
            # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
            # values to tensors.
            ret = nest.pack_sequence_as(ret, [
                sparse_tensor.SparseTensor.from_value(t)
                if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(t)
                for t in nest.flatten(ret)
            ])

            self._state_classes = sparse.get_classes(ret)
            self._state_shapes = nest.pack_sequence_as(
                ret, [t.get_shape() for t in nest.flatten(ret)])
            self._state_types = nest.pack_sequence_as(
                ret, [t.dtype for t in nest.flatten(ret)])

            dataset_ops._warn_if_collections(
                "tf.contrib.data.group_by_reducer()")  # pylint: disable=protected-access

            # Serialize any sparse tensors.
            ret = nest.pack_sequence_as(ret, [
                t for t in nest.flatten(sparse.serialize_sparse_tensors(ret))
            ])
            return nest.flatten(ret)
Code Example #15
File: optional_ops.py Project: AnishShah/tensorflow
  def from_value(value):
    """Returns an `Optional` that wraps the given value.

    Args:
      value: A nested structure of `tf.Tensor` and/or `tf.SparseTensor` objects.

    Returns:
      An `Optional` that wraps `value`.
    """
    # TODO(b/110122868): Consolidate this destructuring logic with the
    # similar code in `Dataset.from_tensors()`.
    with ops.name_scope("optional") as scope:
      with ops.name_scope("value"):
        value = nest.pack_sequence_as(value, [
            sparse_tensor_lib.SparseTensor.from_value(t)
            if sparse_tensor_lib.is_sparse(t) else ops.convert_to_tensor(
                t, name="component_%d" % i)
            for i, t in enumerate(nest.flatten(value))
        ])

      encoded_value = nest.flatten(sparse.serialize_sparse_tensors(value))
      output_classes = sparse.get_classes(value)
      output_shapes = nest.pack_sequence_as(
          value, [t.get_shape() for t in nest.flatten(value)])
      output_types = nest.pack_sequence_as(
          value, [t.dtype for t in nest.flatten(value)])

    return _OptionalImpl(
        gen_dataset_ops.optional_from_value(encoded_value, name=scope),
        output_shapes, output_types, output_classes)
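
A hedged usage sketch of the factory above (it assumes the internal module path tensorflow.python.data.ops.optional_ops from the same TF 1.x-era revision, and graph mode, so has_value() and get_value() return symbolic tensors rather than Python values):

from tensorflow.python.data.ops import optional_ops       # assumed internal path
from tensorflow.python.framework import constant_op, sparse_tensor

opt = optional_ops.Optional.from_value(
    (constant_op.constant(37.0),
     sparse_tensor.SparseTensor(indices=[[0]], values=[1], dense_shape=[2])))
print(opt.has_value())  # a boolean tensor
print(opt.get_value())  # the wrapped (dense, sparse) pair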
Code Example #16
File: structure.py Project: flavz27/master_PA
def normalize_tensors(tensors):
    """Converts a nested structure of tensor-like objects to tensors.

  * `SparseTensor`-like inputs are converted to `SparseTensor`.
  * `TensorArray` inputs are passed through.
  * Everything else is converted to a dense `Tensor`.

  Args:
    tensors: A nested structure of tensor-like, list,
      `SparseTensor`, `SparseTensorValue`, or `TensorArray` objects.

  Returns:
    A nested structure of tensor, `SparseTensor`, or `TensorArray` objects.
  """
    flat_tensors = nest.flatten(tensors)
    prepared = []
    with ops.name_scope("normalize_tensors"):
        for i, t in enumerate(flat_tensors):
            if sparse_tensor_lib.is_sparse(t):
                prepared.append(sparse_tensor_lib.SparseTensor.from_value(t))
            elif ragged_tensor.is_ragged(t):
                prepared.append(
                    ragged_tensor.convert_to_tensor_or_ragged_tensor(
                        t, name="component_%d" % i))
            elif isinstance(t, tensor_array_ops.TensorArray):
                prepared.append(t)
            else:
                prepared.append(
                    ops.convert_to_tensor(t, name="component_%d" % i))
    return nest.pack_sequence_as(tensors, prepared)
Code Example #17
File: test_base.py Project: FedericoFontana/ray
    def assertDatasetsEqual(self, dataset1, dataset2):
        """Checks that datasets are equal. Supports both graph and eager mode."""
        self.assertEqual(dataset1.output_types, dataset2.output_types)
        self.assertEqual(dataset1.output_classes, dataset2.output_classes)
        flattened_types = nest.flatten(dataset1.output_types)

        next1 = self.getNext(dataset1)
        next2 = self.getNext(dataset2)
        while True:
            try:
                op1 = self.evaluate(next1())
            except errors.OutOfRangeError:
                with self.assertRaises(errors.OutOfRangeError):
                    self.evaluate(next2())
                break
            op2 = self.evaluate(next2())

            op1 = nest.flatten(op1)
            op2 = nest.flatten(op2)
            assert len(op1) == len(op2)
            for i in range(len(op1)):
                if sparse_tensor.is_sparse(op1[i]):
                    self.assertSparseValuesEqual(op1[i], op2[i])
                elif flattened_types[i] == dtypes.string:
                    self.assertAllEqual(op1[i], op2[i])
                else:
                    self.assertAllClose(op1[i], op2[i])
Code Example #18
File: structure_test.py Project: zzk88862/tensorflow
    def testToBatchedTensorList(self, value_fn, element_0_fn):
        batched_value = value_fn()
        s = structure.Structure.from_value(batched_value)
        batched_tensor_list = s._to_batched_tensor_list(batched_value)

        # The batch dimension is 2 for all of the test cases.
        # NOTE(mrry): `tf.shape()` does not currently work for the DT_VARIANT
        # tensors in which we store sparse tensors.
        for t in batched_tensor_list:
            if t.dtype != dtypes.variant:
                self.assertEqual(2, self.evaluate(array_ops.shape(t)[0]))

        # Test that the 0th element from the unbatched tensor is equal to the
        # expected value.
        expected_element_0 = self.evaluate(element_0_fn())
        unbatched_s = s._unbatch()
        actual_element_0 = unbatched_s._from_tensor_list(
            [t[0] for t in batched_tensor_list])

        for expected, actual in zip(nest.flatten(expected_element_0),
                                    nest.flatten(actual_element_0)):
            if sparse_tensor.is_sparse(expected):
                self.assertSparseValuesEqual(expected, actual)
            else:
                self.assertAllEqual(expected, actual)
Code Example #19
  def testSlideSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
        sliding.sliding_window_batch(
            window_size=5, window_shift=3)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(init_op)
      num_batches = (10 - 5) // 3 + 1
      for i in range(num_batches):
        actual = sess.run(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Code Example #20
File: optional_ops.py Project: zpdcqu/tensorflow
  def from_value(value):
    """Returns an `Optional` that wraps the given value.

    Args:
      value: A nested structure of `tf.Tensor` and/or `tf.SparseTensor` objects.

    Returns:
      An `Optional` that wraps `value`.
    """
    # TODO(b/110122868): Consolidate this destructuring logic with the
    # similar code in `Dataset.from_tensors()`.
    with ops.name_scope("optional") as scope:
      with ops.name_scope("value"):
        value = nest.pack_sequence_as(value, [
            sparse_tensor_lib.SparseTensor.from_value(t)
            if sparse_tensor_lib.is_sparse(t) else ops.convert_to_tensor(
                t, name="component_%d" % i)
            for i, t in enumerate(nest.flatten(value))
        ])

      encoded_value = nest.flatten(sparse.serialize_sparse_tensors(value))
      output_classes = sparse.get_classes(value)
      output_shapes = nest.pack_sequence_as(
          value, [t.get_shape() for t in nest.flatten(value)])
      output_types = nest.pack_sequence_as(
          value, [t.dtype for t in nest.flatten(value)])

    return _OptionalImpl(
        gen_dataset_ops.optional_from_value(encoded_value, name=scope),
        output_shapes, output_types, output_classes)
Code Example #21
    def testMapAndBatchSparse(self, numa_aware):
        def _sparse(i):
            return sparse_tensor.SparseTensorValue(indices=[[0]],
                                                   values=(i * [1]),
                                                   dense_shape=[1])

        dataset = dataset_ops.Dataset.range(10).apply(
            batching.map_and_batch(_sparse, 5))
        if numa_aware:
            options = dataset_ops.Options()
            options.experimental_numa_aware = True
            dataset = dataset.with_options(options)
        iterator = dataset.make_initializable_iterator()

        init_op = iterator.initializer
        get_next = iterator.get_next()

        with self.cached_session() as sess:
            self.evaluate(init_op)
            for i in range(2):
                actual = self.evaluate(get_next)
                expected = sparse_tensor.SparseTensorValue(
                    indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
                    values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
                    dense_shape=[5, 1])
                self.assertTrue(sparse_tensor.is_sparse(actual))
                self.assertSparseValuesEqual(actual, expected)
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
Code Example #22
File: grouping.py Project: xman/tensorflow
    def tf_finalize_func(*args):
      """A wrapper for Defun that facilitates shape inference."""
      for arg, shape in zip(
          args,
          nest.flatten(
              sparse.as_dense_shapes(self._state_shapes, self._state_classes))):
        arg.set_shape(shape)

      nested_args = nest.pack_sequence_as(self._state_types, args)
      nested_args = sparse.deserialize_sparse_tensors(
          nested_args, self._state_types, self._state_shapes,
          self._state_classes)

      ret = finalize_func(nested_args)

      # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
      # values to tensors.
      ret = nest.pack_sequence_as(ret, [
          sparse_tensor.SparseTensor.from_value(t)
          if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(t)
          for t in nest.flatten(ret)
      ])

      self._output_classes = sparse.get_classes(ret)
      self._output_shapes = nest.pack_sequence_as(
          ret, [t.get_shape() for t in nest.flatten(ret)])
      self._output_types = nest.pack_sequence_as(
          ret, [t.dtype for t in nest.flatten(ret)])

      dataset_ops._warn_if_collections("tf.contrib.data.group_by_reducer()")  # pylint: disable=protected-access

      # Serialize any sparse tensors.
      ret = nest.pack_sequence_as(
          ret, [t for t in nest.flatten(sparse.serialize_sparse_tensors(ret))])
      return nest.flatten(ret)
Code Example #23
  def testToBatchedTensorList(self, value_fn, element_0_fn):
    batched_value = value_fn()
    s = structure.Structure.from_value(batched_value)
    batched_tensor_list = s._to_batched_tensor_list(batched_value)

    # The batch dimension is 2 for all of the test cases.
    # NOTE(mrry): `tf.shape()` does not currently work for the DT_VARIANT
    # tensors in which we store sparse tensors.
    for t in batched_tensor_list:
      if t.dtype != dtypes.variant:
        self.assertEqual(2, self.evaluate(array_ops.shape(t)[0]))

    # Test that the 0th element from the unbatched tensor is equal to the
    # expected value.
    expected_element_0 = self.evaluate(element_0_fn())
    unbatched_s = s._unbatch()
    actual_element_0 = unbatched_s._from_tensor_list(
        [t[0] for t in batched_tensor_list])

    for expected, actual in zip(
        nest.flatten(expected_element_0), nest.flatten(actual_element_0)):
      if sparse_tensor.is_sparse(expected):
        self.assertSparseValuesEqual(expected, actual)
      elif ragged_tensor.is_ragged(expected):
        self.assertRaggedEqual(expected, actual)
      else:
        self.assertAllEqual(expected, actual)
Code Example #24
  def testMapAndBatchSparse(self, numa_aware):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    dataset = dataset_ops.Dataset.range(10).apply(
        batching.map_and_batch(_sparse, 5))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    iterator = dataset_ops.make_initializable_iterator(dataset)

    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      self.evaluate(init_op)
      for i in range(2):
        actual = self.evaluate(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next)
Code Example #25
  def testWindowSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).window(
        size=5, shift=3, drop_remainder=True).flat_map(
            lambda x: x.batch(batch_size=5)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      self.evaluate(init_op)
      num_batches = (10 - 5) // 3 + 1
      for i in range(num_batches):
        actual = self.evaluate(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
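
The loop bound above follows from the window arithmetic: with 10 elements, size=5, shift=3 and drop_remainder=True there are (10 - 5) // 3 + 1 = 2 full windows, and window i holds elements i*3 .. i*3 + 4. A plain-Python restatement (no TensorFlow required):

# Enumerate the full windows the pipeline above produces.
size, shift, n = 5, 3, 10
windows = [list(range(start, start + size))
           for start in range(0, n - size + 1, shift)]
print(len(windows))  # 2, i.e. (10 - 5) // 3 + 1
print(windows)       # [[0, 1, 2, 3, 4], [3, 4, 5, 6, 7]] -> the values batched per window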
Code Example #26
 def _ragged_to_sparse(self, t):
     if ragged_tensor.is_ragged(t):
         return ragged_tensor.convert_to_tensor_or_ragged_tensor(
             t).to_sparse()
     elif sparse_tensor.is_sparse(t):
         return sparse_tensor.SparseTensor.from_value(t)
     else:
         return ops.convert_to_tensor(t)
Code Example #27
File: test_base.py Project: wang002/tensorflow_w
 def assertValuesEqual(self, expected, actual):
     """Asserts that two values are equal."""
     if sparse_tensor.is_sparse(expected):
         self.assertAllEqual(expected.indices, actual.indices)
         self.assertAllEqual(expected.values, actual.values)
         self.assertAllEqual(expected.dense_shape, actual.dense_shape)
     else:
         self.assertAllEqual(expected, actual)
Code Example #28
  def testFromTensorSlicesMixedRagged(self):
    components = (np.tile(np.array([[1], [2], [3]]),
                          20), np.tile(np.array([[12], [13], [14]]),
                                       22), np.array([37.0, 38.0, 39.0]),
                  sparse_tensor.SparseTensorValue(
                      indices=np.array([[0, 0], [1, 0], [2, 0]]),
                      values=np.array([0, 0, 0]),
                      dense_shape=np.array([3, 1])),
                  sparse_tensor.SparseTensorValue(
                      indices=np.array([[0, 0], [1, 1], [2, 2]]),
                      values=np.array([1, 2, 3]),
                      dense_shape=np.array([3, 3])),
                  ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]))

    dataset = dataset_ops.Dataset.from_tensor_slices(components)
    get_next = self.getNext(dataset)

    expected = [
        (sparse_tensor.SparseTensorValue(
            indices=np.array([[0]]),
            values=np.array([0]),
            dense_shape=np.array([1])),
         sparse_tensor.SparseTensorValue(
             indices=np.array([[0]]),
             values=np.array([1]),
             dense_shape=np.array([3])),
         ragged_factory_ops.constant_value([[0]])),
        (sparse_tensor.SparseTensorValue(
            indices=np.array([[0]]),
            values=np.array([0]),
            dense_shape=np.array([1])),
         sparse_tensor.SparseTensorValue(
             indices=np.array([[1]]),
             values=np.array([2]),
             dense_shape=np.array([3])),
         ragged_factory_ops.constant_value([[1]])),
        (sparse_tensor.SparseTensorValue(
            indices=np.array([[0]]),
            values=np.array([0]),
            dense_shape=np.array([1])),
         sparse_tensor.SparseTensorValue(
             indices=np.array([[2]]),
             values=np.array([3]),
             dense_shape=np.array([3])),
         ragged_factory_ops.constant_value([[2]])),
    ]
    for i in range(3):
      results = self.evaluate(get_next())
      for component, result_component in zip(
          (list(zip(*components[:3]))[i] + expected[i]), results):
        if sparse_tensor.is_sparse(component):
          self.assertSparseValuesEqual(component, result_component)
        elif ragged_tensor.is_ragged(component):
          self.assertRaggedEqual(component, result_component)
        else:
          self.assertAllEqual(component, result_component)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
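
Since the loop above dispatches on two predicates, note that is_sparse and is_ragged are disjoint checks: a ragged value answers only to ragged_tensor.is_ragged. A small sketch (the internal module paths are assumptions matching the imports this test appears to use):

from tensorflow.python.framework import sparse_tensor                       # assumed internal path
from tensorflow.python.ops.ragged import ragged_factory_ops, ragged_tensor  # assumed internal path

rt_value = ragged_factory_ops.constant_value([[0], [1, 2]])
print(ragged_tensor.is_ragged(rt_value))  # True: a RaggedTensorValue
print(sparse_tensor.is_sparse(rt_value))  # False: not a SparseTensor or SparseTensorValue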
Code Example #29
            def tf_reduce_func(*args):
                """A wrapper for Defun that facilitates shape inference."""
                for arg, shape in zip(
                        args,
                        nest.flatten(
                            sparse.as_dense_shapes(self._state_shapes,
                                                   self._state_classes)) +
                        nest.flatten(
                            sparse.as_dense_shapes(
                                input_dataset.output_shapes,
                                input_dataset.output_classes))):
                    arg.set_shape(shape)

                pivot = len(nest.flatten(self._state_shapes))
                nested_state_args = nest.pack_sequence_as(
                    self._state_types, args[:pivot])
                nested_state_args = sparse.deserialize_sparse_tensors(
                    nested_state_args, self._state_types, self._state_shapes,
                    self._state_classes)
                nested_input_args = nest.pack_sequence_as(
                    input_dataset.output_types, args[pivot:])
                nested_input_args = sparse.deserialize_sparse_tensors(
                    nested_input_args, input_dataset.output_types,
                    input_dataset.output_shapes, input_dataset.output_classes)

                ret = reduce_func(nested_state_args, nested_input_args)

                # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
                # values to tensors.
                ret = nest.pack_sequence_as(ret, [
                    sparse_tensor.SparseTensor.from_value(t)
                    if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(t)
                    for t in nest.flatten(ret)
                ])

                # Extract shape information from the returned values.
                flat_new_state = nest.flatten(ret)
                flat_new_state_shapes.extend(
                    [t.get_shape() for t in flat_new_state])

                # Extract and validate type information from the returned values.
                for t, dtype in zip(flat_new_state,
                                    nest.flatten(self._state_types)):
                    if t.dtype != dtype:
                        raise TypeError(
                            "The element types for the new state must match the initial "
                            "state. Expected %s; got %s." %
                            (self._state_types,
                             nest.pack_sequence_as(
                                 self._state_types,
                                 [t.dtype for t in flat_new_state])))

                # Serialize any sparse tensors.
                ret = nest.pack_sequence_as(ret, [
                    t
                    for t in nest.flatten(sparse.serialize_sparse_tensors(ret))
                ])
                return nest.flatten(ret)
Code Example #30
def from_sparse(st_input, name=None):
    """Converts a 2D `SparseTensor` to a `RaggedTensor`.

  Each row of the `output` `RaggedTensor` will contain the explicit values from
  the same row in `st_input`.  `st_input` must be ragged-right.  If it is
  not ragged-right, then an error will be generated.

  Example:

  ```python
  >>> st = SparseTensor(indices=[[0, 1], [0, 2], [0, 3], [1, 0], [3, 0]],
  ...                   values=[1, 2, 3, 4, 5],
  ...                   dense_shape=[4, 3])
  >>> ragged.from_sparse(st).eval().tolist()
  [[1, 2, 3], [4], [], [5]]
  ```

  Currently, only two-dimensional `SparseTensors` are supported.

  Args:
    st_input: The sparse tensor to convert.  Must have rank 2.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `RaggedTensor` with the same values as `st_input`.
    `output.ragged_rank = rank(st_input) - 1`.
    `output.shape = [st_input.dense_shape[0], None]`.
  Raises:
    ValueError: If the number of dimensions in `st_input` is not known
      statically, or is not two.
  """
    if not sparse_tensor.is_sparse(st_input):
        raise TypeError('Expected SparseTensor, got %s' %
                        type(st_input).__name__)
    with ops.name_scope(name, 'RaggedFromSparse', [st_input]):
        st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
            st_input, name='rt_input')

        static_rank_from_dense_shape = (
            None if st_input.dense_shape.shape.ndims is None else
            st_input.dense_shape.shape.dims[0].value)
        static_rank_from_indices = (None
                                    if st_input.indices.shape.ndims is None
                                    else st_input.indices.shape.dims[1].value)

        if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
            raise ValueError('rank(st_input) must be 2')

        with ops.control_dependencies(
                _assert_sparse_indices_are_ragged_right(st_input.indices)):
            # Treat sparse row indices as segment ids to generate a splits tensor that
            # we can pair with the sparse tensor values.  (Ignore sparse column
            # indices.)
            segment_ids = st_input.indices[:, 0]
            num_segments = st_input.dense_shape[0]
            return ragged_factory_ops.from_value_rowids(
                st_input.values, segment_ids, num_segments)
Code Example #31
def from_sparse(st_input, name=None):
  """Converts a 2D `SparseTensor` to a `RaggedTensor`.

  Each row of the `output` `RaggedTensor` will contain the explicit values from
  the same row in `st_input`.  `st_input` must be ragged-right.  If it is
  not ragged-right, then an error will be generated.

  Example:

  ```python
  >>> st = SparseTensor(indices=[[0, 1], [0, 2], [0, 3], [1, 0], [3, 0]],
  ...                   values=[1, 2, 3, 4, 5],
  ...                   dense_shape=[4, 3])
  >>> ragged.from_sparse(st).eval().tolist()
  [[1, 2, 3], [4], [], [5]]
  ```

  Currently, only two-dimensional `SparseTensors` are supported.

  Args:
    st_input: The sparse tensor to convert.  Must have rank 2.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `RaggedTensor` with the same values as `st_input`.
    `output.ragged_rank = rank(st_input) - 1`.
    `output.shape = [st_input.dense_shape[0], None]`.
  Raises:
    ValueError: If the number of dimensions in `st_input` is not known
      statically, or is not two.
  """
  if not sparse_tensor.is_sparse(st_input):
    raise TypeError('Expected SparseTensor, got %s' % type(st_input).__name__)
  with ops.name_scope(name, 'RaggedFromSparse', [st_input]):
    st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
        st_input, name='rt_input')

    static_rank_from_dense_shape = (
        None if st_input.dense_shape.shape.ndims is None
        else st_input.dense_shape.shape.dims[0].value)
    static_rank_from_indices = (
        None if st_input.indices.shape.ndims is None
        else st_input.indices.shape.dims[1].value)

    if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
      raise ValueError('rank(st_input) must be 2')

    with ops.control_dependencies(
        _assert_sparse_indices_are_ragged_right(st_input.indices)):
      # Treat sparse row indices as segment ids to generate a splits tensor that
      # we can pair with the sparse tensor values.  (Ignore sparse column
      # indices.)
      segment_ids = st_input.indices[:, 0]
      num_segments = st_input.dense_shape[0]
      return ragged_factory_ops.from_value_rowids(st_input.values, segment_ids,
                                                  num_segments)
Code Example #32
  def _compare_output_to_expected(self, dict_tensors, expected_tensors):
    self.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))

    for k, v in sorted(dict_tensors.items()):
      expected_v = expected_tensors[k]
      if sparse_tensor.is_sparse(v):
        self.assertSparseValuesEqual(expected_v, v)
      else:
        # One output for standard Tensor.
        self.assertAllEqual(expected_v, v)
Code Example #33
File: grouping.py Project: xman/tensorflow
      def tf_reduce_func(*args):
        """A wrapper for Defun that facilitates shape inference."""
        for arg, shape in zip(
            args,
            nest.flatten(
                sparse.as_dense_shapes(self._state_shapes, self._state_classes))
            + nest.flatten(
                sparse.as_dense_shapes(input_dataset.output_shapes,
                                       input_dataset.output_classes))):
          arg.set_shape(shape)

        pivot = len(nest.flatten(self._state_shapes))
        nested_state_args = nest.pack_sequence_as(self._state_types,
                                                  args[:pivot])
        nested_state_args = sparse.deserialize_sparse_tensors(
            nested_state_args, self._state_types, self._state_shapes,
            self._state_classes)
        nested_input_args = nest.pack_sequence_as(input_dataset.output_types,
                                                  args[pivot:])
        nested_input_args = sparse.deserialize_sparse_tensors(
            nested_input_args, input_dataset.output_types,
            input_dataset.output_shapes, input_dataset.output_classes)

        ret = reduce_func(nested_state_args, nested_input_args)

        # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
        # values to tensors.
        ret = nest.pack_sequence_as(ret, [
            sparse_tensor.SparseTensor.from_value(t)
            if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(t)
            for t in nest.flatten(ret)
        ])

        # Extract shape information from the returned values.
        flat_new_state = nest.flatten(ret)
        flat_new_state_shapes.extend([t.get_shape() for t in flat_new_state])

        # Extract and validate type information from the returned values.
        for t, dtype in zip(flat_new_state, nest.flatten(self._state_types)):
          if t.dtype != dtype:
            raise TypeError(
                "The element types for the new state must match the initial "
                "state. Expected %s; got %s." %
                (self._state_types,
                 nest.pack_sequence_as(self._state_types,
                                       [t.dtype for t in flat_new_state])))

        dataset_ops._warn_if_collections("tf.contrib.data.group_by_reducer()")  # pylint: disable=protected-access

        # Serialize any sparse tensors.
        ret = nest.pack_sequence_as(
            ret,
            [t for t in nest.flatten(sparse.serialize_sparse_tensors(ret))])
        return nest.flatten(ret)
Code Example #34
File: test_base.py Project: MFChunga/poo
 def assertValuesEqual(self, expected, actual):
     """Asserts that two values are equal."""
     if isinstance(expected, dict):
         self.assertItemsEqual(list(expected.keys()), list(actual.keys()))
         for k in expected.keys():
             self.assertValuesEqual(expected[k], actual[k])
     elif sparse_tensor.is_sparse(expected):
         self.assertAllEqual(expected.indices, actual.indices)
         self.assertAllEqual(expected.values, actual.values)
         self.assertAllEqual(expected.dense_shape, actual.dense_shape)
     else:
         self.assertAllEqual(expected, actual)
Code Example #35
    def testNestedSlideSparse(self):
        def _sparse(i):
            return sparse_tensor.SparseTensorValue(indices=[[0]],
                                                   values=(i * [1]),
                                                   dense_shape=[1])

        iterator = dataset_ops.make_initializable_iterator(
            dataset_ops.Dataset.range(10).map(_sparse).apply(
                sliding.sliding_window_batch(
                    window_size=4, window_shift=2)).apply(
                        sliding.sliding_window_batch(window_size=3,
                                                     window_shift=1)))
        init_op = iterator.initializer
        get_next = iterator.get_next()

        with self.cached_session() as sess:
            sess.run(init_op)
            # Slide: 1st batch.
            actual = sess.run(get_next)
            expected = sparse_tensor.SparseTensorValue(
                indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                         [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                         [2, 2, 0], [2, 3, 0]],
                values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
                dense_shape=[3, 4, 1])
            self.assertTrue(sparse_tensor.is_sparse(actual))
            self.assertValuesEqual(actual, expected)
            # Slide: 2nd batch.
            actual = sess.run(get_next)
            expected = sparse_tensor.SparseTensorValue(
                indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                         [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                         [2, 2, 0], [2, 3, 0]],
                values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
                dense_shape=[3, 4, 1])
            self.assertTrue(sparse_tensor.is_sparse(actual))
            self.assertValuesEqual(actual, expected)
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
Code example #40
0
    def testNestedWindowSparse(self):
        def _sparse(i):
            return sparse_tensor.SparseTensorValue(indices=[[0]],
                                                   values=(i * [1]),
                                                   dense_shape=[1])

        iterator = dataset_ops.Dataset.range(10).map(_sparse).window(
            size=4, shift=2, drop_remainder=True
        ).flat_map(lambda x: x.batch(batch_size=4)).window(
            size=3, shift=1, drop_remainder=True).flat_map(
                lambda x: x.batch(batch_size=3)).make_initializable_iterator()
        init_op = iterator.initializer
        get_next = iterator.get_next()

        with self.cached_session() as sess:
            self.evaluate(init_op)
            # Window: 1st batch.
            actual = self.evaluate(get_next)
            expected = sparse_tensor.SparseTensorValue(
                indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                         [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                         [2, 2, 0], [2, 3, 0]],
                values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
                dense_shape=[3, 4, 1])
            self.assertTrue(sparse_tensor.is_sparse(actual))
            self.assertSparseValuesEqual(actual, expected)
            # Window: 2nd batch.
            actual = self.evaluate(get_next)
            expected = sparse_tensor.SparseTensorValue(
                indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                         [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                         [2, 2, 0], [2, 3, 0]],
                values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
                dense_shape=[3, 4, 1])
            self.assertTrue(sparse_tensor.is_sparse(actual))
            self.assertSparseValuesEqual(actual, expected)
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
Code example #41
0
  def testNestedSlideSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = (
        dataset_ops.Dataset.range(10).map(_sparse).apply(
            sliding.sliding_window_batch(window_size=4, window_shift=2)).apply(
                sliding.sliding_window_batch(window_size=3, window_shift=1))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(init_op)
      # Slide: 1st batch.
      actual = sess.run(get_next)
      expected = sparse_tensor.SparseTensorValue(
          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                   [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                   [2, 2, 0], [2, 3, 0]],
          values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
          dense_shape=[3, 4, 1])
      self.assertTrue(sparse_tensor.is_sparse(actual))
      self.assertSparseValuesEqual(actual, expected)
      # Slide: 2nd batch.
      actual = sess.run(get_next)
      expected = sparse_tensor.SparseTensorValue(
          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                   [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                   [2, 2, 0], [2, 3, 0]],
          values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
          dense_shape=[3, 4, 1])
      self.assertTrue(sparse_tensor.is_sparse(actual))
      self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Code example #42
0
  def testNestedWindowSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).window(
        size=4, shift=2,
        drop_remainder=True).flat_map(lambda x: x.batch(batch_size=4)).window(
            size=3, shift=1, drop_remainder=True).flat_map(
                lambda x: x.batch(batch_size=3)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(init_op)
      # Window: 1st batch.
      actual = sess.run(get_next)
      expected = sparse_tensor.SparseTensorValue(
          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                   [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                   [2, 2, 0], [2, 3, 0]],
          values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
          dense_shape=[3, 4, 1])
      self.assertTrue(sparse_tensor.is_sparse(actual))
      self.assertSparseValuesEqual(actual, expected)
      # Window: 2nd batch.
      actual = sess.run(get_next)
      expected = sparse_tensor.SparseTensorValue(
          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                   [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                   [2, 2, 0], [2, 3, 0]],
          values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
          dense_shape=[3, 4, 1])
      self.assertTrue(sparse_tensor.is_sparse(actual))
      self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Code example #43
0
 def _compareOutputToExpected(self, result_values, expected_values,
                              assert_items_equal):
     if assert_items_equal:
         self.assertItemsEqual(result_values, expected_values)
         return
     for i in range(len(result_values)):
         nest.assert_same_structure(result_values[i], expected_values[i])
         for result_value, expected_value in zip(
                 nest.flatten(result_values[i]),
                 nest.flatten(expected_values[i])):
             if sparse_tensor.is_sparse(result_value):
                 self.assertSparseValuesEqual(result_value, expected_value)
             else:
                 self.assertAllEqual(result_value, expected_value)
Code example #44
0
File: test_base.py Project: aeverall/tensorflow
 def _compareOutputToExpected(self, result_values, expected_values,
                              assert_items_equal):
   if assert_items_equal:
     # TODO(shivaniagrawal): add support for nested elements containing sparse
     # tensors when needed.
     self.assertItemsEqual(result_values, expected_values)
     return
   for i in range(len(result_values)):
     nest.assert_same_structure(result_values[i], expected_values[i])
     for result_value, expected_value in zip(
         nest.flatten(result_values[i]), nest.flatten(expected_values[i])):
       if sparse_tensor.is_sparse(result_value):
         self.assertSparseValuesEqual(result_value, expected_value)
       else:
         self.assertAllEqual(result_value, expected_value)
Code example #45
0
 def _compareOutputToExpected(self, result_values, expected_values,
                              assert_items_equal):
   if assert_items_equal:
     # TODO(shivaniagrawal): add support for nested elements containing sparse
     # tensors when needed.
     self.assertItemsEqual(result_values, expected_values)
     return
   for i in range(len(result_values)):
     nest.assert_same_structure(result_values[i], expected_values[i])
     for result_value, expected_value in zip(
         nest.flatten(result_values[i]), nest.flatten(expected_values[i])):
       if sparse_tensor.is_sparse(result_value):
         self.assertSparseValuesEqual(result_value, expected_value)
       else:
         self.assertAllEqual(result_value, expected_value)
Code example #46
0
File: sparse.py Project: vinegreti2010/CFRFServers
def serialize_many_sparse_tensors(tensors):
    """Serializes many sparse tensors into a batch.

  Args:
    tensors: a tensor structure to serialize.

  Returns:
    `tensors` with any sparse tensors replaced by the serialized batch.
  """

    ret = nest.pack_sequence_as(tensors, [
        sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant)
        if sparse_tensor.is_sparse(tensor) else tensor
        for tensor in nest.flatten(tensors)
    ])
    return ret
Code example #47
0
File: sparse.py Project: AndrewTwinz/tensorflow
def serialize_many_sparse_tensors(tensors):
  """Serializes many sparse tensors into a batch.

  Args:
    tensors: a tensor structure to serialize.

  Returns:
    `tensors` with any sparse tensors replaced by the serialized batch.
  """

  ret = nest.pack_sequence_as(tensors, [
      sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant)
      if sparse_tensor.is_sparse(tensor) else tensor
      for tensor in nest.flatten(tensors)
  ])
  return ret
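A minimal usage sketch of the helper above, assuming the module aliases used elsewhere in these examples (`sparse_tensor`, `ops`) and a sparse input of rank >= 2, as `serialize_many_sparse` requires; the values are invented for illustration:

# Hypothetical call: the sparse component is replaced by its serialized
# (variant) representation, the dense component passes through unchanged.
st = sparse_tensor.SparseTensor(
    indices=[[0, 0], [1, 1]], values=[-1, 1], dense_shape=[2, 2])
dense = ops.convert_to_tensor([1.0, 2.0])
serialized = serialize_many_sparse_tensors((st, dense))
# serialized[0] is a variant-dtype tensor; serialized[1] is `dense` itself.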
Code example #48
0
  def testFromTensorsMixed(self):
    """Test an dataset that represents a single tuple of tensors."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
                  sparse_tensor.SparseTensorValue(
                      indices=np.array([[0]]),
                      values=np.array([0]),
                      dense_shape=np.array([1])),
                  sparse_tensor.SparseTensorValue(
                      indices=np.array([[0, 0], [1, 1]]),
                      values=np.array([-1, 1]),
                      dense_shape=np.array([2, 2])))

    dataset = dataset_ops.Dataset.from_tensors(components)
    self.assertEqual([
        tensor_shape.TensorShape(c.dense_shape)
        if sparse_tensor.is_sparse(c) else c.shape for c in components
    ], [shape for shape in dataset.output_shapes])

    self.assertDatasetProduces(dataset, expected_output=[components])
Code example #49
0
    def testFromTensorsMixed(self):
        """Test an dataset that represents a single tuple of tensors."""
        components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
                      sparse_tensor.SparseTensorValue(indices=np.array([[0]]),
                                                      values=np.array([0]),
                                                      dense_shape=np.array(
                                                          [1])),
                      sparse_tensor.SparseTensorValue(
                          indices=np.array([[0, 0], [1, 1]]),
                          values=np.array([-1, 1]),
                          dense_shape=np.array([2, 2])))

        dataset = dataset_ops.Dataset.from_tensors(components)
        self.assertEqual([
            tensor_shape.TensorShape(c.dense_shape)
            if sparse_tensor.is_sparse(c) else c.shape for c in components
        ], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])

        self.assertDatasetProduces(dataset, expected_output=[components])
Code example #50
0
File: grouping.py Project: Jackiefan/tensorflow
    def tf_init_func(key):
      """A wrapper for Defun that facilitates shape inference."""
      key.set_shape([])
      ret = init_func(key)
      # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
      # values to tensors.
      ret = nest.pack_sequence_as(ret, [
          sparse_tensor.SparseTensor.from_value(t)
          if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(t)
          for t in nest.flatten(ret)
      ])

      self._state_classes = sparse.get_classes(ret)
      self._state_shapes = nest.pack_sequence_as(
          ret, [t.get_shape() for t in nest.flatten(ret)])
      self._state_types = nest.pack_sequence_as(
          ret, [t.dtype for t in nest.flatten(ret)])

      # Serialize any sparse tensors.
      ret = nest.pack_sequence_as(
          ret, [t for t in nest.flatten(sparse.serialize_sparse_tensors(ret))])
      return nest.flatten(ret)
Code example #51
0
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
                                flat_output):
  tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))

  i = 0  # Index into the flattened output of session.run()
  for k, v in sorted(dict_tensors.items()):
    # TODO(shivaniagrawal): flat_output is same as v.
    expected_v = expected_tensors[k]
    tf_logging.info("Comparing key: %s", k)
    print("i", i, "flat_output", flat_output[i], "expected_v", expected_v)
    if sparse_tensor.is_sparse(v):
      # Three outputs for SparseTensor : indices, values, shape.
      tester.assertEqual([k, len(expected_v)], [k, 3])
      print("i", i, "flat_output", flat_output[i].indices, "expected_v",
            expected_v[0])
      tester.assertAllEqual(expected_v[0], flat_output[i].indices)
      tester.assertAllEqual(expected_v[1], flat_output[i].values)
      tester.assertAllEqual(expected_v[2], flat_output[i].dense_shape)
    else:
      # One output for standard Tensor.
      tester.assertAllEqual(expected_v, flat_output[i])
    i += 1
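The helper expects each sparse feature in `expected_tensors` to be given as an (indices, values, dense_shape) triple, while dense features are single arrays. A hypothetical layout (feature names and values are invented for illustration):

# Hypothetical expected_tensors for the helper above.
expected_tensors = {
    "age": np.array([[35], [61]]),          # dense: a single array
    "tags": (np.array([[0, 0], [1, 0]]),    # sparse: indices
             np.array([7, 9]),              #         values
             np.array([2, 1])),             #         dense_shape
}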
Code example #52
0
  def testBatchSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).batch(
        5).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(2):
        actual = sess.run(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Code example #53
0
  def testNestedBatchSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).batch(5).batch(
        2).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      actual = sess.run(get_next)
      expected = sparse_tensor.SparseTensorValue(
          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [0, 4, 0],
                   [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0], [1, 4, 0]],
          values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
          dense_shape=[2, 5, 1])
      self.assertTrue(sparse_tensor.is_sparse(actual))
      self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Code example #54
0
File: grouping.py Project: xman/tensorflow
    def tf_init_func(key):
      """A wrapper for Defun that facilitates shape inference."""
      key.set_shape([])
      ret = init_func(key)
      # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
      # values to tensors.
      ret = nest.pack_sequence_as(ret, [
          sparse_tensor.SparseTensor.from_value(t)
          if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(t)
          for t in nest.flatten(ret)
      ])

      self._state_classes = sparse.get_classes(ret)
      self._state_shapes = nest.pack_sequence_as(
          ret, [t.get_shape() for t in nest.flatten(ret)])
      self._state_types = nest.pack_sequence_as(
          ret, [t.dtype for t in nest.flatten(ret)])

      dataset_ops._warn_if_collections("tf.contrib.data.group_by_reducer()")  # pylint: disable=protected-access

      # Serialize any sparse tensors.
      ret = nest.pack_sequence_as(
          ret, [t for t in nest.flatten(sparse.serialize_sparse_tensors(ret))])
      return nest.flatten(ret)
Code example #55
0
File: scan_ops.py Project: AnishShah/tensorflow
  def __init__(self, input_dataset, initial_state, scan_func):
    """See `scan()` for details."""
    super(_ScanDataset, self).__init__()
    self._input_dataset = input_dataset

    with ops.name_scope("initial_state"):
      # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
      # values to tensors.
      self._initial_state = nest.pack_sequence_as(initial_state, [
          sparse_tensor.SparseTensor.from_value(t)
          if sparse_tensor.is_sparse(t) else ops.convert_to_tensor(
              t, name="component_%d" % i)
          for i, t in enumerate(nest.flatten(initial_state))
      ])

    # Compute initial values for the state classes, shapes and types based on
    # the initial state. The shapes may be refined by running `tf_scan_func` one
    # or more times below.
    self._state_classes = sparse.get_classes(self._initial_state)
    self._state_shapes = nest.pack_sequence_as(
        self._initial_state,
        [t.get_shape() for t in nest.flatten(self._initial_state)])
    self._state_types = nest.pack_sequence_as(
        self._initial_state,
        [t.dtype for t in nest.flatten(self._initial_state)])

    # Will be populated by calling `tf_scan_func`.
    self._output_classes = None
    self._output_shapes = None
    self._output_types = None

    # Iteratively rerun the scan function until reaching a fixed point on
    # `self._state_shapes`.
    need_to_rerun = True
    while need_to_rerun:

      wrapped_func = dataset_ops.StructuredFunctionWrapper(
          scan_func, "tf.contrib.data.scan()",
          input_classes=(self._state_classes, input_dataset.output_classes),
          input_shapes=(self._state_shapes, input_dataset.output_shapes),
          input_types=(self._state_types, input_dataset.output_types),
          add_to_graph=False)
      if not (
          isinstance(wrapped_func.output_types, collections.Sequence) and
          len(wrapped_func.output_types) == 2):
        raise TypeError("The scan function must return a pair comprising the "
                        "new state and the output value.")

      new_state_classes, self._output_classes = wrapped_func.output_classes

      # Extract and validate class information from the returned values.
      for new_state_class, state_class in zip(
          nest.flatten(new_state_classes),
          nest.flatten(self._state_classes)):
        if not issubclass(new_state_class, state_class):
          raise TypeError(
              "The element classes for the new state must match the initial "
              "state. Expected %s; got %s." %
              (self._state_classes, new_state_classes))

      # Extract and validate type information from the returned values.
      new_state_types, self._output_types = wrapped_func.output_types
      for new_state_type, state_type in zip(
          nest.flatten(new_state_types), nest.flatten(self._state_types)):
        if new_state_type != state_type:
          raise TypeError(
              "The element types for the new state must match the initial "
              "state. Expected %s; got %s." %
              (self._state_types, new_state_types))

      # Extract shape information from the returned values.
      new_state_shapes, self._output_shapes = wrapped_func.output_shapes

      flat_state_shapes = nest.flatten(self._state_shapes)
      flat_new_state_shapes = nest.flatten(new_state_shapes)
      weakened_state_shapes = [
          original.most_specific_compatible_shape(new)
          for original, new in zip(flat_state_shapes, flat_new_state_shapes)
      ]

      need_to_rerun = False
      for original_shape, weakened_shape in zip(flat_state_shapes,
                                                weakened_state_shapes):
        if original_shape.ndims is not None and (
            weakened_shape.ndims is None or
            original_shape.as_list() != weakened_shape.as_list()):
          need_to_rerun = True
          break

      if need_to_rerun:
        self._state_shapes = nest.pack_sequence_as(self._state_shapes,
                                                   weakened_state_shapes)

    self._scan_func = wrapped_func.function
    self._scan_func.add_to_graph(ops.get_default_graph())
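The fixed-point loop above keeps weakening the state shapes until rerunning `scan_func` no longer changes them. The weakening step uses `TensorShape.most_specific_compatible_shape`; a standalone, hypothetical illustration (not part of scan_ops.py):

# If the initial state is a length-3 vector but scan_func returns a vector of
# unknown length, the state shape is weakened to [None] and the function is
# traced again with that weaker shape.
from tensorflow.python.framework import tensor_shape

original = tensor_shape.TensorShape([3])
new = tensor_shape.TensorShape([None])
weakened = original.most_specific_compatible_shape(new)  # TensorShape([None])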
Code example #56
0
File: map_test.py Project: bunbutter/tensorflow
 def _check(i):
   self.assertTrue(sparse_tensor.is_sparse(i))
   return sparse_ops.sparse_concat(0, [i, i])