Example #1
 def testAddTensorListsFailsIfLeadingDimsMismatch(self):
   with self.cached_session(), self.test_scope():
     l1 = list_ops.tensor_list_reserve(
         element_shape=[], element_dtype=dtypes.float32, num_elements=2)
     l2 = list_ops.tensor_list_reserve(
         element_shape=[], element_dtype=dtypes.float32, num_elements=3)
     l = math_ops.add_n([l1, l2])
      with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "TensorList arguments to AddN must all have the same shape"):
       list_ops.tensor_list_stack(l, element_dtype=dtypes.float32).eval()
Example #2
 def testAddTensorListsFailsIfLeadingDimsMismatch(self):
   with self.session(), self.test_scope():
     l1 = list_ops.tensor_list_reserve(
         element_shape=[], element_dtype=dtypes.float32, num_elements=2)
     l2 = list_ops.tensor_list_reserve(
         element_shape=[], element_dtype=dtypes.float32, num_elements=3)
     l = math_ops.add_n([l1, l2])
     with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "TensorList arguments to AddN must all have the same shape"):
       list_ops.tensor_list_stack(l, element_dtype=dtypes.float32).eval()
Example #3
  def testAddTensorLists(self):
    with self.session(), self.test_scope():
      l1 = list_ops.tensor_list_reserve(
          element_shape=[], element_dtype=dtypes.float32, num_elements=3)
      l2 = list_ops.tensor_list_reserve(
          element_shape=[], element_dtype=dtypes.float32, num_elements=3)
      l1 = list_ops.tensor_list_set_item(l1, 0, 5.)
      l2 = list_ops.tensor_list_set_item(l2, 2, 10.)

      l = math_ops.add_n([l1, l2])
      self.assertAllEqual(
          list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
          [5.0, 0.0, 10.0])
Example #4
  def testAddTensorLists(self):
    with self.cached_session(), self.test_scope():
      l1 = list_ops.tensor_list_reserve(
          element_shape=[], element_dtype=dtypes.float32, num_elements=3)
      l2 = list_ops.tensor_list_reserve(
          element_shape=[], element_dtype=dtypes.float32, num_elements=3)
      l1 = list_ops.tensor_list_set_item(l1, 0, 5.)
      l2 = list_ops.tensor_list_set_item(l2, 2, 10.)

      l = math_ops.add_n([l1, l2])
      self.assertAllEqual(
          list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
          [5.0, 0.0, 10.0])
Example #5
 def testStackWithUninitializedTensors(self):
     with self.cached_session(), self.test_scope():
         l = list_ops.tensor_list_reserve(element_dtype=dtypes.float32,
                                          element_shape=[],
                                          num_elements=3)
         t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
         self.assertAllEqual(t, [0., 0., 0.])
Example #6
    def build_graph(parameters):
        """Build the TensorListSetItem op testing graph."""
        item = tf.placeholder(dtype=parameters["element_dtype"],
                              shape=parameters["element_shape"])
        tensor_list = list_ops.tensor_list_reserve(
            element_shape=None,
            num_elements=parameters["num_elements"],
            element_dtype=parameters["element_dtype"])

        init_state = (0, tensor_list)
        condition = lambda i, _: i < parameters["num_elements"]

        def loop_body(i, tensor_list):
            new_item = tf.add(
                tf.add(item, item),
                tf.constant(value=1, dtype=parameters["element_dtype"]))
            new_list = list_ops.tensor_list_set_item(tensor_list, i, new_item)
            return i + 1, new_list

        _, tensor_list = tf.while_loop(condition, loop_body, init_state)
        out = list_ops.tensor_list_stack(
            tensor_list,
            num_elements=parameters["num_elements"],
            element_dtype=parameters["element_dtype"])
        return [item], [out]
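For context, a hedged sketch of how a builder like this is typically driven. The `parameters` dict and the session scaffolding below are assumptions for illustration; they assume `build_graph` above is in scope and that `tf` is `tensorflow.compat.v1`, as the placeholders suggest:

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()  # placeholders require graph mode

    # Hypothetical parameters dict; build_graph reads exactly these three keys.
    parameters = {
        "element_dtype": tf.float32,
        "element_shape": [2],
        "num_elements": 4,
    }

    with tf.Graph().as_default():
      inputs, outputs = build_graph(parameters)
      with tf.Session() as sess:
        # The loop body writes (item + item) + 1 into every reserved slot.
        result = sess.run(outputs[0], {inputs[0]: [1.0, 2.0]})
        # result has shape (4, 2); every row equals [3.0, 5.0].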
Example #7
 def test_numpyFailsForVariant(self):
     variant_t = list_ops.tensor_list_reserve(element_shape=[],
                                              num_elements=1,
                                              element_dtype=dtypes.float32)
     with self.assertRaisesRegex(errors.InvalidArgumentError,
                                 "Cannot convert .+ variant"):
         variant_t._numpy()
Example #8
 def testSetStackReservedUnknownElementShape(self):
   with self.cached_session(), self.test_scope():
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=None, num_elements=2)
     l = list_ops.tensor_list_set_item(l, 0, [3.0, 4.0])
     t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
     self.assertAllEqual(t, [[3.0, 4.0], [0., 0.]])
Example #10
 def testGetSetReserved(self):
   with self.cached_session(), self.test_scope():
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=[], num_elements=2)
     e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
     self.assertAllEqual(e0, 0.0)
     l = list_ops.tensor_list_set_item(l, 0, 3.0)
     t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
     self.assertAllEqual(t, [3.0, 0.0])
Example #12
 def testSetGetGrad(self):
   with backprop.GradientTape() as tape:
     t = constant_op.constant(5.)
     tape.watch(t)
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=[], num_elements=3)
     l = list_ops.tensor_list_set_item(l, 1, 2. * t)
     e = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
     self.assertAllEqual(self.evaluate(e), 10.0)
   self.assertAllEqual(self.evaluate(tape.gradient(e, t)), 2.0)
Example #13
 def testSkipEagerTensorListGetItemGradAggregation(self):
   l = list_ops.tensor_list_reserve(
       element_shape=[], num_elements=1, element_dtype=dtypes.float32)
   x = constant_op.constant(1.0)
   l = list_ops.tensor_list_set_item(l, 0, x)
   l_read1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
   l_read2 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
   grad = gradients_impl.gradients([l_read1, l_read2], [x])
   with self.cached_session() as sess:
     self.assertSequenceEqual(self.evaluate(grad), [2.])
Example #17
 def testElementShape(self):
   with self.cached_session() as sess, self.test_scope():
     dim = array_ops.placeholder(dtypes.int32)
     l = list_ops.tensor_list_reserve(
         element_shape=(dim, 15), num_elements=20,
         element_dtype=dtypes.float32)
     e32 = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32)
     e64 = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int64)
     self.assertAllEqual(sess.run(e32, {dim: 10}), (10, 15))
     self.assertAllEqual(sess.run(e64, {dim: 7}), (7, 15))
Example #18
 def testGetSetReservedNonScalar(self):
   with self.cached_session() as sess, self.test_scope():
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32,
         element_shape=(7, 15),
         num_elements=2)
     l = list_ops.tensor_list_set_item(
         l, 0, constant_op.constant(1.0, shape=(7, 15)))
     e1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
     e2 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
     self.assertAllEqual(sess.run(e1), np.ones((7, 15)))
     self.assertAllEqual(sess.run(e2), np.zeros((7, 15)))
Example #20
 def testPushPop(self):
   with self.cached_session() as sess, self.test_scope():
     l = list_ops.tensor_list_reserve(
         element_shape=(7, 15), num_elements=10, element_dtype=dtypes.float32)
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(1.0, shape=(7, 15)))
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(2.0, shape=(7, 15)))
     l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(sess.run(e2), 2.0 * np.ones((7, 15)))
     self.assertAllEqual(sess.run(e1), 1.0 * np.ones((7, 15)))
Example #21
 def testPushPop(self):
   with self.cached_session() as sess, self.test_scope():
     num = array_ops.placeholder(dtypes.int32)
     l = list_ops.tensor_list_reserve(
         element_shape=(7, 15), num_elements=num, element_dtype=dtypes.float32)
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(1.0, shape=(7, 15)))
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(2.0, shape=(7, 15)))
     l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(sess.run(e2, {num: 10}), 2.0 * np.ones((7, 15)))
     self.assertAllEqual(sess.run(e1, {num: 10}), 1.0 * np.ones((7, 15)))
Example #22
        def model(x):
            l = list_ops.tensor_list_reserve(element_dtype=tf.int64,
                                             element_shape=[None, 1],
                                             num_elements=2)
            init_state = (0, x, l)
            condition = lambda i, x, l: i < 2

            def body(i, x, l):
                element = tf.where(x[i])
                l = list_ops.tensor_list_set_item(l, i, element)
                return i + 1, x, l

            _, _, l_final = tf.while_loop(condition, body, init_state)
            return list_ops.tensor_list_stack(l_final, element_dtype=tf.int64)
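A hedged usage sketch for `model` above; the wrapper context and the input value are assumptions. Note that the final stack only succeeds when every row of `x` yields the same number of `True` indices, because all list elements must share one shape:

    import tensorflow as tf

    # Each row has exactly two True entries, so both list elements come out
    # with shape [2, 1] and tensor_list_stack yields a dense [2, 2, 1] tensor.
    x = tf.constant([[True, False, True],
                     [False, True, True]])
    print(model(x))  # per-row indices of the True entries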
Example #23
 def testAddTensorListsFailsIfElementShapesMismatch(self):
   with self.session() as session, self.test_scope():
     # Use placeholders instead of constant values for shapes to prevent TF's
     # shape inference from catching this early.
     l1_element_shape = array_ops.placeholder(dtype=dtypes.int32)
     l2_element_shape = array_ops.placeholder(dtype=dtypes.int32)
     l1 = list_ops.tensor_list_reserve(
         element_shape=l1_element_shape,
         element_dtype=dtypes.float32,
         num_elements=3)
     l2 = list_ops.tensor_list_reserve(
         element_shape=l2_element_shape,
         element_dtype=dtypes.float32,
         num_elements=3)
     l = math_ops.add_n([l1, l2])
     with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "TensorList arguments to AddN must all have the same shape"):
       session.run(
           list_ops.tensor_list_stack(l, element_dtype=dtypes.float32), {
               l1_element_shape: [],
               l2_element_shape: [2]
           })
Example #24
 def testSerializeListWithInvalidTensors(self):
   worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
   with ops.Graph().as_default(), session.Session(target=worker.target):
     with ops.device("/job:worker"):
       l = list_ops.tensor_list_reserve(
           element_dtype=dtypes.float32, element_shape=[], num_elements=2)
       l = list_ops.tensor_list_set_item(l, 0, 1.)
     with ops.device("/job:ps"):
       l_ps = array_ops.identity(l)
       l_ps = list_ops.tensor_list_set_item(l_ps, 1, 2.)
       t = list_ops.tensor_list_stack(l_ps, element_dtype=dtypes.float32)
     with ops.device("/job:worker"):
       worker_t = array_ops.identity(t)
     self.assertAllEqual(self.evaluate(worker_t), [1.0, 2.0])
Example #26
 def testAddTensorListsFailsIfElementShapesMismatch(self):
   with self.cached_session() as session, self.test_scope():
     # Use placeholders instead of constant values for shapes to prevent TF's
     # shape inference from catching this early.
     l1_element_shape = array_ops.placeholder(dtype=dtypes.int32)
     l2_element_shape = array_ops.placeholder(dtype=dtypes.int32)
     l1 = list_ops.tensor_list_reserve(
         element_shape=l1_element_shape,
         element_dtype=dtypes.float32,
         num_elements=3)
     l2 = list_ops.tensor_list_reserve(
         element_shape=l2_element_shape,
         element_dtype=dtypes.float32,
         num_elements=3)
     l = math_ops.add_n([l1, l2])
      with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "TensorList arguments to AddN must all have the same shape"):
       session.run(
           list_ops.tensor_list_stack(l, element_dtype=dtypes.float32), {
               l1_element_shape: [],
               l2_element_shape: [2]
           })
Example #27
 def testEagerTensorFormatForVariant(self):
     t = list_ops.tensor_list_reserve(element_shape=[1],
                                      num_elements=1,
                                      element_dtype=dtypes.float32)
     self.assertEqual(f"{t}", "<TensorList>")
     self.assertEqual(str(t),
                      "tf.Tensor(<TensorList>, shape=(), dtype=variant)")
     self.assertEqual(f"{t!s}",
                      "tf.Tensor(<TensorList>, shape=(), dtype=variant)")
     self.assertEqual(
         repr(t),
         "<tf.Tensor: shape=(), dtype=variant, value=<TensorList>>")
     self.assertEqual(
         f"{t!r}",
         "<tf.Tensor: shape=(), dtype=variant, value=<TensorList>>")
Example #28
  def testZerosLikeUninitialized(self):
    l0 = list_ops.tensor_list_reserve([], 3, element_dtype=dtypes.float32)
    l1 = list_ops.tensor_list_set_item(l0, 0, 1.)  # [1., _, _]
    zeros_1 = array_ops.zeros_like(l1)  # [0., _, _]
    l2 = list_ops.tensor_list_set_item(l1, 2, 2.)  # [1., _, 2.]
    zeros_2 = array_ops.zeros_like(l2)  # [0., _, 0.]

    # Gather indices with zeros in `zeros_1`.
    res_1 = list_ops.tensor_list_gather(
        zeros_1, [0], element_dtype=dtypes.float32)
    # Gather indices with zeros in `zeros_2`.
    res_2 = list_ops.tensor_list_gather(
        zeros_2, [0, 2], element_dtype=dtypes.float32)

    self.assertAllEqual(self.evaluate(res_1), [0.])
    self.assertAllEqual(self.evaluate(res_2), [0., 0.])
Example #30
 def testPushPopSeparateLists(self):
   with self.cached_session() as sess, self.test_scope():
     l = list_ops.tensor_list_reserve(
         element_shape=scalar_shape(),
         num_elements=20,
         element_dtype=dtypes.float32)
     l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
     l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
     l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
     _, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
     l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
     l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
     l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
     result = sess.run([e11, [e21, e22], [e31, e32]])
     self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
Example #31
 def testDoNotConstantFoldVariants(self):
   with self.cached_session() as sess, self.test_scope():
     val = array_ops.placeholder(dtype=dtypes.float32)
     l = list_ops.tensor_list_reserve(
         element_shape=(7, 15), num_elements=10, element_dtype=dtypes.float32)
     # Note: Pushing a Placeholder will force the constant folding code
     # to build a Const node with a DT_VARIANT output. This tests that XLA
     # passes a cf_consider_fn which prevent folding such nodes.
     l = list_ops.tensor_list_push_back(
         l, array_ops.fill(value=val, dims=(7, 15)))
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(2.0, shape=(7, 15)))
     l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(sess.run(e2, {val: 1.0}), 2.0 * np.ones((7, 15)))
     self.assertAllEqual(sess.run(e1, {val: 1.0}), 1.0 * np.ones((7, 15)))
Example #32
 def testPushPopSeparateLists(self):
   with self.cached_session() as sess, self.test_scope():
     num = array_ops.placeholder(dtypes.int32)
     l = list_ops.tensor_list_reserve(
         element_shape=scalar_shape(),
         num_elements=num,
         element_dtype=dtypes.float32)
     l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
     l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
     l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
     _, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
     l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
     l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
     l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
     result = sess.run([e11, [e21, e22], [e31, e32]], {num: 20})
     self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
Example #33
  def __init__(self,
               dtype,
               size=None,
               dynamic_size=None,
               clear_after_read=None,
               tensor_array_name=None,
               handle=None,
               flow=None,
               infer_shape=True,
               element_shape=None,
               colocate_with_first_write_call=True,
               name=None):
    """Constructs a graph mode TensorArray.

    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if flow is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size.  Default: False.
      clear_after_read: (optional) unused. Not supported in TensorLists.
      tensor_array_name: (optional) unused.
      handle: (optional) Must always be None.
      flow: (optional) A variant `Tensor` scalar for a TensorList.
      infer_shape: (optional, default: True) If True, shape inference is
        enabled.  In this case, all elements must have the same shape.
      element_shape: (optional, default: None) A `TensorShape` object specifying
        the shape constraints of each of the elements of the TensorArray. Need
        not be fully defined.
      colocate_with_first_write_call: (optional) unused.
      name: (optional) A name for the operation.

    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    assert handle is None
    del handle
    del clear_after_read
    del tensor_array_name
    del colocate_with_first_write_call

    self._dynamic_size = dynamic_size

    if (flow is not None and
        (not isinstance(flow, ops.Tensor) or flow.dtype != dtypes.variant)):
      raise TypeError("flow must be a variant tensor")
    if flow is None and size is None:
      raise ValueError("Size must be provided if flow is not provided")
    if flow is not None and size is not None:
      raise ValueError("Cannot provide both a flow and size "
                       "at the same time")
    if flow is not None and element_shape is not None:
      raise ValueError("Cannot provide both a flow and element_shape "
                       "at the same time")

    self._dtype = dtypes.as_dtype(dtype).base_dtype

    # Record the current static shape for the array elements. The element
    # shape is defined either by `element_shape` or the shape of the tensor
    # of the first write. If `infer_shape` is True, every write is checked
    # for shape equality.
    if element_shape is None:
      self._infer_shape = infer_shape
      self._element_shape = []
    else:
      self._infer_shape = True
      self._element_shape = [tensor_shape.as_shape(element_shape)]
    with ops.name_scope(name, "TensorArrayV2", [size, flow]) as scope:
      if flow is None:
        self._flow = list_ops.tensor_list_reserve(
            element_shape=element_shape,
            num_elements=size,
            element_dtype=dtype,
            name=scope)
      else:
        self._flow = flow

    # For backwards compatibility.
    self._colocate_with_first_write_call = None
    self._colocate_with = None
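Per its docstring, this constructor is the TensorList-backed TensorArray used in graph mode; `size` maps to `num_elements` in the underlying tensor_list_reserve call. A minimal sketch of the resulting behavior through the public TF2 API (standard usage, not from the original file):

    import tensorflow as tf

    @tf.function  # graph mode, where TensorArray is backed by a TensorList
    def fill_and_stack():
      ta = tf.TensorArray(dtype=tf.float32, size=3, element_shape=[])
      ta = ta.write(0, 5.0)
      ta = ta.write(2, 10.0)
      return ta.stack()  # the unwritten slot reads back as 0.0

    print(fill_and_stack())  # tf.Tensor([ 5.  0. 10.], shape=(3,), dtype=float32)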
Example #34
 def testStackWithUninitializedTensors(self):
   with self.cached_session(), self.test_scope():
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=[], num_elements=3)
     t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
     self.assertAllEqual(t, [0., 0., 0.])
Example #35
 def testMemoryviewFailsForVariant(self):
     variant_t = list_ops.tensor_list_reserve(element_shape=[],
                                              num_elements=1,
                                              element_dtype=dtypes.float32)
     with self.assertRaisesRegex(BufferError, "Cannot convert .+ variant"):
         np.asarray(memoryview(variant_t))
Example #36
  def __init__(self,
               dtype,
               size=None,
               dynamic_size=None,
               clear_after_read=None,
               tensor_array_name=None,
               handle=None,
               flow=None,
               infer_shape=True,
               element_shape=None,
               colocate_with_first_write_call=True,
               name=None):
    """Constructs a graph mode TensorArray.

    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if flow is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size.  Default: False.
      clear_after_read: (optional) unused. Not supported in TensorLists.
      tensor_array_name: (optional) unused.
      handle: (optional) Must always be None.
      flow: (optional) A variant `Tensor` scalar for a TensorList.
      infer_shape: (optional, default: True) If True, shape inference is
        enabled.  In this case, all elements must have the same shape.
      element_shape: (optional, default: None) A `TensorShape` object specifying
        the shape constraints of each of the elements of the TensorArray. Need
        not be fully defined.
      colocate_with_first_write_call: (optional) unused.
      name: (optional) A name for the operation.

    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    assert handle is None
    del handle
    del clear_after_read
    del tensor_array_name
    del colocate_with_first_write_call

    self._dynamic_size = dynamic_size

    if (flow is not None and
        (not isinstance(flow, ops.Tensor) or flow.dtype != dtypes.variant)):
      raise TypeError("flow must be a variant tensor")
    if flow is None and size is None:
      raise ValueError("Size must be provided if flow is not provided")
    if flow is not None and size is not None:
      raise ValueError("Cannot provide both a flow and size "
                       "at the same time")
    if flow is not None and element_shape is not None:
      raise ValueError("Cannot provide both a flow and element_shape "
                       "at the same time")

    self._dtype = dtype

    # Record the current static shape for the array elements. The element
    # shape is defined either by `element_shape` or the shape of the tensor
    # of the first write. If `infer_shape` is True, every write is checked
    # for shape equality.
    if element_shape is None:
      self._infer_shape = infer_shape
      self._element_shape = []
    else:
      self._infer_shape = True
      self._element_shape = [tensor_shape.TensorShape(element_shape)]
    with ops.name_scope(name, "TensorArrayV2", [size, flow]) as scope:
      if flow is None:
        self._flow = list_ops.tensor_list_reserve(
            element_shape=element_shape,
            num_elements=size,
            element_dtype=dtype,
            name=scope)
      else:
        self._flow = flow

    # For backwards compatibility.
    self._colocate_with_first_write_call = None
    self._colocate_with = None
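Pulling together the pattern that recurs throughout these examples, a minimal eager-mode sketch built only from calls the snippets above already use (these are internal TF modules, so import paths may shift between releases):

    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import list_ops

    # Reserve a list of 3 scalar float32 slots; all slots start uninitialized.
    l = list_ops.tensor_list_reserve(
        element_shape=[], element_dtype=dtypes.float32, num_elements=3)
    l = list_ops.tensor_list_set_item(l, 1, 7.0)
    e = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    print(e.numpy())  # 7.0
    print(t.numpy())  # [0. 7. 0.] -- uninitialized slots stack as zeros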