A collection of code examples for list_ops.tensor_list_from_tensor, TensorFlow's internal op wrapper that converts a Tensor into a TensorList variant whose elements are the slices along the tensor's first dimension.

Example #1
 def testAddN(self):
   l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
   l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
   l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
   result = math_ops.add_n((l1, l2, l3))
   result_t = list_ops.tensor_list_stack(result, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(result_t), [9., 12.])
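For reference, a minimal, self-contained sketch of the pattern in Example #1; the import paths assume TensorFlow's internal module layout (tensorflow.python.*), which all of these snippets come from:

# Eager-mode sketch: math_ops.add_n sums variant TensorLists element-wise.
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import list_ops, math_ops

l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
result = math_ops.add_n((l1, l2))
stacked = list_ops.tensor_list_stack(result, element_dtype=dtypes.float32)
print(stacked)  # [4.0, 6.0]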
Example #2
  def testConcat(self):
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l0 = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
    l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=scalar_shape())
    l_batch_0 = array_ops.stack([l0, l1])
    l_batch_1 = array_ops.stack([l1, l0])

    l_concat_01 = list_ops.tensor_list_concat_lists(
        l_batch_0, l_batch_1, element_dtype=dtypes.float32)
    l_concat_10 = list_ops.tensor_list_concat_lists(
        l_batch_1, l_batch_0, element_dtype=dtypes.float32)
    l_concat_00 = list_ops.tensor_list_concat_lists(
        l_batch_0, l_batch_0, element_dtype=dtypes.float32)
    l_concat_11 = list_ops.tensor_list_concat_lists(
        l_batch_1, l_batch_1, element_dtype=dtypes.float32)

    expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]]
    expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]]
    expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]]
    expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]]

    for i, (concat, expected) in enumerate(zip(
        [l_concat_00, l_concat_01, l_concat_10, l_concat_11],
        [expected_00, expected_01, expected_10, expected_11])):
      splitted = array_ops.unstack(concat)
      splitted_stacked_ret = self.evaluate(
          (list_ops.tensor_list_stack(splitted[0], dtypes.float32),
           list_ops.tensor_list_stack(splitted[1], dtypes.float32)))
      print("Test concat %d: %s, %s, %s, %s"
            % (i, expected[0], splitted_stacked_ret[0],
               expected[1], splitted_stacked_ret[1]))
      self.assertAllClose(expected[0], splitted_stacked_ret[0])
      self.assertAllClose(expected[1], splitted_stacked_ret[1])

    # Concatenating mismatched shapes fails.
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      self.evaluate(
          list_ops.tensor_list_concat_lists(
              l_batch_0,
              list_ops.empty_tensor_list(scalar_shape(), dtypes.float32),
              element_dtype=dtypes.float32))

    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "element shapes are not identical at index 0"):
      l_batch_of_vec_tls = array_ops.stack(
          [list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2)
      self.evaluate(
          list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_vec_tls,
                                            element_dtype=dtypes.float32))

    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r"input_b\[0\].dtype != element_dtype."):
      l_batch_of_int_tls = array_ops.stack(
          [list_ops.tensor_list_from_tensor([1], element_shape=scalar_shape())]
          * 2)
      self.evaluate(
          list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls,
                                            element_dtype=dtypes.float32))
Example #3
  def testSerialize(self):
    # pylint: disable=g-import-not-at-top
    try:
      import portpicker
    except ImportError:
      return
    with context.graph_mode():
      worker_port = portpicker.pick_unused_port()
      ps_port = portpicker.pick_unused_port()
      cluster_dict = {
          "worker": ["localhost:%s" % worker_port],
          "ps": ["localhost:%s" % ps_port]
      }
      cs = server_lib.ClusterSpec(cluster_dict)

      worker = server_lib.Server(
          cs, job_name="worker", protocol="grpc", task_index=0, start=True)
      unused_ps = server_lib.Server(
          cs, job_name="ps", protocol="grpc", task_index=0, start=True)
      with ops.Graph().as_default(), session.Session(target=worker.target):
        with ops.device("/job:worker"):
          t = constant_op.constant([[1.0], [2.0]])
          l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
        with ops.device("/job:ps"):
          l_ps = array_ops.identity(l)
          l_ps, e = list_ops.tensor_list_pop_back(
              l_ps, element_dtype=dtypes.float32)
        with ops.device("/job:worker"):
          worker_e = array_ops.identity(e)
        self.assertAllEqual(worker_e.eval(), [2.0])
Example #4
 def testTensorListFromTensor(self):
   t = constant_op.constant([1.0, 2.0])
   l = list_ops.tensor_list_from_tensor(t, element_shape=[])
   l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(e), 2.0)
   l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(e), 1.0)
   self.assertAllEqual(self.evaluate(list_ops.tensor_list_length(l)), 0)
Example #5
 def testGetSetItem(self):
   t = constant_op.constant([1.0, 2.0])
   l = list_ops.tensor_list_from_tensor(t, element_shape=[])
   e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(e0), 1.0)
   l = list_ops.tensor_list_set_item(l, 0, 3.0)
   t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(t), [3.0, 2.0])
Example #6
  def test_get_item_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
    t = slices.get_item(
        l, 1, slices.GetItemOpts(element_dtype=initial_list.dtype))

    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [3, 4])
Example #7
def tf_tensor_list_new(elements, element_dtype=None, element_shape=None):
  """Overload of new_list that stages a Tensor list creation."""
  if tensor_util.is_tensor(elements):
    if element_shape is not None:
      raise ValueError(
          'element shape may not be specified when creating list from tensor')
    element_shape = array_ops.shape(elements)[1:]
    l = list_ops.tensor_list_from_tensor(elements, element_shape=element_shape)
    return l

  elements = tuple(ops.convert_to_tensor(el) for el in elements)

  all_dtypes = set(el.dtype for el in elements)
  if len(all_dtypes) == 1:
    inferred_dtype = tuple(all_dtypes)[0]
    if element_dtype is not None and element_dtype != inferred_dtype:
      raise ValueError(
          'incompatible dtype; specified: {}, inferred from {}: {}'.format(
              element_dtype, elements, inferred_dtype))
  elif all_dtypes:
    # Heterogeneous lists are ok.
    if element_dtype is not None:
      raise ValueError(
          'specified dtype {} is inconsistent with that of elements {}'.format(
              element_dtype, elements))
    inferred_dtype = dtypes.variant
  else:
    inferred_dtype = dtypes.variant

  all_shapes = set(tuple(el.shape.as_list()) for el in elements)
  if len(all_shapes) == 1:
    inferred_shape = array_ops.shape(elements[0])
    if element_shape is not None and element_shape != inferred_shape:
      raise ValueError(
          'incompatible shape; specified: {}, inferred from {}: {}'.format(
              element_shape, elements, inferred_shape))
  elif all_shapes:
    # Heterogeneous lists are ok.
    if element_shape is not None:
      raise ValueError(
          'specified shape {} is inconsistent with that of elements {}'.format(
              element_shape, elements))
    inferred_shape = constant_op.constant(-1)  # unknown shape, by convention
  else:
    inferred_shape = constant_op.constant(-1)  # unknown shape, by convention

  if element_dtype is None:
    element_dtype = inferred_dtype
  if element_shape is None:
    element_shape = inferred_shape

  element_shape = ops.convert_to_tensor(element_shape, dtype=dtypes.int32)
  l = list_ops.empty_tensor_list(
      element_shape=element_shape, element_dtype=element_dtype)
  for el in elements:
    l = list_ops.tensor_list_push_back(l, el)
  return l
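A brief usage sketch for tf_tensor_list_new as defined above (illustrative only; it reuses the same internal imports as the function body):

# dtype and element shape are inferred from the homogeneous elements.
l = tf_tensor_list_new([constant_op.constant(1.0), constant_op.constant(2.0)])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)  # [1.0, 2.0]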
Example #8
  def test_set_item_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
    l = slices.set_item(l, 0, [5, 6])

    with self.cached_session() as sess:
      t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
      self.assertAllEqual(self.evaluate(t), [[5, 6], [3, 4]])
Example #9
 def unstack(self, value, name=None):
   """See TensorArray."""
   with ops.name_scope(name, "TensorArrayUnstack", [self._flow, value]):
     value = ops.convert_to_tensor(value, name="value")
     if self._infer_shape and not context.executing_eagerly():
       self._merge_element_shape(value.shape[1:])
     flow_out = list_ops.tensor_list_from_tensor(
         tensor=value, element_shape=value.shape[1:])
     return build_ta_with_new_flow(self, flow_out)
Example #10
 def testGetSet(self):
   with self.cached_session(), self.test_scope():
     t = constant_op.constant([1.0, 2.0])
     l = list_ops.tensor_list_from_tensor(t, element_shape=[])
     e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
     self.assertAllEqual(e0, 1.0)
     l = list_ops.tensor_list_set_item(l, 0, 3.0)
     t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
     self.assertAllEqual(t, [3.0, 2.0])
Example #11
 def testStackFromTensorGradients(self):
   with backprop.GradientTape() as tape:
     c = constant_op.constant([1.0, 2.0])
     tape.watch(c)
     l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
     c2 = list_ops.tensor_list_stack(
         l, element_dtype=dtypes.float32)
     result = c2 * 2.0
   self.assertAllEqual(tape.gradient(result, [c])[0], [2.0, 2.0])
Example #12
 def testStackFromTensorGradients(self):
   with backprop.GradientTape() as tape:
     c = constant_op.constant([1.0, 2.0])
     tape.watch(c)
     l = list_ops.tensor_list_from_tensor(c, element_shape=[])
     c2 = list_ops.tensor_list_stack(
         l, element_dtype=dtypes.float32, num_elements=2)
     result = c2 * 2.0
   grad = tape.gradient(result, [c])[0]
   self.assertAllEqual(self.evaluate(grad), [2.0, 2.0])
Example #13
 def testResourceVariableScatterGather(self):
   c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
   l = list_ops.tensor_list_from_tensor(c, element_shape=[])
   v = vs.get_variable("var", initializer=[l] * 10, use_resource=True)
   v_r_0_stacked = list_ops.tensor_list_stack(v[0], dtypes.float32)
   self.evaluate(v.initializer)
   self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_0_stacked))
   v_r_sparse_stacked = list_ops.tensor_list_stack(
       v.sparse_read(0), dtypes.float32)
   self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_sparse_stacked))
   l_new_0 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
   l_new_1 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
   updated_v = state_ops.scatter_update(v, [3, 5], [l_new_0, l_new_1])
   updated_v_elems = array_ops.unstack(updated_v)
   updated_v_stacked = [
       list_ops.tensor_list_stack(el, dtypes.float32) for el in updated_v_elems
   ]
   expected = ([[1.0, 2.0]] * 3 + [[3.0, 4.0], [1.0, 2.0], [5.0, 6.0]] +
               [[1.0, 2.0]] * 4)
   self.assertAllEqual(self.evaluate(updated_v_stacked), expected)
Example #14
 def testListFromTensor(self):
   with self.cached_session(), self.test_scope():
     t = constant_op.constant([1.0, 2.0])
     l = list_ops.tensor_list_from_tensor(t, element_shape=[])
     e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
     self.assertAllEqual(e, 1.0)
     l, e0 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(e0, 2.0)
     l, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(e1, 1.0)
     self.assertAllEqual(list_ops.tensor_list_length(l), 0)
Example #15
  def test_stack_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)

    opts = data_structures.ListStackOpts(
        element_dtype=initial_list.dtype, original_call=None)

    with self.test_session() as sess:
      t = data_structures.list_stack(l, opts)
      self.assertAllEqual(sess.run(t), sess.run(initial_list))
Example #16
 def testScatterUpdateVariant(self):
   with context.eager_mode():
     v = resource_variable_ops.ResourceVariable([
         list_ops.empty_tensor_list(
             element_dtype=dtypes.float32, element_shape=[])
     ])
     v.scatter_update(
         ops.IndexedSlices(
             list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]), 0))
     self.assertAllEqual(
         list_ops.tensor_list_get_item(v[0], 0, element_dtype=dtypes.float32),
         1.)
Example #17
 def testSkipEagerSetItemWithMismatchedShapeFails(self):
   with self.cached_session() as sess:
     ph = array_ops.placeholder(dtypes.float32)
     c = constant_op.constant([1.0, 2.0])
     l = list_ops.tensor_list_from_tensor(c, element_shape=[])
     # Set a placeholder with unknown shape to satisfy the shape inference
     # at graph building time.
     l = list_ops.tensor_list_set_item(l, 0, ph)
     l_0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
     with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                  "incompatible shape"):
       sess.run(l_0, {ph: [3.0]})
Example #19
 def build_graph(parameters):
   """Build the TensorListResize op testing graph."""
   data = tf.placeholder(
       dtype=parameters["element_dtype"],
       shape=[parameters["num_elements"]] + parameters["element_shape"])
   tensor_list = list_ops.tensor_list_from_tensor(data,
                                                  parameters["element_shape"])
   tensor_list = list_ops.tensor_list_resize(tensor_list,
                                             parameters["new_size"])
   out = list_ops.tensor_list_stack(
       tensor_list, element_dtype=parameters["element_dtype"])
   return [data], [out]
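build_graph expects a parameters dict supplied by the surrounding TFLite test harness. One plausible configuration, with illustrative values that are not taken from the original suite:

# Hypothetical parameters; requires TF1 graph mode because of tf.placeholder.
parameters = {
    "element_dtype": tf.float32,
    "num_elements": 4,
    "element_shape": [],  # scalar elements
    "new_size": 2,        # shrink the list to two elements
}
inputs, outputs = build_graph(parameters)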
Example #21
    def test_stack_tensor_list(self):
        initial_list = constant_op.constant([[1, 2], [3, 4]])
        elem_shape = constant_op.constant([2])
        l = list_ops.tensor_list_from_tensor(initial_list,
                                             element_shape=elem_shape)

        opts = data_structures.ListStackOpts(element_dtype=initial_list.dtype,
                                             original_call=None)

        with self.cached_session() as sess:
            t = data_structures.list_stack(l, opts)
            self.assertAllEqual(self.evaluate(t), self.evaluate(initial_list))
Example #22
 def testStackFromTensorGradients(self):
     with backprop.GradientTape() as tape:
         c = constant_op.constant([1.0, 2.0])
         tape.watch(c)
         l = list_ops.tensor_list_from_tensor(c,
                                              element_shape=scalar_shape())
         c2 = list_ops.tensor_list_stack(l,
                                         element_dtype=dtypes.float32,
                                         num_elements=2)
         result = c2 * 2.0
     grad = tape.gradient(result, [c])[0]
     self.assertAllEqual(self.evaluate(grad), [2.0, 2.0])
Example #23
 def unstack(self, value, name=None):
   """See TensorArray."""
   with ops.name_scope(name, "TensorArrayUnstack", [self._flow, value]):
     # TODO(b/129870929): Fix after all callers provide proper init dtype.
     value = ops.convert_to_tensor(
         value, preferred_dtype=self._dtype, name="value")
     _check_dtypes(value, self._dtype)
     if self._infer_shape and not context.executing_eagerly():
       self._merge_element_shape(value.shape[1:])
     flow_out = list_ops.tensor_list_from_tensor(
         tensor=value, element_shape=value.shape[1:])
     return build_ta_with_new_flow(self, flow_out)
Example #24
  def testPushBackBatch(self):
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])
    l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])
    l_batch = array_ops.stack([l0, l1])
    l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0])
    l_unstack = array_ops.unstack(l_push)
    l0_ret = list_ops.tensor_list_stack(l_unstack[0], dtypes.float32)
    l1_ret = list_ops.tensor_list_stack(l_unstack[1], dtypes.float32)
    self.assertAllClose([1.0, 2.0, 3.0], self.evaluate(l0_ret))
    self.assertAllClose([-1.0, 4.0], self.evaluate(l1_ret))

    with ops.control_dependencies([l_push]):
      l_unstack_orig = array_ops.unstack(l_batch)
      l0_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[0],
                                               dtypes.float32)
      l1_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[1],
                                               dtypes.float32)

    # Check that without aliasing, push_back_batch still works; and
    # that it doesn't modify the input.
    l0_r_v, l1_r_v, l0_orig_v, l1_orig_v = self.evaluate(
        (l0_ret, l1_ret, l0_orig_ret, l1_orig_ret))
    self.assertAllClose([1.0, 2.0, 3.0], l0_r_v)
    self.assertAllClose([-1.0, 4.0], l1_r_v)
    self.assertAllClose([1.0, 2.0], l0_orig_v)
    self.assertAllClose([-1.0], l1_orig_v)

    # Pushing back mismatched shapes fails.
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, []))

    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "incompatible shape to a list at index 0"):
      self.evaluate(
          list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]]))

    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "Invalid data type at index 0"):
      self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4]))
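As the control-dependency block above verifies, tensor_list_push_back_batch is functional: it returns a new batched list and leaves its input unchanged. A condensed sketch of that contract, reusing the names from the test:

l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0])
# l_batch still stacks the original lists [1.0, 2.0] and [-1.0];
# only l_push holds the appended elements [1.0, 2.0, 3.0] and [-1.0, 4.0].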
Example #25
  def testPushBackBatch(self):
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l0 = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
    l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=scalar_shape())
    l_batch = array_ops.stack([l0, l1])
    l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0])
    l_unstack = array_ops.unstack(l_push)
    l0_ret = list_ops.tensor_list_stack(l_unstack[0], dtypes.float32)
    l1_ret = list_ops.tensor_list_stack(l_unstack[1], dtypes.float32)
    self.assertAllClose([1.0, 2.0, 3.0], self.evaluate(l0_ret))
    self.assertAllClose([-1.0, 4.0], self.evaluate(l1_ret))

    with ops.control_dependencies([l_push]):
      l_unstack_orig = array_ops.unstack(l_batch)
      l0_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[0],
                                               dtypes.float32)
      l1_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[1],
                                               dtypes.float32)

    # Check that without aliasing, push_back_batch still works; and
    # that it doesn't modify the input.
    l0_r_v, l1_r_v, l0_orig_v, l1_orig_v = self.evaluate(
        (l0_ret, l1_ret, l0_orig_ret, l1_orig_ret))
    self.assertAllClose([1.0, 2.0, 3.0], l0_r_v)
    self.assertAllClose([-1.0, 4.0], l1_r_v)
    self.assertAllClose([1.0, 2.0], l0_orig_v)
    self.assertAllClose([-1.0], l1_orig_v)

    # Pushing back mismatched shapes fails.
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, []))

    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "incompatible shape to a list at index 0"):
      self.evaluate(
          list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]]))

    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "Invalid data type at index 0"):
      self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4]))
Example #26
 def testSerializeListWithUnknownRank(self):
     worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
     with ops.Graph().as_default(), session.Session(target=worker.target):
         with ops.device("/job:worker"):
             t = constant_op.constant([[1.0], [2.0]])
             l = list_ops.tensor_list_from_tensor(t, element_shape=-1)
         with ops.device("/job:ps"):
             l_ps = array_ops.identity(l)
             element_shape = list_ops.tensor_list_element_shape(
                 l_ps, shape_type=dtypes.int32)
         with ops.device("/job:worker"):
             element_shape = array_ops.identity(element_shape)
         self.assertEqual(self.evaluate(element_shape), -1)
Example #27
 def testAddNNestedList(self):
   l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
   l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
   l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
   l4 = list_ops.tensor_list_from_tensor([7.0, 8.0], element_shape=[])
   a = list_ops.empty_tensor_list(
       element_dtype=dtypes.variant, element_shape=[])
   a = list_ops.tensor_list_push_back(a, l1)
   a = list_ops.tensor_list_push_back(a, l2)
   b = list_ops.empty_tensor_list(
       element_dtype=dtypes.variant, element_shape=[])
   b = list_ops.tensor_list_push_back(b, l3)
   b = list_ops.tensor_list_push_back(b, l4)
   result = math_ops.add_n((a, b))
   result_0 = list_ops.tensor_list_stack(
       list_ops.tensor_list_get_item(result, 0, element_dtype=dtypes.variant),
       element_dtype=dtypes.float32)
   result_1 = list_ops.tensor_list_stack(
       list_ops.tensor_list_get_item(result, 1, element_dtype=dtypes.variant),
       element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(result_0), [6., 8.])
   self.assertAllEqual(self.evaluate(result_1), [10., 12.])
Example #28
 def testSerializeListWithUnknownRank(self):
   worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
   with ops.Graph().as_default(), session.Session(target=worker.target):
     with ops.device("/job:worker"):
       t = constant_op.constant([[1.0], [2.0]])
       l = list_ops.tensor_list_from_tensor(t, element_shape=None)
     with ops.device("/job:ps"):
       l_ps = array_ops.identity(l)
       element_shape = list_ops.tensor_list_element_shape(
           l_ps, shape_type=dtypes.int32)
     with ops.device("/job:worker"):
       element_shape = array_ops.identity(element_shape)
     self.assertEqual(self.evaluate(element_shape), -1)
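Examples #26 and #28 rely on the convention that a fully unknown element shape is reported as the scalar -1. A minimal check under the same internal imports (a sketch, assuming eager mode):

l = list_ops.tensor_list_from_tensor(
    constant_op.constant([[1.0], [2.0]]), element_shape=None)
print(list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32))  # -1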
Example #29
 def testSerialize(self):
   worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
   with ops.Graph().as_default(), session.Session(target=worker.target):
     with ops.device("/job:worker"):
       t = constant_op.constant([[1.0], [2.0]])
       l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
     with ops.device("/job:ps"):
       l_ps = array_ops.identity(l)
       l_ps, e = list_ops.tensor_list_pop_back(
           l_ps, element_dtype=dtypes.float32)
     with ops.device("/job:worker"):
       worker_e = array_ops.identity(e)
     self.assertAllEqual(self.evaluate(worker_e), [2.0])
Example #31
 def testSerialize(self):
     worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
     with ops.Graph().as_default(), session.Session(target=worker.target):
         with ops.device("/job:worker"):
             t = constant_op.constant([[1.0], [2.0]])
             l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
         with ops.device("/job:ps"):
             l_ps = array_ops.identity(l)
             l_ps, e = list_ops.tensor_list_pop_back(
                 l_ps, element_dtype=dtypes.float32)
         with ops.device("/job:worker"):
             worker_e = array_ops.identity(e)
         self.assertAllEqual(self.evaluate(worker_e), [2.0])
Example #32
 def testResourceVariableScatterGather(self):
   c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
   l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
   v = vs.get_variable("var", initializer=[l] * 10, use_resource=True)
   v_r_0_stacked = list_ops.tensor_list_stack(v[0], dtypes.float32)
   self.evaluate(v.initializer)
   self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_0_stacked))
   v_r_sparse_stacked = list_ops.tensor_list_stack(
       v.sparse_read(0), dtypes.float32)
   self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_sparse_stacked))
   l_new_0 = list_ops.tensor_list_from_tensor(
       [3.0, 4.0], element_shape=scalar_shape())
   l_new_1 = list_ops.tensor_list_from_tensor(
       [5.0, 6.0], element_shape=scalar_shape())
   updated_v = state_ops.scatter_update(v, [3, 5], [l_new_0, l_new_1])
   updated_v_elems = array_ops.unstack(updated_v)
   updated_v_stacked = [
       list_ops.tensor_list_stack(el, dtypes.float32) for el in updated_v_elems
   ]
   expected = ([[1.0, 2.0]] * 3 + [[3.0, 4.0], [1.0, 2.0], [5.0, 6.0]] +
               [[1.0, 2.0]] * 4)
   self.assertAllEqual(self.evaluate(updated_v_stacked), expected)
Example #33
 def _to_tensor_list(self, value):
     if not isinstance(value, tensor_array_ops.TensorArray):
         raise TypeError("value must be a TensorArray, but saw: {}".format(
             type(value)))
     if value.flow is not None and value.flow.dtype == dtypes.variant:
         return [value.flow]
     else:
         # Convert to a TF2-style TensorArray.
         # TODO(ebrevdo): Add an "_as_variant" method to TensorArray class, or
         # "implementation / as_variant" arg to TensorArray constructor.
         with ops.name_scope("convert_tensor_array"):
             flow = list_ops.tensor_list_from_tensor(
                 tensor=value.stack(), element_shape=value.element_shape)
         return [flow]
Example #34
 def testGetSetGradients(self):
   with backprop.GradientTape() as tape:
     c = constant_op.constant([1.0, 2.0])
     tape.watch(c)
     l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
     c2 = constant_op.constant(3.0)
     tape.watch(c2)
     l = list_ops.tensor_list_set_item(l, 0, c2)
     e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
     ee = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
     y = e * e + ee * ee
   grad_c, grad_c2 = tape.gradient(y, [c, c2])
   self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0])
   self.assertAllEqual(self.evaluate(grad_c2), 6.0)
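The expected gradients above can be checked by hand: y = e*e + ee*ee with e = c2 = 3.0 (item 0 of the list was overwritten, so c[0] receives no gradient) and ee = c[1] = 2.0, giving dy/dc = [0.0, 2 * 2.0] = [0.0, 4.0] and dy/dc2 = 2 * 3.0 = 6.0.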
Example #35
 def _to_tensor_list(self, value):
   if not isinstance(value, tensor_array_ops.TensorArray):
     raise TypeError("value must be a TensorArray, but saw: {}"
                     .format(type(value)))
   if value.flow is not None and value.flow.dtype == dtypes.variant:
     return [value.flow]
   else:
     # Convert to a TF2-style TensorArray.
     # TODO(ebrevdo): Add an "_as_variant" method to TensorArray class, or
     # "implementation / as_variant" arg to TensorArray constructor.
     with ops.name_scope("convert_tensor_array"):
       flow = list_ops.tensor_list_from_tensor(
           tensor=value.stack(), element_shape=value.element_shape)
     return [flow]
Example #36
 def testGetSetGradients(self):
   with backprop.GradientTape() as tape:
     c = constant_op.constant([1.0, 2.0])
     tape.watch(c)
     l = list_ops.tensor_list_from_tensor(c, element_shape=[])
     c2 = constant_op.constant(3.0)
     tape.watch(c2)
     l = list_ops.tensor_list_set_item(l, 0, c2)
     e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
     ee = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
     y = e * e + ee * ee
   grad_c, grad_c2 = tape.gradient(y, [c, c2])
   self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0])
   self.assertAllEqual(self.evaluate(grad_c2), 6.0)
Example #37
 def testListFromTensor(self):
     with self.cached_session(), self.test_scope():
         t = constant_op.constant([1.0, 2.0])
         l = list_ops.tensor_list_from_tensor(t, element_shape=[])
         e = list_ops.tensor_list_get_item(l,
                                           0,
                                           element_dtype=dtypes.float32)
         self.assertAllEqual(e, 1.0)
         l, e0 = list_ops.tensor_list_pop_back(l,
                                               element_dtype=dtypes.float32)
         self.assertAllEqual(e0, 2.0)
         l, e1 = list_ops.tensor_list_pop_back(l,
                                               element_dtype=dtypes.float32)
         self.assertAllEqual(e1, 1.0)
         self.assertAllEqual(list_ops.tensor_list_length(l), 0)
Example #38
    def test_basic(self):
        self.assertEqual(special_functions.stack(1), 1)
        self.assertListEqual(special_functions.stack([1, 2, 3]), [1, 2, 3])
        # TODO(mdan): This should probably forward to tf.stack.
        self.assertTrue(
            isinstance(
                special_functions.stack(
                    [constant_op.constant(1),
                     constant_op.constant(2)]), list))

        t = constant_op.constant([1.0, 2.0])
        l = list_ops.tensor_list_from_tensor(
            t, element_shape=constant_op.constant([], dtype=dtypes.int32))
        self.assertTrue(
            tensor_util.is_tensor(
                special_functions.stack(l, element_dtype=dtypes.float32)))
Example #39
 def testCPUGPUCopy(self):
     if not context.num_gpus():
         return
     t = constant_op.constant([1.0, 2.0])
     l = list_ops.tensor_list_from_tensor(t, element_shape=scalar_shape())
     with context.device("gpu:0"):
         l_gpu = array_ops.identity(l)
         self.assertAllEqual(
             self.evaluate(
                 list_ops.tensor_list_pop_back(
                     l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
     l_cpu = array_ops.identity(l_gpu)
     self.assertAllEqual(
         self.evaluate(
             list_ops.tensor_list_pop_back(
                 l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
Example #40
 def testCPUGPUCopy(self):
   if not context.num_gpus():
     return
   t = constant_op.constant([1.0, 2.0])
   l = list_ops.tensor_list_from_tensor(t, element_shape=[])
   with context.device("gpu:0"):
     l_gpu = array_ops.identity(l)
     self.assertAllEqual(
         self.evaluate(
             list_ops.tensor_list_pop_back(
                 l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
   l_cpu = array_ops.identity(l_gpu)
   self.assertAllEqual(
       self.evaluate(
           list_ops.tensor_list_pop_back(
               l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
Example #41
  def test_basic(self):
    self.assertEqual(special_functions.stack(1), 1)
    self.assertListEqual(special_functions.stack([1, 2, 3]), [1, 2, 3])
    # TODO(mdan): This should probably forward to tf.stack.
    self.assertTrue(
        isinstance(
            special_functions.stack(
                [constant_op.constant(1),
                 constant_op.constant(2)]), list))

    t = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(
        t, element_shape=constant_op.constant([], dtype=dtypes.int32))
    self.assertTrue(
        tensor_util.is_tensor(
            special_functions.stack(l, element_dtype=dtypes.float32)))
Example #42
 def build_graph(parameters):
     """Build the TensorListSetItem op testing graph."""
     data = tf.placeholder(dtype=parameters["element_dtype"],
                           shape=[parameters["num_elements"]] +
                           parameters["element_shape"])
     item = tf.placeholder(dtype=parameters["element_dtype"],
                           shape=parameters["element_shape"])
     tensor_list = list_ops.tensor_list_from_tensor(
         data, parameters["element_shape"])
     tensor_list = list_ops.tensor_list_set_item(tensor_list,
                                                 parameters["index"], item)
     out = list_ops.tensor_list_stack(
         tensor_list,
         num_elements=parameters["num_elements"],
         element_dtype=parameters["element_dtype"])
     return [data, item], [out]
Example #43
 def unstack(self, value, name=None):
     """See TensorArray."""
     with ops.name_scope(name, "TensorArrayUnstack", [self._flow, value]):
         value = ops.convert_to_tensor(value, name="value")
         if self._infer_shape and not context.executing_eagerly():
             self._merge_element_shape(value.shape[1:])
         flow_out = list_ops.tensor_list_from_tensor(
             tensor=value, element_shape=value.shape[1:])
         ta = TensorArray(dtype=self._dtype,
                          handle=self.handle,
                          flow=flow_out,
                           colocate_with_first_write_call=(
                               self._colocate_with_first_write_call))
         ta._infer_shape = self._infer_shape
         ta._element_shape = self._element_shape
         ta._colocate_with = self._colocate_with
         return ta
Example #44
 def unstack(self, value, name=None):
   """See TensorArray."""
   with ops.name_scope(name, "TensorArrayUnstack", [self._flow, value]):
     value = ops.convert_to_tensor(value, name="value")
     if self._infer_shape and not context.executing_eagerly():
       self._merge_element_shape(value.shape[1:])
     flow_out = list_ops.tensor_list_from_tensor(
         tensor=value, element_shape=value.shape[1:])
     ta = TensorArray(
         dtype=self._dtype,
         handle=self.handle,
         flow=flow_out,
         colocate_with_first_write_call=self._colocate_with_first_write_call)
     ta._infer_shape = self._infer_shape
     ta._element_shape = self._element_shape
     ta._colocate_with = self._colocate_with
     return ta
Example #45
    def test_index_access(self):
        def test_fn(l):
            return l[1]

        node, ctx = self.prepare(test_fn, {})
        def_, = anno.getanno(node.args.args[0], anno.Static.DEFINITIONS)
        def_.directives[directives.set_element_type] = {
            'dtype': parser.parse_expression('tf.int32')
        }
        node = slices.transform(node, ctx)

        with self.compiled(node, {}, dtypes.int32) as result:
            with self.test_session() as sess:
                tl = list_ops.tensor_list_from_tensor(
                    [1, 2],
                    element_shape=constant_op.constant([], dtype=dtypes.int32))
                y = result.test_fn(tl)
                self.assertEqual(2, sess.run(y))
Example #46
  def test_pop_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)

    opts = data_structures.ListPopOpts(
        element_dtype=initial_list.dtype,
        element_shape=(2,))

    with self.assertRaises(NotImplementedError):
      data_structures.list_pop(l, 0, opts)

    with self.test_session() as sess:
      l, x = data_structures.list_pop(l, None, opts)
      self.assertAllEqual(sess.run(x), [3, 4])

      t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
      self.assertAllEqual(sess.run(t), [[1, 2]])
Example #47
  def test_index_access(self):

    def test_fn(l):
      return l[1]

    node, ctx = self.prepare(test_fn, {})
    def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
    def_.directives[directives.set_element_type] = {
        'dtype': parser.parse_expression('tf.int32')
    }
    node = slices.transform(node, ctx)

    with self.compiled(node, {}, dtypes.int32) as result:
      with self.test_session() as sess:
        tl = list_ops.tensor_list_from_tensor(
            [1, 2], element_shape=constant_op.constant([], dtype=dtypes.int32))
        y = result.test_fn(tl)
        self.assertEqual(2, sess.run(y))
Example #48
    def test_pop_tensor_list(self):
        initial_list = constant_op.constant([[1, 2], [3, 4]])
        elem_shape = constant_op.constant([2])
        l = list_ops.tensor_list_from_tensor(initial_list,
                                             element_shape=elem_shape)

        opts = data_structures.ListPopOpts(element_dtype=initial_list.dtype,
                                           element_shape=(2, ))

        with self.assertRaises(NotImplementedError):
            data_structures.list_pop(l, 0, opts)

        with self.test_session() as sess:
            l, x = data_structures.list_pop(l, None, opts)
            self.assertAllEqual(sess.run(x), [3, 4])

            t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
            self.assertAllEqual(sess.run(t), [[1, 2]])
Example #49
  def test_stack(self):
    self.assertEqual(special_functions.stack(1, strict=False), 1)
    self.assertListEqual(
        special_functions.stack([1, 2, 3], strict=False), [1, 2, 3])
    # TODO(mdan): This should probably forward to tf.stack.
    self.assertTrue(
        isinstance(
            special_functions.stack(
                [constant_op.constant(1),
                 constant_op.constant(2)], strict=False), list))

    with self.assertRaises(ValueError):
      special_functions.stack([1, 2, 3])

    t = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(
        t, element_shape=constant_op.constant([], dtype=dtypes.int32))
    self.assertTrue(
        tensor_util.is_tf_type(
            special_functions.stack(l, element_dtype=dtypes.float32)))
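test_stack exercises the strict flag of special_functions.stack: with strict=False, non-tensor inputs pass through unchanged, while the default strict mode rejects them. In short:

special_functions.stack(1, strict=False)  # -> 1, passed through
special_functions.stack([1, 2, 3])        # raises ValueError in strict mode
special_functions.stack(l, element_dtype=dtypes.float32)  # TensorList -> Tensor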
Example #50
    def testSerialize(self):
        # pylint: disable=g-import-not-at-top
        try:
            import portpicker
        except ImportError:
            return
        with context.graph_mode():
            worker_port = portpicker.pick_unused_port()
            ps_port = portpicker.pick_unused_port()
            cluster_dict = {
                "worker": ["localhost:%s" % worker_port],
                "ps": ["localhost:%s" % ps_port]
            }
            cs = server_lib.ClusterSpec(cluster_dict)

            worker = server_lib.Server(cs,
                                       job_name="worker",
                                       protocol="grpc",
                                       task_index=0,
                                       start=True)
            unused_ps = server_lib.Server(cs,
                                          job_name="ps",
                                          protocol="grpc",
                                          task_index=0,
                                          start=True)
            with ops.Graph().as_default(), session.Session(
                    target=worker.target):
                with ops.device("/job:worker"):
                    t = constant_op.constant([[1.0], [2.0]])
                    l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
                with ops.device("/job:ps"):
                    l_ps = array_ops.identity(l)
                    l_ps, e = list_ops.tensor_list_pop_back(
                        l_ps, element_dtype=dtypes.float32)
                with ops.device("/job:worker"):
                    worker_e = array_ops.identity(e)
                self.assertAllEqual(self.evaluate(worker_e), [2.0])
Example #51
 def testCPUGPUCopyNested(self):
   if not context.num_gpus():
     return
   t = constant_op.constant([1.0, 2.0])
   child_l = list_ops.tensor_list_from_tensor(t, element_shape=[])
   l = list_ops.empty_tensor_list(
       element_shape=constant_op.constant([], dtype=dtypes.int32),
       element_dtype=dtypes.variant)
   l = list_ops.tensor_list_push_back(l, child_l)
   with context.device("gpu:0"):
     l_gpu = array_ops.identity(l)
     _, child_l_gpu = list_ops.tensor_list_pop_back(
         l_gpu, element_dtype=dtypes.variant)
     self.assertAllEqual(
         self.evaluate(
             list_ops.tensor_list_pop_back(
                 child_l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
   l_cpu = array_ops.identity(l_gpu)
   _, child_l_cpu = list_ops.tensor_list_pop_back(
       l_cpu, element_dtype=dtypes.variant)
   self.assertAllEqual(
       self.evaluate(
           list_ops.tensor_list_pop_back(
               child_l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
Example #53
  def test_index_access(self):

    def test_fn(l):
      utils.set_element_type(l, dtypes.int32)
      return l[1]

    node = self.parse_and_analyze(
        test_fn,
        {
            'utils': utils,
            'dtypes': dtypes
        },
        include_type_analysis=True,
    )
    node = slices.transform(node, self.ctx)

    with self.compiled(node, dtypes.int32) as result:
      result.utils = utils
      result.dtypes = dtypes
      with self.test_session() as sess:
        tl = list_ops.tensor_list_from_tensor(
            [1, 2], element_shape=constant_op.constant([], dtype=dtypes.int32))
        y = result.test_fn(tl)
        self.assertEqual(2, sess.run(y))
Example #54
    def testConcat(self):
        c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
        l0 = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
        l1 = list_ops.tensor_list_from_tensor([-1.0],
                                              element_shape=scalar_shape())
        l_batch_0 = array_ops.stack([l0, l1])
        l_batch_1 = array_ops.stack([l1, l0])

        l_concat_01 = list_ops.tensor_list_concat_lists(
            l_batch_0, l_batch_1, element_dtype=dtypes.float32)
        l_concat_10 = list_ops.tensor_list_concat_lists(
            l_batch_1, l_batch_0, element_dtype=dtypes.float32)
        l_concat_00 = list_ops.tensor_list_concat_lists(
            l_batch_0, l_batch_0, element_dtype=dtypes.float32)
        l_concat_11 = list_ops.tensor_list_concat_lists(
            l_batch_1, l_batch_1, element_dtype=dtypes.float32)

        expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]]
        expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]]
        expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]]
        expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]]

        for i, (concat, expected) in enumerate(
                zip([l_concat_00, l_concat_01, l_concat_10, l_concat_11],
                    [expected_00, expected_01, expected_10, expected_11])):
            splitted = array_ops.unstack(concat)
            splitted_stacked_ret = self.evaluate(
                (list_ops.tensor_list_stack(splitted[0], dtypes.float32),
                 list_ops.tensor_list_stack(splitted[1], dtypes.float32)))
            print("Test concat %d: %s, %s, %s, %s" %
                  (i, expected[0], splitted_stacked_ret[0], expected[1],
                   splitted_stacked_ret[1]))
            self.assertAllClose(expected[0], splitted_stacked_ret[0])
            self.assertAllClose(expected[1], splitted_stacked_ret[1])

        # Concatenating mismatched shapes fails.
        with self.assertRaises((errors.InvalidArgumentError, ValueError)):
            self.evaluate(
                list_ops.tensor_list_concat_lists(
                    l_batch_0,
                    list_ops.empty_tensor_list(scalar_shape(), dtypes.float32),
                    element_dtype=dtypes.float32))

        with self.assertRaisesRegexp(
                errors.InvalidArgumentError,
                "element shapes are not identical at index 0"):
            l_batch_of_vec_tls = array_ops.stack(
                [list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])
                 ] * 2)
            self.evaluate(
                list_ops.tensor_list_concat_lists(
                    l_batch_0,
                    l_batch_of_vec_tls,
                    element_dtype=dtypes.float32))

        with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                     r"input_b\[0\].dtype != element_dtype."):
            l_batch_of_int_tls = array_ops.stack([
                list_ops.tensor_list_from_tensor([1],
                                                 element_shape=scalar_shape())
            ] * 2)
            self.evaluate(
                list_ops.tensor_list_concat_lists(
                    l_batch_0,
                    l_batch_of_int_tls,
                    element_dtype=dtypes.float32))
Example #55
 def testSetOutOfBounds(self):
     c = constant_op.constant([1.0, 2.0])
     l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
     with self.assertRaises(errors.InvalidArgumentError):
         self.evaluate(list_ops.tensor_list_set_item(l, 20, 3.0))
Example #56
def tf_tensor_list_new(elements, element_dtype=None, element_shape=None):
    """Overload of new_list that stages a Tensor list creation."""
    if tensor_util.is_tensor(elements):
        if element_shape is not None:
            raise ValueError(
                'element shape may not be specified when creating list from tensor'
            )
        element_shape = array_ops.shape(elements)[1:]
        l = list_ops.tensor_list_from_tensor(elements,
                                             element_shape=element_shape)
        return l

    elements = tuple(ops.convert_to_tensor(el) for el in elements)

    all_dtypes = set(el.dtype for el in elements)
    if len(all_dtypes) == 1:
        inferred_dtype = tuple(all_dtypes)[0]
        if element_dtype is not None and element_dtype != inferred_dtype:
            raise ValueError(
                'incompatible dtype; specified: {}, inferred from {}: {}'.
                format(element_dtype, elements, inferred_dtype))
    elif all_dtypes:
        # Heterogeneous lists are ok.
        if element_dtype is not None:
            raise ValueError(
                'specified dtype {} is inconsistent with that of elements {}'.
                format(element_dtype, elements))
        inferred_dtype = dtypes.variant
    else:
        inferred_dtype = dtypes.variant

    all_shapes = set(tuple(el.shape.as_list()) for el in elements)
    if len(all_shapes) == 1:
        inferred_shape = array_ops.shape(elements[0])
        if element_shape is not None and element_shape != inferred_shape:
            raise ValueError(
                'incompatible shape; specified: {}, inferred from {}: {}'.
                format(element_shape, elements, inferred_shape))
    elif all_shapes:
        # Heterogeneous lists are ok.
        if element_shape is not None:
            raise ValueError(
                'specified shape {} is inconsistent with that of elements {}'.
                format(element_shape, elements))
        inferred_shape = constant_op.constant(
            -1)  # unknown shape, by convention
    else:
        inferred_shape = constant_op.constant(
            -1)  # unknown shape, by convention

    if element_dtype is None:
        element_dtype = inferred_dtype
    if element_shape is None:
        element_shape = inferred_shape

    element_shape = ops.convert_to_tensor(element_shape, dtype=dtypes.int32)
    l = list_ops.empty_tensor_list(element_shape=element_shape,
                                   element_dtype=element_dtype)
    for el in elements:
        l = list_ops.tensor_list_push_back(l, el)
    return l