  def _testDuplicateTensorArrayFails(self, use_gpu):
    with self.test_session(use_gpu=use_gpu) as sess:
      h1 = gen_data_flow_ops._tensor_array(
          size=1, dtype=tf.float32, tensor_array_name="foo")
      c1 = gen_data_flow_ops._tensor_array_write(h1, 0, 4.0)
      h2 = gen_data_flow_ops._tensor_array(
          size=1, dtype=tf.float32, tensor_array_name="foo")
      c2 = gen_data_flow_ops._tensor_array_write(h2, 0, 5.0)
      with self.assertRaises(errors.AlreadyExistsError):
        sess.run([c1, c2])
  def _testTensorArrayWriteMultipleFails(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)

      with self.assertRaisesOpError(
          "Could not write to TensorArray index 2 because "
          "it has already been written to."):
        with tf.control_dependencies([
            gen_data_flow_ops._tensor_array_write(h, 2, 3.0)]):
          gen_data_flow_ops._tensor_array_write(h, 2, 3.0).run()
  def _testWriteCloseTensorArray(self, use_gpu):
    with self.test_session(use_gpu=use_gpu) as sess:
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      writes = [
          gen_data_flow_ops._tensor_array_write(h, 0, [[4.0, 5.0]]),
          gen_data_flow_ops._tensor_array_write(h, 1, [3.0]),
          gen_data_flow_ops._tensor_array_write(h, 2, -1.0)]
      with tf.control_dependencies(writes):
        close = gen_data_flow_ops._tensor_array_close(h)
      sess.run(close)
  def _testTensorArrayWriteGradientAddMultipleAddsType(self, use_gpu, dtype):
    with self.test_session(use_gpu=use_gpu):
      h = gen_data_flow_ops._tensor_array(
          dtype=dtype, tensor_array_name="foo", size=3)

      c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)

      writes = [
          gen_data_flow_ops._tensor_array_write(
              h, 2, c(3.0), gradient_add=True),
          gen_data_flow_ops._tensor_array_write(
              h, 2, c(4.0), gradient_add=True)]

      with tf.control_dependencies(writes):
        self.assertAllEqual(
            c(7.00), gen_data_flow_ops._tensor_array_read(h, 2, dtype).eval())
  def _testTensorArrayReadWrongIndexOrDataTypeFails(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)

      with tf.control_dependencies([
          gen_data_flow_ops._tensor_array_write(h, 0, [[4.0, 5.0]])]):

        # Test reading wrong datatype
        r0_bad = gen_data_flow_ops._tensor_array_read(h, 0, tf.int64)
        with self.assertRaisesOpError(
            "TensorArray dtype is float but Op requested dtype int64."):
          r0_bad.eval()

        # Test reading from a different index than the one we wrote to
        r1 = gen_data_flow_ops._tensor_array_read(h, 1, tf.float32)
        with self.assertRaisesOpError(
            "Could not read from TensorArray index 1 because "
            "it has not yet been written to."):
          r1.eval()

      # Test reading from a negative index
      with self.assertRaisesOpError(
          "Tried to read from index -1 but array size is: 3"):
        gen_data_flow_ops._tensor_array_read(h, -1, tf.float32).eval()

      # Test reading from too large an index
      with self.assertRaisesOpError(
          "Tried to read from index 3 but array size is: 3"):
        gen_data_flow_ops._tensor_array_read(h, 3, tf.float32).eval()
  def _testMultiTensorArray(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      h1 = gen_data_flow_ops._tensor_array(
          size=1, dtype=tf.float32, tensor_array_name="foo")
      with tf.control_dependencies([
          gen_data_flow_ops._tensor_array_write(h1, 0, 4.0)]):
        r1 = gen_data_flow_ops._tensor_array_read(h1, 0, tf.float32)

      h2 = gen_data_flow_ops._tensor_array(
          size=1, dtype=tf.float32, tensor_array_name="bar")

      with tf.control_dependencies([
          gen_data_flow_ops._tensor_array_write(h2, 0, 5.0)]):
        r2 = gen_data_flow_ops._tensor_array_read(h2, 0, tf.float32)
      r = r1 + r2
      self.assertAllClose(9.0, r.eval())
    def write(self, index, value, name=None):
        """Write `value` into index `index` of the TensorArray.

    Args:
      index: 0-D.  int32 scalar with the index to write to.
      value: N-D.  Tensor of type `dtype`.  The Tensor to write to this index.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the write occurs.
      Use this object for all subsequent operations.

    Raises:
      ValueError: if there are more writers than specified.
    """
        with ops.colocate_with(self._handle):
            flow_out = gen_data_flow_ops._tensor_array_write(
                handle=self._handle, index=index, value=value, flow_in=self._flow, name=name
            )
            ta = TensorArray(dtype=self._dtype, handle=self._handle)
            ta._flow = flow_out
            ta._infer_shape = self._infer_shape
            ta._elem_shape = self._elem_shape
            if ta._infer_shape:
                val_shape = flow_out.op.inputs[2].get_shape()
                if ta._elem_shape:
                    if not val_shape == ta._elem_shape[0]:
                        raise ValueError(
                            "Inconsistent shapes: saw %s but expected %s "
                            "(and infer_shape=True)" % (val_shape, ta._elem_shape[0])
                        )
                else:
                    ta._elem_shape.append(val_shape)
            return ta
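The docstring above notes that each write returns a new TensorArray whose flow tensor ensures the write occurs, and that callers should use the returned object for all subsequent operations. A minimal usage sketch of that pattern, assuming TensorFlow 1.x graph mode and the public tf.TensorArray wrapper (the gen_data_flow_ops calls in these examples are the internal ops behind it):

import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=3)
# Each write returns a new TensorArray; keep using the returned object so its
# flow tensor orders every write before the read below.
ta = ta.write(0, [[4.0, 5.0]])
ta = ta.write(1, [[6.0, 7.0]])
ta = ta.write(2, [[8.0, 9.0]])
r0 = ta.read(0)

with tf.Session() as sess:
  print(sess.run(r0))  # [[4.0, 5.0]]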
    def write(self, index, value, name=None):
        """Write `value` into index `index` of the TensorArray.

    Args:
      index: 0-D.  int32 scalar with the index to write to.
      value: N-D.  Tensor of type `dtype`.  The Tensor to write to this index.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the write occurs.
      Use this object for all subsequent operations.

    Raises:
      ValueError: if there are more writers than specified.
    """
        with ops.colocate_with(self._handle):
            flow_out = gen_data_flow_ops._tensor_array_write(
                handle=self._handle,
                index=index,
                value=value,
                flow_in=self._flow,
                name=name)
            ta = TensorArray(dtype=self._dtype, handle=self._handle)
            ta._flow = flow_out
            ta._infer_shape = self._infer_shape
            ta._elem_shape = self._elem_shape
            if ta._infer_shape:
                val_shape = flow_out.op.inputs[2].get_shape()
                if ta._elem_shape:
                    if not val_shape == ta._elem_shape[0]:
                        raise ValueError("Shape inference failed.")
                else:
                    ta._elem_shape.append(val_shape)
            return ta
Example 9
 def write(self, index, value, name=None):
   """Write `value` into index `index` of the TensorArray."""
   flow_out = gen_data_flow_ops._tensor_array_write(
       handle=self._handle, index=index, value=value, flow_in=self._flow,
       name=name)
   # Size below is ignored
   ta = TensorArray(dtype=self._dtype, size=-1, handle=self._handle)
   ta._flow = flow_out
   return ta
Example 10
 def write(self, index, value, name=None):
   """Write `value` into index `index` of the TensorArray."""
   flow_out = gen_data_flow_ops._tensor_array_write(
       handle=self._handle, index=index, value=value, flow_in=self._flow,
       name=name)
   # No size is passed; the existing handle already determines the array
   ta = TensorArray(dtype=self._dtype, handle=self._handle)
   ta._flow = flow_out
   return ta
Example 11
  def _testTensorArrayWriteRead(self, use_gpu):
    with self.test_session(use_gpu=use_gpu) as sess:
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)

      writes = [
          gen_data_flow_ops._tensor_array_write(h, 0, [[4.0, 5.0]]),
          gen_data_flow_ops._tensor_array_write(h, 1, [[1.0]]),
          gen_data_flow_ops._tensor_array_write(h, 2, -3.0)]

      with tf.control_dependencies(writes):
        r0 = gen_data_flow_ops._tensor_array_read(h, 0, tf.float32)
        r1 = gen_data_flow_ops._tensor_array_read(h, 1, tf.float32)
        r2 = gen_data_flow_ops._tensor_array_read(h, 2, tf.float32)

      d0, d1, d2 = sess.run([r0, r1, r2])
      self.assertAllEqual([[4.0, 5.0]], d0)
      self.assertAllEqual([[1.0]], d1)
      self.assertAllEqual(-3.0, d2)
Example 12
  def testTensorArrayPackNotAllValuesAvailableFails(self):
    with self.test_session():
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)

      with self.assertRaisesOpError(
          "Could not read from TensorArray index 1 "
          "because it has not yet been written to."):
        with tf.control_dependencies([
            gen_data_flow_ops._tensor_array_write(h, 0, [[4.0, 5.0]])]):
          gen_data_flow_ops._tensor_array_pack(h, tf.float32).eval()
Example 13
  def _testTensorArrayWritePack(self, tf_dtype, use_gpu):
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=use_gpu):
      h = gen_data_flow_ops._tensor_array(
          dtype=tf_dtype, tensor_array_name="foo", size=3)

      if tf_dtype == tf.string:
        convert = lambda x: np.asarray(x).astype(np.str)
      else:
        convert = lambda x: np.asarray(x).astype(dtype)

      writes = [
          gen_data_flow_ops._tensor_array_write(h, 0, convert([[4.0, 5.0]])),
          gen_data_flow_ops._tensor_array_write(h, 1, convert([[6.0, 7.0]])),
          gen_data_flow_ops._tensor_array_write(h, 2, convert([[8.0, 9.0]]))]

      with tf.control_dependencies(writes):
        c0 = gen_data_flow_ops._tensor_array_pack(h, tf_dtype)

      self.assertAllEqual(
          convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
Example 14
 def write(self, index, value):
     """Write `value` into index `index` of the TensorArray."""
     flow_out = gen_data_flow_ops._tensor_array_write(
         handle=self._handle,
         index=index,
         value=value,
         flow_in=self._flow,
         gradient_add=self._gradient_add)
     # Size below is ignored
     ta = TensorArray(dtype=self._dtype, size=-1, handle=self._handle)
     ta._gradient_add = self._gradient_add
     ta._flow = flow_out
     return ta
Example 15
  def _testTensorGradAccessTwiceReceiveSameObject(self, use_gpu):
    with self.test_session(use_gpu=use_gpu) as sess:
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      g_h_0 = gen_data_flow_ops._tensor_array_grad(h)
      g_h_1 = gen_data_flow_ops._tensor_array_grad(h)

      with tf.control_dependencies([
          gen_data_flow_ops._tensor_array_write(g_h_0, 0, [[4.0, 5.0]])]):
        # Write with one gradient handle, read with another copy of it
        r1_0 = gen_data_flow_ops._tensor_array_read(g_h_1, 0, tf.float32)

      t_g_h_0, t_g_h_1, d_r1_0 = sess.run([g_h_0, g_h_1, r1_0])
      self.assertAllEqual(t_g_h_0, t_g_h_1)
      self.assertAllEqual([[4.0, 5.0]], d_r1_0)
Example 16
    def write(self, index, value, name=None):
        """Write `value` into index `index` of the TensorArray.

    Args:
      index: 0-D.  int32 scalar with the index to write to.
      value: N-D.  Tensor of type `dtype`.  The Tensor to write to this index.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the write occurs.
      Use this object for all subsequent operations.
    """
        with ops.colocate_with(self._handle):
            flow_out = gen_data_flow_ops._tensor_array_write(
                handle=self._handle, index=index, value=value, flow_in=self._flow, name=name
            )
            ta = TensorArray(dtype=self._dtype, handle=self._handle)
            ta._flow = flow_out
            return ta
Example 17
    def write(self, index, value, name=None):
        """Write `value` into index `index` of the TensorArray.

    Args:
      index: 0-D.  int32 scalar with the index to write to.
      value: N-D.  Tensor of type `dtype`.  The Tensor to write to this index.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the write occurs.
      Use this object all for subsequent operations.
    """
        with ops.colocate_with(self._handle):
            flow_out = gen_data_flow_ops._tensor_array_write(
                handle=self._handle,
                index=index,
                value=value,
                flow_in=self._flow,
                name=name)
            ta = TensorArray(dtype=self._dtype, handle=self._handle)
            ta._flow = flow_out
            return ta
Example 18
  def _testTensorArrayWriteWrongIndexOrDataTypeFails(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)

      # Test writing the wrong datatype
      with self.assertRaisesOpError(
          "TensorArray dtype is float but Op is trying to write dtype string"):
        gen_data_flow_ops._tensor_array_write(h, -1, "wrong_type_scalar").run()

      # Test writing to a negative index
      with self.assertRaisesOpError(
          "Tried to write to index -1 but array size is: 3"):
        gen_data_flow_ops._tensor_array_write(h, -1, 3.0).run()

      # Test reading from too large an index
      with self.assertRaisesOpError(
          "Tried to write to index 3 but array size is: 3"):
        gen_data_flow_ops._tensor_array_write(h, 3, 3.0).run()