Example #1
  def _testTensorGradAccessTwiceReceiveSameObject(self, use_gpu):
    with self.test_session(use_gpu=use_gpu) as sess:
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      g_h_0 = gen_data_flow_ops._tensor_array_grad(h)
      g_h_1 = gen_data_flow_ops._tensor_array_grad(h)

      with tf.control_dependencies([
          gen_data_flow_ops._tensor_array_write(g_h_0, 0, [[4.0, 5.0]])]):
        # Write with one gradient handle, read with another copy of it
        r1_0 = gen_data_flow_ops._tensor_array_read(g_h_1, 0, tf.float32)

      t_g_h_0, t_g_h_1, d_r1_0 = sess.run([g_h_0, g_h_1, r1_0])
      self.assertAllEqual(t_g_h_0, t_g_h_1)
      self.assertAllEqual([[4.0, 5.0]], d_r1_0)
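The same shared-object behavior is visible through the public TF 1.x tf.TensorArray wrapper. A minimal sketch (assuming TF 1.x; the "gradients" source string is illustrative): two grad() calls with the same source resolve to one underlying gradient array, so a write through one handle is readable through the other.

import tensorflow as tf

with tf.Session() as sess:
  ta = tf.TensorArray(dtype=tf.float32, size=1)
  w = ta.write(0, [[4.0, 5.0]])
  g0 = w.grad(source="gradients", flow=w.flow)
  g1 = w.grad(source="gradients", flow=w.flow)
  wg = g0.write(0, [[7.0, 8.0]])
  # Order the read after the write issued through the other handle.
  with tf.control_dependencies([wg.flow]):
    r = g1.read(0)
  print(sess.run(r))  # => [[7. 8.]]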
Example #2
  def grad(self):
    # Wrap the gradient of this array's handle in a new TensorArray.
    # Repeated calls return handles to the same underlying gradient
    # array; _gradient_add flags it as a gradient array so that writes
    # add to any existing value at the index.
    g = TensorArray(
        dtype=self._dtype,
        size=-1,
        handle=gen_data_flow_ops._tensor_array_grad(self._handle))
    g._gradient_add = True
    return g
Example #3
  def grad(self):
    g = TensorArray(dtype=self._dtype,
                    size=-1,
                    handle=gen_data_flow_ops._tensor_array_grad(
                        self._handle))
    g._gradient_add = True
    return g
Example #4
  def _testTensorGradArrayWriteRead(self, use_gpu):
    with self.test_session(use_gpu=use_gpu) as sess:
      h = gen_data_flow_ops._tensor_array(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      g_h = gen_data_flow_ops._tensor_array_grad(h)

      writes = [
          gen_data_flow_ops._tensor_array_write(h, 0, [[4.0, 5.0]]),
          gen_data_flow_ops._tensor_array_write(h, 1, [[1.0]]),
          gen_data_flow_ops._tensor_array_write(h, 2, -3.0)]

      # Writes to the gradient array are independent of the forward writes;
      # the reads below check that neither set clobbers the other.
      grad_writes = [
          gen_data_flow_ops._tensor_array_write(g_h, 0, [[5.0, 6.0]]),
          gen_data_flow_ops._tensor_array_write(g_h, 1, [[2.0]]),
          gen_data_flow_ops._tensor_array_write(g_h, 2, -2.0)]

      with tf.control_dependencies(writes):
        r0 = gen_data_flow_ops._tensor_array_read(h, 0, tf.float32)
        r1 = gen_data_flow_ops._tensor_array_read(h, 1, tf.float32)
        r2 = gen_data_flow_ops._tensor_array_read(h, 2, tf.float32)

      with tf.control_dependencies(grad_writes):
        g_r0 = gen_data_flow_ops._tensor_array_read(g_h, 0, tf.float32)
        g_r1 = gen_data_flow_ops._tensor_array_read(g_h, 1, tf.float32)
        g_r2 = gen_data_flow_ops._tensor_array_read(g_h, 2, tf.float32)

      d0, d1, d2, g_d0, g_d1, g_d2 = sess.run([r0, r1, r2, g_r0, g_r1, g_r2])
      self.assertAllEqual([[4.0, 5.0]], d0)
      self.assertAllEqual([[1.0]], d1)
      self.assertAllEqual(-3.0, d2)
      self.assertAllEqual([[5.0, 6.0]], g_d0)
      self.assertAllEqual([[2.0]], g_d1)
      self.assertAllEqual(-2.0, g_d2)
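For comparison, the same forward write/read round trip through the public wrapper (a minimal sketch, assuming TF 1.x; the wrapper threads the write-before-read ordering through its flow tensor rather than explicit control dependencies):

import tensorflow as tf

with tf.Session() as sess:
  ta = tf.TensorArray(dtype=tf.float32, size=3)
  # write() returns a new TensorArray chained through the flow tensor,
  # which orders this write before any read issued on the result.
  ta = ta.write(0, [[4.0, 5.0]])
  print(sess.run(ta.read(0)))  # => [[4. 5.]]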
Example #5
  def grad(self, source, flow=None):
    # Look up (or create) the gradient TensorArray for this handle,
    # keyed by the name of the gradient source.
    g_handle = gen_data_flow_ops._tensor_array_grad(handle=self._handle,
                                                    source=source)
    g = TensorArray(dtype=self._dtype,
                    size=None,
                    handle=g_handle,
                    flow=flow)
    return g
Example #6
  def grad(self, source, flow=None):
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized.  This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    g_handle = gen_data_flow_ops._tensor_array_grad(
        handle=self._handle, source=source, flow_in=flow or self.flow)
    g = TensorArray(dtype=self._dtype, handle=g_handle, flow=flow or self.flow)
    return g
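A usage sketch for this flow-taking variant (assuming the TF 1.x public wrapper; the "gradients" source string is illustrative). Passing the forward array's flow means the gradient array's handle op cannot run until every forward write has executed, so a dynamically sized array has reached its final size by then.

import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
ta = ta.write(0, 1.0)
ta = ta.write(1, 2.0)
# The grad handle op consumes ta.flow, so it is created only after
# both writes and sees the final (dynamic) size of 2.
g = ta.grad(source="gradients", flow=ta.flow)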
Example #8
  def grad(self, source, flow=None, name=None):
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized.  This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.op_scope([self._handle], name, "TensorArrayGrad"):
      # Colocate the gradient handle op with the forward handle so both
      # arrays live on the same device.
      with ops.colocate_with(self._handle):
        g_handle = gen_data_flow_ops._tensor_array_grad(
            handle=self._handle, source=source, flow_in=flow, name=name)
        # Order the returned flow after creation of the gradient array.
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(dtype=self._dtype, handle=g_handle, flow=flow)
        return g
Example #9
  def grad(self, source, flow=None, name=None):
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized.  This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.op_scope([self._handle], name, "TensorArrayGrad"):
      with ops.colocate_with(self._handle):
        g_handle = gen_data_flow_ops._tensor_array_grad(
            handle=self._handle, source=source, flow_in=flow, name=name)
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(dtype=self._dtype, handle=g_handle, flow=flow,
                        infer_shape=self._infer_shape)
        return g
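To see how this method is typically consumed, here is a sketch of a gradient function for a TensorArray read op in the style of the TF 1.x registered gradients (illustrative, not the actual registration; the function name and "gradients" source are assumptions, and the internal tensor_array_ops module is used as in the snippets above):

from tensorflow.python.ops import tensor_array_ops

def _tensor_array_read_grad_sketch(op, grad):
  # op.inputs = (handle, index, flow) for a TensorArrayRead op.
  handle, index, flow = op.inputs
  dtype = op.get_attr("dtype")
  ta = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow)
  # Accumulate the incoming gradient at the index that was read; writes
  # to a gradient array add rather than overwrite.
  g = ta.grad(source="gradients", flow=flow)
  w_g = g.write(index, grad)
  # No gradients for the handle or index inputs; pass back the flow.
  return [None, None, w_g.flow]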
Example #10
  def grad(self, source, flow=None):
    # Create a TensorArray wrapping the gradient array for this handle,
    # keyed by the name of the gradient source.
    g_handle = gen_data_flow_ops._tensor_array_grad(
        handle=self._handle, source=source)
    g = TensorArray(dtype=self._dtype, size=None, handle=g_handle, flow=flow)
    return g