Example #1
 def testMultipleWhileLoops(self):
   x = constant_op.constant(2.)
   ret1 = while_loop_v2(lambda v: v < 4., lambda v: v * v, [x])  # x**2
   ret2 = while_loop_v2(lambda v: v < 16., lambda v: v * v, ret1)  # x**4
   grad = gradients_impl.gradients(ret2, [x])  # 4x**3
   grad_grad = gradients_impl.gradients(grad, [x])  # 12x**2
   with self.cached_session() as sess:
     self.assertSequenceEqual(sess.run(grad), [32.])
     self.assertSequenceEqual(sess.run(grad_grad), [48.])
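
Note: while_loop_v2 is the internal implementation behind tf.while_loop in TensorFlow 2.x; the tests in these examples call it directly. A minimal sketch of the same squaring loop through the public API (assuming TF 2.x with eager execution):

import tensorflow as tf

x = tf.constant(2.)
with tf.GradientTape() as tape:
  tape.watch(x)
  # 2 < 4, so the body runs once: v becomes 4 = x**2, then the loop exits.
  ret = tf.while_loop(lambda v: v < 4., lambda v: v * v, [x])[0]
grad = tape.gradient(ret, x)  # d(x**2)/dx = 2x = 4.0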
Example #2
  def testNestedWhileAndTensorArray(self):
    n = constant_op.constant(3.0)

    def Body(row, ta, n):

      def InnerBody(row, col, ta, n):
        # Note: row and col are 1-based.
        ta = ta.write(
            math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
        return row, col + 1., ta, n

      # TODO(b/118457764): Remove n from loop_vars from both loops once fixed.
      ta = while_loop_v2(
          lambda _, col, _1, n: col <= n,
          InnerBody, [row, constant_op.constant(1.), ta, n],
          return_same_structure=False)[2]
      return row + 1., ta, n

    ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
    ta = while_loop_v2(
        lambda row, _, _1: row <= n,
        Body, [constant_op.constant(1.), ta, n],
        return_same_structure=False)[1]

    output = array_ops.reshape(ta.stack(), [3, 3])
    self.assertAllEqual(
        self.evaluate(output), [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
Example #3
 def fnWithLoop():  # pylint: disable=invalid-name
   with backprop.GradientTape() as tape:
     _, x = while_loop_v2(
         lambda i, _: i < 2,
         lambda i, x: (i + 1, x * v),
         [0, 2.])
   return tape.gradient(x, v)
Example #4
 def Fn():
   x = constant_op.constant(2.)
   with backprop.GradientTape() as tape:
     tape.watch(x)
     ret1 = while_loop_v2(
         lambda v: v < 4.,
         lambda v: v * v, [x],
         return_same_structure=False,
         name="while_1")  # x**2
     ret2 = while_loop_v2(
         lambda v: v < 16.,
         lambda v: v * v, [x],
         return_same_structure=False,
         name="while_2")  # x**4
     loss = ret1 + ret2
   return tape.gradient(loss, x)
Example #5
    def testMultipleLoopVars(self):
        x = constant_op.constant(5.)
        y = constant_op.constant(3.)

        # x = 5.
        # y = 3.
        # while x < 45.:
        #   x = x * y
        #   y = x + y
        ret = while_loop_v2(lambda v, _: v < 45.,
                            lambda v, w: (v * w, v + w), [x, y],
                            return_same_structure=False)
        # ret = [y*x**2 + x*y**2, x*y + x + y]

        gradx_0 = gradients_impl.gradients(ret[0], [x])  # [2*x*y + y**2]
        gradx_1 = gradients_impl.gradients(ret[1], [x])  # [y + 1]
        gradx_2 = gradients_impl.gradients(ret,
                                           [x])  # [2*x*y + y**2 + y + 1]
        grady_0 = gradients_impl.gradients(ret[0], [y])  # [2*x*y + x**2]
        grady_1 = gradients_impl.gradients(ret[1], [y])  # [x + 1]
        grady_2 = gradients_impl.gradients(ret, [y])  # [2*x*y + x**2 + x + 1]
        with self.cached_session() as sess:
            self.assertSequenceEqual(self.evaluate(ret), [120., 23.])
            self.assertSequenceEqual(self.evaluate(gradx_0), [39.])
            self.assertSequenceEqual(self.evaluate(gradx_1), [4.])
            self.assertSequenceEqual(self.evaluate(gradx_2), [43.])
            self.assertSequenceEqual(self.evaluate(grady_0), [55.])
            self.assertSequenceEqual(self.evaluate(grady_1), [6.])
            self.assertSequenceEqual(self.evaluate(grady_2), [61.])
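
The expected values follow from unrolling the loop by hand: two iterations of (v, w) -> (v*w, v+w) take (5, 3) to (15, 8) and then to (120, 23), i.e. ret = (y*x**2 + x*y**2, x*y + x + y). A quick pure-Python check of the forward pass:

x, y = 5., 3.
while x < 45.:
  x, y = x * y, x + y  # both new values use the old (x, y)
assert (x, y) == (120., 23.)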
Example #6
    def testNestedWhileAndTensorArray(self):
        n = constant_op.constant(3.0)

        def Body(row, ta, n):
            def InnerBody(row, col, ta, n):
                # Note: row and col are 1-based.
                ta = ta.write(
                    math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32),
                    row * col)
                return row, col + 1., ta, n

            # TODO(b/118457764): Remove n from loop_vars from both loops once fixed.
            ta = while_loop_v2(lambda _, col, _1, n: col <= n,
                               InnerBody,
                               [row, constant_op.constant(1.), ta, n],
                               return_same_structure=False)[2]
            return row + 1., ta, n

        ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
        ta = while_loop_v2(lambda row, _, _1: row <= n,
                           Body, [constant_op.constant(1.), ta, n],
                           return_same_structure=False)[1]

        output = array_ops.reshape(ta.stack(), [3, 3])
        self.assertAllEqual(self.evaluate(output),
                            [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
Example #7
  def testDuplicateAccumulator(self):
    x = constant_op.constant(2.)

    tensor_list = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=ScalarShape())

    def Cond(x, tl):
      del tl  # Unused for Cond.
      return x < 5.

    def Body(x, tl):
      # There is an accumulator in the loop already so we should not add
      # another.
      tl = list_ops.tensor_list_push_back(tl, x)
      return x**2., tl

    ret = while_loop_v2(
        Cond, Body, [x, tensor_list], return_same_structure=False)

    for op in ops.get_default_graph().get_operations():
      if op.type == "While":
        while_op = op

    body_graph = while_v2._get_graph(while_op, "body")
    x_input_index = [i for i, inp in enumerate(while_op.inputs) if inp == x][0]
    x_input_t = body_graph.inputs[x_input_index]
    accumulator_count = len(
        [c for c in x_input_t.consumers() if c.type == "TensorListPushBack"])
    self.assertEqual(accumulator_count, 1)

    grad = gradients_impl.gradients(ret[0], x)
    with self.cached_session() as sess:
      self.assertEqual(sess.run(ret[0]), 16.)
      self.assertSequenceEqual(self.evaluate(grad), [32.])
Example #8
 def testSingleLoopVar(self):
     x = constant_op.constant(2.)
     ret = while_loop_v2(lambda v: v < 8., lambda v: v * v, [x])
     grad = gradients_impl.gradients(ret, [x])
     with self.cached_session() as sess:
         self.assertEqual(sess.run(ret), 16.)
         self.assertSequenceEqual(sess.run(grad), [32.])
Example #9
  def testDuplicateAccumulator(self):
    x = constant_op.constant(2.)

    tensor_list = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=ScalarShape())

    def Cond(x, tl):
      del tl  # Unused for Cond.
      return x < 5.

    def Body(x, tl):
      # There is an accumulator in the loop already so we should not add
      # another.
      tl = list_ops.tensor_list_push_back(tl, x)
      return x**2., tl

    ret = while_loop_v2(Cond, Body, [x, tensor_list])

    for op in ops.get_default_graph().get_operations():
      if op.type == "While":
        while_op = op

    body_graph = while_v2._get_body_graph(while_op)
    # body_graph.inputs: [counter_arg, x_arg, tl_arg, *accumulators]
    x_input_t = body_graph.inputs[1]
    accumulator_count = len(
        [c for c in x_input_t.consumers() if c.type == "TensorListPushBack"])
    self.assertEqual(accumulator_count, 1)

    grad = gradients_impl.gradients(ret[0], x)
    with self.cached_session() as sess:
      self.assertEqual(sess.run(ret[0]), 16.)
      self.assertSequenceEqual(sess.run(grad), [32.])
Example #10
 def testSingleLoopVar(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(lambda v: v < 8., lambda v: v * v, [x])
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     self.assertEqual(sess.run(ret), 16.)
     self.assertSequenceEqual(sess.run(grad), [32.])
Example #11
  def testMultipleLoopVars(self):
    x = constant_op.constant(5.)
    y = constant_op.constant(3.)

    # x = 5.
    # y = 3.
    # while x < 45.:
    #   x = x * y
    #   y = x + y
    ret = while_loop_v2(lambda v, _: v < 45., lambda v, w: (v * w, v + w),
                        [x, y])
    # ret = [y*x**2 + x*y**2, x*y + x + y]

    gradx_0 = gradients_impl.gradients(ret[0], [x])  # [2*x*y + y**2]
    gradx_1 = gradients_impl.gradients(ret[1], [x])  # [y + 1]
    gradx_2 = gradients_impl.gradients(ret, [x])  # [2*x*y + y**2 + y + 1]
    grady_0 = gradients_impl.gradients(ret[0], [y])  # [2*x*y + x**2]
    grady_1 = gradients_impl.gradients(ret[1], [y])  # [x + 1]
    grady_2 = gradients_impl.gradients(ret, [y])  # [2*x*y + x**2 + x + 1]
    with self.cached_session() as sess:
      self.assertSequenceEqual(sess.run(ret), [120., 23.])
      self.assertSequenceEqual(sess.run(gradx_0), [39.])
      self.assertSequenceEqual(sess.run(gradx_1), [4.])
      self.assertSequenceEqual(sess.run(gradx_2), [43.])
      self.assertSequenceEqual(sess.run(grady_0), [55.])
      self.assertSequenceEqual(sess.run(grady_1), [6.])
      self.assertSequenceEqual(sess.run(grady_2), [61.])
Example #12
    def testNestedWhileWithLegacyDefun(self):
        n = constant_op.constant(3.)
        m = constant_op.constant(5.)
        sum_of_powers = constant_op.constant(0.)

        def Body(i, previous_sum):
            prod = constant_op.constant(1.)

            def InnerBodyWrapper(c, v):
                @function.Defun(dtypes.float32, dtypes.float32)
                def InnerBody(c, v):
                    return c - 1., v * n

                results = InnerBody(c, v)
                results[0].set_shape([])
                results[1].set_shape([])
                return results

            return i - 1., previous_sum + while_loop_v2(
                lambda c, _: c > 0,
                InnerBodyWrapper, [i, prod],
                return_same_structure=False)[1]

        result = while_loop_v2(lambda i, _: i >= 0,
                               Body, [m, sum_of_powers],
                               return_same_structure=False)[1]
        grad = gradients_impl.gradients(result, [n])
        self.assertEqual(self.evaluate(result), 364.)
        self.assertSequenceEqual(self.evaluate(grad), [547.])
Example #13
        def Fn():
            with backprop.GradientTape() as tape:
                x = constant_op.constant(2.)
                tape.watch(x)

                def Body(i, x):
                    forward_graph = ops.get_default_graph()

                    @custom_gradient.custom_gradient
                    def SquaredWithZeroGrad(x):
                        def Grad(unused_g, variables=None):  # pylint: disable=redefined-outer-name
                            del variables
                            gradient_graph = ops.get_default_graph()
                            shape = gen_array_ops.shape(x)
                            assert shape.graph is forward_graph
                            rank = gen_array_ops.rank(x)
                            assert rank.graph is forward_graph
                            size = gen_array_ops.size(x)
                            assert size.graph is forward_graph
                            zeros = array_ops.zeros(shape)
                            assert zeros.graph is gradient_graph
                            return zeros

                        return x * 2, Grad

                    return i + 1, SquaredWithZeroGrad(x)

                _, result = while_loop_v2(lambda i, _: i < 2, Body, [0, x])
            grad = tape.gradient(result, x)
            return grad
Example #14
    def Fn():

      def Body1(v):
        x1.assign(x1)
        return v * x1

      ret1 = while_loop_v2(
          lambda v: v < 4.,
          Body1, [c],
          return_same_structure=False,
          name="while_1")  # 2x

      def Body2(v):
        x1.assign(x1)
        return v * x1 * x1

      ret2 = while_loop_v2(
          lambda v: v < 16.,
          Body2, [c],
          return_same_structure=False,
          name="while_2")  # 4x

      def Body3(v):
        x2.assign(x2)
        return v * x2

      ret3 = while_loop_v2(
          lambda v: v < 4.,
          Body3, [c],
          return_same_structure=False,
          name="while_3")  # 3x

      def Body4(v):
        x2.assign(x2)
        return v * x2 * x2

      ret4 = while_loop_v2(
          lambda v: v < 16.,
          Body4, [c],
          return_same_structure=False,
          name="while_4")  # 9x
      ret5 = while_loop_v2(
          lambda v: v < 16.,
          lambda v: v * v, [c],
          return_same_structure=False,
          name="while_stateless")  # x**2
      return ret1, ret2, ret3, ret4, ret5
Example #15
 def testSingleLoopVar(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(
       lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False)
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     self.assertEqual(self.evaluate(ret), 16.)
     self.assertSequenceEqual(self.evaluate(grad), [32.])
Example #16
 def testCaptureExternalTensorInBody(self):
     x = constant_op.constant(2.)
     y = constant_op.constant(3.)
     ret = while_loop_v2(lambda v: v < 8., lambda v: v * y, [x])
     grad = gradients_impl.gradients(ret, [x])
     with self.cached_session() as sess:
         self.assertEqual(self.evaluate(ret), 18.)
         self.assertSequenceEqual(self.evaluate(grad), [9.])
Example #17
 def Func():
     x = constant_op.constant(2.)
     ret = while_loop_v2(lambda v: v < 8.,
                         lambda v: v**2, [x],
                         return_same_structure=False)  # x**4
     grad = gradients_impl.gradients(ret, [x])[0]  # 4x**3
     grad_grad = gradients_impl.gradients(grad, [x])[0]  # 12x**2
     return ret, grad, grad_grad
Example #18
 def testCaptureExternalTensorInCond(self):
   x = constant_op.constant(2.)
   y = constant_op.constant(1.)
   ret = while_loop_v2(lambda v: v + y < 9., lambda v: v * 3., [x])
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     self.assertEqual(self.evaluate(ret), 18.)
     self.assertSequenceEqual(self.evaluate(grad), [9.])
Example #19
 def testCaptureExternalTensorInCond(self):
     x = constant_op.constant(2.)
     y = constant_op.constant(1.)
     ret = while_loop_v2(lambda v: v + y < 9., lambda v: v * 3., [x])
     grad = gradients_impl.gradients(ret, [x])
     with self.cached_session() as sess:
         self.assertEqual(sess.run(ret), 18.)
         self.assertSequenceEqual(sess.run(grad), [9.])
Example #20
 def testCaptureExternalTensorInBody(self):
   x = constant_op.constant(2.)
   y = constant_op.constant(3.)
   ret = while_loop_v2(lambda v: v < 8., lambda v: v * y, [x])
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     self.assertEqual(sess.run(ret), 18.)
     self.assertSequenceEqual(sess.run(grad), [9.])
Example #21
 def testSingleLoopVar(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(
       lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False)
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session():
     self.assertEqual(self.evaluate(ret), 16.)
     self.assertSequenceEqual(self.evaluate(grad), [32.])
Example #22
 def testDoubleDerivative(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(lambda v: v < 8., lambda v: v**2, [x])  # x**4
   grad = gradients_impl.gradients(ret, [x])  # 4x**3
   grad_grad = gradients_impl.gradients(grad, [x])  # 12x**2
   with self.cached_session() as sess:
     self.assertEqual(sess.run(ret), 16.)
     self.assertSequenceEqual(sess.run(grad), [32.])
     self.assertSequenceEqual(sess.run(grad_grad), [48.])
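
The expected values can be checked by tracing the loop: starting at 2, squaring gives 4 (still below 8) and then 16, so ret = x**4. A pure-Python sketch:

x = 2.
while x < 8.:
  x = x**2  # 2 -> 4 -> 16
assert x == 16.
# Derivatives of x**4 at x = 2: 4*x**3 = 32 and 12*x**2 = 48.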
Example #23
 def testDoubleDerivative(self):
     x = constant_op.constant(2.)
     ret = while_loop_v2(lambda v: v < 8., lambda v: v**2, [x])  # x**4
     grad = gradients_impl.gradients(ret, [x])  # 4x**3
     grad_grad = gradients_impl.gradients(grad, [x])  # 12x**2
     with self.cached_session() as sess:
         self.assertEqual(self.evaluate(ret), 16.)
         self.assertSequenceEqual(self.evaluate(grad), [32.])
         self.assertSequenceEqual(self.evaluate(grad_grad), [48.])
Example #24
    def testAccumulatorElementShape(self, shape):
        def MatchShape(actual_tensor_shape):
            # Compare the shapes, treating None dimensions as equal. We do not
            # directly check actual_tensor_shape and tf.TensorShape(shape) for
            # equality because tf.Dimension.__eq__ returns None if either dimension is
            # None.
            if shape is None:
                self.assertIsNone(actual_tensor_shape.dims)
            else:
                self.assertListEqual(actual_tensor_shape.as_list(), shape)

        def GetAccumulatorForInputAtIndex(while_op, idx):
            body_graph = while_v2._get_graph(while_op, "body")
            y_input_t = body_graph.inputs[idx]
            push_back_node = [
                c for c in y_input_t.consumers()
                if c.type == "TensorListPushBack"
            ][0]
            output_idx = body_graph.outputs.index(push_back_node.outputs[0])
            return while_op.outputs[output_idx]

        x = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
        y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

        # Forward pass.
        ret = while_loop_v2(lambda v, u: v < 8.,
                            lambda v, u: (math_ops.pow(v, u), u), [x, y],
                            return_same_structure=True)
        while_op = ret[0].op.inputs[0].op
        # Gradient pass.
        grad = gradients_impl.gradients(ret[0], x)
        # Note: There is an Identity b/w grad[0] and the While op.
        grad_while_op = grad[0].op.inputs[0].op

        # Get the TensorList output of While op containing the accumulated values
        # of y.
        x_input_index = [
            i for i, inp in enumerate(while_op.inputs) if x == inp
        ][0]
        output = GetAccumulatorForInputAtIndex(while_op, x_input_index)
        _, val = list_ops.tensor_list_pop_back(output,
                                               element_dtype=dtypes.float32)
        MatchShape(val.shape)

        # Take second derivative to generate intermediate grad_while_op outputs
        gradients_impl.gradients(grad, x)

        # Get the TensorList output of gradient While op containing the accumulated
        # values of grad_x (note that grad_x is needed by the second derivative).
        # grad_while_op.inputs:
        grad_output_index = grad_while_op.outputs.index(grad[0].op.inputs[0])
        grad_output = GetAccumulatorForInputAtIndex(grad_while_op,
                                                    grad_output_index)
        _, val = list_ops.tensor_list_pop_back(grad_output,
                                               element_dtype=dtypes.float32)
        MatchShape(val.shape)
Example #25
 def testSingleLoopVarBackPropFalse(self):
     x = constant_op.constant(2.)
     ret = while_loop_v2(lambda v: v < 8.,
                         lambda v: v * v, [x],
                         return_same_structure=False,
                         back_prop=False)
     grad = gradients_impl.gradients(ret, [x])
     self.assertEqual(grad, [None])
     with self.cached_session():
         self.assertEqual(self.evaluate(ret), 16.)
Example #26
 def testGradientTape(self):
     with backprop.GradientTape() as t:
         x = constant_op.constant(2.)
         t.watch(x)
         ret = while_loop_v2(lambda v: v < 4.,
                             lambda v: v * v, [x],
                             return_same_structure=False)  # x**2
     grad = t.gradient(ret, x)
     with self.cached_session() as sess:
         self.assertAllEqual(sess.run(grad), 4.0)
Example #27
 def testGradientTape(self):
   with backprop.GradientTape() as t:
     x = constant_op.constant(2.)
     t.watch(x)
     ret = while_loop_v2(
         lambda v: v < 4., lambda v: v * v, [x],
         return_same_structure=False)  # x**2
   grad = t.gradient(ret, x)
   with self.cached_session() as sess:
     self.assertAllEqual(sess.run(grad), 4.0)
Example #28
        def Fn():
            def Body1(v):
                x.assign(x)
                return v * x

            ret1 = while_loop_v2(lambda v: v < 4.,
                                 Body1, [c],
                                 return_same_structure=False,
                                 name="while_1")  # 2x

            def Body2(v):
                x.assign(x)
                return v * x * x

            ret2 = while_loop_v2(lambda v: v < 16.,
                                 Body2, [c],
                                 return_same_structure=False,
                                 name="while_2")  # 4x
            return ret1, ret2
Example #29
 def testCaptureExternalTensorInCond(self):
     x = constant_op.constant(2.)
     y = constant_op.constant(1.)
     ret = while_loop_v2(lambda v: v + y < 9.,
                         lambda v: v * 3., [x],
                         return_same_structure=False)
     grad = gradients_impl.gradients(ret, [x])
     with self.cached_session():
         self.assertEqual(self.evaluate(ret), 18.)
         self.assertSequenceEqual(self.evaluate(grad), [9.])
Example #30
 def testReturnSameStructureTrue(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(
       lambda v: v < 8., lambda v: v * v, [x], return_same_structure=True)
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     eval_result = sess.run(ret)
     self.assertIsInstance(eval_result, list)
     self.assertLen(eval_result, 1)
     self.assertEqual(16., eval_result[0])
     self.assertSequenceEqual(sess.run(grad), [32.])
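
As this test shows, return_same_structure=True preserves the structure of loop_vars, so a single loop variable comes back as a one-element list; with False (the legacy behavior used in several snippets above), a lone result is unpacked to a bare tensor. A minimal sketch, assuming the tf.compat.v1.while_loop endpoint, which exposes the same flag:

import tensorflow.compat.v1 as tf

x = tf.constant(2.)
ret_list = tf.while_loop(lambda v: v < 8., lambda v: v * v, [x],
                         return_same_structure=True)   # -> [tensor]
ret_flat = tf.while_loop(lambda v: v < 8., lambda v: v * v, [x],
                         return_same_structure=False)  # -> bare tensor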
Example #31
  def testAccumulatorElementShape(self, shape):

    def MatchShape(actual_tensor_shape):
      # Compare the shapes, treating None dimensions as equal. We do not
      # directly check actual_tensor_shape and tf.TensorShape(shape) for
      # equality because tf.Dimension.__eq__ returns None if either dimension is
      # None.
      if shape is None:
        self.assertIsNone(actual_tensor_shape.dims)
      else:
        self.assertListEqual(actual_tensor_shape.as_list(), shape)

    def GetAccumulatorForInputAtIndex(while_op, idx):
      body_graph = while_v2._get_graph(while_op, "body")
      y_input_t = body_graph.inputs[idx]
      push_back_node = [c for c in y_input_t.consumers()
                        if c.type == "TensorListPushBack"][0]
      output_idx = body_graph.outputs.index(push_back_node.outputs[0])
      return while_op.outputs[output_idx]

    x = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
    y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

    # Forward pass.
    ret = while_loop_v2(lambda v, u: v < 8.,
                        lambda v, u: (math_ops.pow(v, u), u),
                        [x, y],
                        return_same_structure=True)
    while_op = ret[0].op.inputs[0].op
    # Gradient pass.
    grad = gradients_impl.gradients(ret[0], x)
    # Note: There is an Identity b/w grad[0] and the While op.
    grad_while_op = grad[0].op.inputs[0].op

    # Get the TensorList output of While op containing the accumulated values
    # of y.
    x_input_index = [i for i, inp in enumerate(while_op.inputs) if x == inp][0]
    output = GetAccumulatorForInputAtIndex(while_op, x_input_index)
    _, val = list_ops.tensor_list_pop_back(output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)

    # Take second derivative to generate intermediate grad_while_op outputs
    gradients_impl.gradients(grad, x)

    # Get the TensorList output of gradient While op containing the accumulated
    # values of grad_x (note that grad_x is needed by the second derivative).
    # grad_while_op.inputs:
    grad_output_index = grad_while_op.outputs.index(grad[0].op.inputs[0])
    grad_output = GetAccumulatorForInputAtIndex(grad_while_op,
                                                grad_output_index)
    _, val = list_ops.tensor_list_pop_back(grad_output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)
Example #32
 def testReturnSameStructureTrue(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(
       lambda v: v < 8., lambda v: v * v, [x], return_same_structure=True)
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     eval_result = sess.run(ret)
     self.assertIsInstance(eval_result, list)
     self.assertLen(eval_result, 1)
     self.assertEqual(16., eval_result[0])
     self.assertSequenceEqual(sess.run(grad), [32.])
Example #33
    def Body(row, ta, n):

      def InnerBody(row, col, ta, n):
        # Note: row and col are 1-based.
        ta = ta.write(
            math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
        return row, col + 1., ta, n

      # TODO(b/118457764): Remove n from loop_vars from both loops once fixed.
      ta = while_loop_v2(lambda _, col, _1, n: col <= n, InnerBody,
                         [row, constant_op.constant(1.), ta, n])[2]
      return row + 1., ta, n
Example #34
    def testExternalColocationGrad(self):
        external_t = constant_op.constant(2.)
        v0 = constant_op.constant(2.)

        def Body(v):
            with ops.colocate_with(external_t):
                return v * v

        ret = while_loop_v2(lambda v: v < 8., Body, [v0])[0]
        grad = gradients_impl.gradients(ret, [v0])[0]
        self.assertAllEqual(ret, 16.)
        self.assertAllEqual(grad, 32.)
Example #35
    def testIdentityNodeInBody(self):
        def Body(v):
            v = array_ops.identity(v)
            v = array_ops.identity(v)
            return v * v

        x = constant_op.constant(2.)
        ret = while_loop_v2(lambda v: v < 8., Body, [x])
        grad = gradients_impl.gradients(ret, [x])
        with self.cached_session() as sess:
            self.assertEqual(self.evaluate(ret), 16.)
            self.assertSequenceEqual(self.evaluate(grad), [32.])
Example #36
        def Body(row, ta, n):
            def InnerBody(row, col, ta, n):
                # Note: row and col are 1-based.
                ta = ta.write(
                    math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32),
                    row * col)
                return row, col + 1., ta, n

            # TODO(b/118457764): Remove n from loop_vars from both loops once fixed.
            ta = while_loop_v2(lambda _, col, _1, n: col <= n, InnerBody,
                               [row, constant_op.constant(1.), ta, n])[2]
            return row + 1., ta, n
Example #37
 def Fn():
     ret1 = while_loop_v2(lambda v: v < 4.,
                          lambda v: v * x1, [c],
                          return_same_structure=False,
                          name="while_1")  # 2x
     ret2 = while_loop_v2(lambda v: v < 16.,
                          lambda v: v * x1 * x1, [c],
                          return_same_structure=False,
                          name="while_2")  # 4x
     ret3 = while_loop_v2(lambda v: v < 4.,
                          lambda v: v * x2, [c],
                          return_same_structure=False,
                          name="while_3")  # 3x
     ret4 = while_loop_v2(lambda v: v < 16.,
                          lambda v: v * x2 * x2, [c],
                          return_same_structure=False,
                          name="while_4")  # 9x
     ret5 = while_loop_v2(lambda v: v < 16.,
                          lambda v: v * v, [c],
                          return_same_structure=False,
                          name="while_stateless")  # x**2
     return ret1, ret2, ret3, ret4, ret5
Example #38
  def testIdentityNodeInBody(self):

    def Body(v):
      v = array_ops.identity(v)
      v = array_ops.identity(v)
      return v * v

    x = constant_op.constant(2.)
    ret = while_loop_v2(lambda v: v < 8., Body, [x])
    grad = gradients_impl.gradients(ret, [x])
    with self.cached_session() as sess:
      self.assertEqual(self.evaluate(ret), 16.)
      self.assertSequenceEqual(self.evaluate(grad), [32.])
Example #39
    def testExternalControlDependencies(self):
        with ops.Graph().as_default(), self.test_session():
            v = variables.Variable(1.)
            v.initializer.run()
            op = v.assign_add(1.)

            def body_fn(i):  # pylint: disable=invalid-name
                with ops.control_dependencies([op]):
                    return i + 1

            loop = while_loop_v2(lambda i: i < 1, body_fn, [0])
            loop[0].op.run()
            self.assertAllEqual(self.evaluate(v), 2.0)
Example #40
  def testExternalControlDependencies(self):
    with ops.Graph().as_default(), self.test_session():
      v = variables.Variable(1.)
      v.initializer.run()
      op = v.assign_add(1.)

      def body_fn(i):  # pylint: disable=invalid-name
        with ops.control_dependencies([op]):
          return i + 1

      loop = while_loop_v2(lambda i: i < 1, body_fn, [0])
      loop[0].op.run()
      self.assertAllEqual(self.evaluate(v), 2.0)
Example #41
    def testIdentityNodeInBody(self):
        def Body(v):
            v = array_ops.identity(v)
            v = array_ops.identity(v)
            return v * v

        x = constant_op.constant(2.)
        ret = while_loop_v2(lambda v: v < 8.,
                            Body, [x],
                            return_same_structure=False)
        grad = gradients_impl.gradients(ret, [x])
        self.assertEqual(self.evaluate(ret), 16.)
        self.assertSequenceEqual(self.evaluate(grad), [32.])
Example #42
    def testRandomUniformShape(self):
        shape = constant_op.constant([3])

        def Body(i, u):
            shape_extended = array_ops.concat([[5], shape], axis=0)
            u = random_ops.random_uniform(shape_extended)
            self.assertAllEqual(u.shape.as_list(), [5, 3])
            return i + 1, u

        _, _ = while_loop_v2(cond=lambda i, _: i < 3,
                             body=Body,
                             loop_vars=[
                                 0,
                                 array_ops.zeros([5, 3], dtype=dtypes.float32),
                             ])
Example #43
  def testAccumulatorElementShape(self, shape):

    def MatchShape(actual_tensor_shape):
      # Compare the shapes, treating None dimensions as equal. We do not
      # directly check actual_tensor_shape and tf.TensorShape(shape) for
      # equality because tf.Dimension.__eq__ returns None if either dimension is
      # None.
      if shape is None:
        self.assertIsNone(actual_tensor_shape.dims)
      else:
        self.assertListEqual(actual_tensor_shape.as_list(), shape)

    def GetAccumulatorForInputAtIndex(while_op, idx):
      body_graph = while_v2._get_body_graph(while_op)
      y_input_t = body_graph.inputs[idx]
      push_back_node = [c for c in y_input_t.consumers()
                        if c.type == "TensorListPushBack"][0]
      output_idx = body_graph.outputs.index(push_back_node.outputs[0])
      return while_op.outputs[output_idx]

    x = constant_op.constant(2.)
    y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

    # Forward pass.
    ret = while_loop_v2(
        lambda v, u: v < 8.,
        lambda v, u: (v * v, u), [x, y],
        return_same_structure=False)
    while_op = ret[0].op.inputs[0].op
    # Get the TensorList output of While op containing the accumulated values
    # of y.
    # while_op.inputs: [counter_arg, x_arg, y_arg, *accumulators]
    output = GetAccumulatorForInputAtIndex(while_op, 2)
    _, val = list_ops.tensor_list_pop_back(output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)

    # Gradient pass.
    grad = gradients_impl.gradients(ret[1], y)
    grad_while_op = grad[0].op.inputs[0].op
    # Get the TensorList output of gradient While op containing the accumulated
    # values of grad_y.
    # grad_while_op.inputs:
    # [counter_arg, total_iters_arg, grad_x_arg, grad_y_arg, *other_args]
    grad_output = GetAccumulatorForInputAtIndex(grad_while_op, 3)
    _, val = list_ops.tensor_list_pop_back(grad_output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)
Example #44
    def testAccumulatorElementShape(self, shape):
        def MatchShape(actual_tensor_shape):
            # Compare the shapes, treating None dimensions as equal. We do not
            # directly check actual_tensor_shape and tf.TensorShape(shape) for
            # equality because tf.Dimension.__eq__ returns None if either dimension is
            # None.
            if shape is None:
                self.assertIsNone(actual_tensor_shape.dims)
            else:
                self.assertListEqual(actual_tensor_shape.as_list(), shape)

        def GetAccumulatorForInputAtIndex(while_op, idx):
            body_graph = while_v2._get_body_graph(while_op)
            y_input_t = body_graph.inputs[idx]
            push_back_node = [
                c for c in y_input_t.consumers()
                if c.type == "TensorListPushBack"
            ][0]
            output_idx = body_graph.outputs.index(push_back_node.outputs[0])
            return while_op.outputs[output_idx]

        x = constant_op.constant(2.)
        y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

        # Forward pass.
        ret = while_loop_v2(lambda v, u: v < 8.,
                            lambda v, u: (v * v, u), [x, y],
                            return_same_structure=False)
        while_op = ret[0].op.inputs[0].op
        # Get the TensorList output of While op containing the accumulated values
        # of y.
        # while_op.inputs: [counter_arg, x_arg, y_arg, *accumulators]
        output = GetAccumulatorForInputAtIndex(while_op, 2)
        _, val = list_ops.tensor_list_pop_back(output,
                                               element_dtype=dtypes.float32)
        MatchShape(val.shape)

        # Gradient pass.
        grad = gradients_impl.gradients(ret[1], y)
        grad_while_op = grad[0].op.inputs[0].op
        # Get the TensorList output of gradient While op containing the accumulated
        # values of grad_y.
        # grad_while_op.inputs:
        # [counter_arg, total_iters_arg, grad_x_arg, grad_y_arg, *other_args]
        grad_output = GetAccumulatorForInputAtIndex(grad_while_op, 3)
        _, val = list_ops.tensor_list_pop_back(grad_output,
                                               element_dtype=dtypes.float32)
        MatchShape(val.shape)
Example #45
    def testRandomOpsShape(self, random_fn, expected_shape):
        shape = constant_op.constant([3])

        def Body(i, u):
            shape_extended = array_ops.concat([[5], shape], axis=0)
            u = random_fn(shape_extended)
            assert u.shape.as_list() == expected_shape, str(u.shape.as_list())
            return i + 1, u

        _, _ = while_loop_v2(cond=lambda i, _: i < 3,
                             body=Body,
                             loop_vars=[
                                 0,
                                 array_ops.zeros(expected_shape,
                                                 dtype=dtypes.float32),
                             ])
Example #46
  def testMultipleLoopVarsBasic(self):
    x = constant_op.constant(5.)
    y = constant_op.constant(3.)

    # x = 5.
    # y = 3.
    # while x < 45.:
    #   x = x * y
    ret = while_loop_v2(lambda v, _: v < 45., lambda v, w: (v * w, w), [x, y])
    # ret = [x*y^2, y]

    # Note: This is simply d_ret[0]/d_x since d_ret[1]/d_x is 0.
    grad = gradients_impl.gradients(ret, [x])  # [y**2]
    with self.cached_session() as sess:
      self.assertSequenceEqual(sess.run(ret), [45., 3.])
      self.assertSequenceEqual(sess.run(grad), [9.])
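
Because the body leaves y unchanged, the loop multiplies x by y once per iteration (5 -> 15 -> 45), so ret[0] = x*y**2 and d ret[0]/dx = y**2 = 9, matching the assertion above. A quick check:

x, y = 5., 3.
while x < 45.:
  x = x * y  # y stays fixed
assert x == 45. and y**2 == 9.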
Example #47
    def testFillOpsShape(self, fill_fn):
        shape = constant_op.constant([3, 4])

        def Body(i, u):
            shape_extended = array_ops.concat([[5], shape], axis=0)
            u = fill_fn(shape_extended)
            assert u.shape.as_list() == [5, 3, 4], str(u.shape.as_list())
            return i + 1, u

        _, _ = while_loop_v2(cond=lambda i, _: i < 3,
                             body=Body,
                             loop_vars=[
                                 0,
                                 array_ops.zeros([5, 3, 4],
                                                 dtype=dtypes.float32),
                             ])
Example #48
  def testNestedWhile(self):
    # Compute sum of geometric progression: n^0 + n^1 + ... + n^m
    # We compute the pow using a while loop.
    n = constant_op.constant(3.)
    m = constant_op.constant(5.)
    sum_of_powers = constant_op.constant(0.)

    def Body(i, previous_sum):
      prod = constant_op.constant(1.)
      return i - 1., previous_sum + while_loop_v2(
          lambda c, _: c > 0, lambda c, v: (c - 1., v * n), [i, prod])[1]

    result = while_loop_v2(lambda i, _: i >= 0, Body, [m, sum_of_powers])[1]
    grad = gradients_impl.gradients(result, [n])
    with self.cached_session() as sess:
      self.assertEqual(self.evaluate(result), 364.)
      self.assertSequenceEqual(self.evaluate(grad), [547.])
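
The expected numbers are the geometric series n**0 + n**1 + ... + n**m and its derivative with respect to n; a pure-Python check:

n, m = 3., 5
result = sum(n**k for k in range(m + 1))          # 1+3+9+27+81+243 = 364.0
grad = sum(k * n**(k - 1) for k in range(m + 1))  # 1+6+27+108+405 = 547.0
assert result == 364. and grad == 547.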
Example #49
  def testLoopWithTensorListPushBack(self):
    x = constant_op.constant(2.)

    tensor_list = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=ScalarShape())

    def Cond(x, tl):
      del tl  # Unused for Cond.
      return x < 5.

    def Body(x, tl):
      tl = list_ops.tensor_list_push_back(tl, x)
      tl = list_ops.tensor_list_push_back(tl, constant_op.constant(100.))
      return x**2., tl

    ret = while_loop_v2(Cond, Body, [x, tensor_list])
    grad = gradients_impl.gradients(ret[0], x)
    with self.cached_session() as sess:
      self.assertEqual(sess.run(ret[0]), 16.)
      self.assertSequenceEqual(sess.run(grad), [32.])
Example #50
 def Body(i, previous_sum):
   prod = constant_op.constant(1.)
   return i - 1., previous_sum + while_loop_v2(
       lambda c, _: c > 0,
       lambda c, v: (c - 1., v * n), [i, prod],
       return_same_structure=False)[1]
Example #51
 def Body(i, previous_sum):
   prod = constant_op.constant(1.)
   return i - 1., previous_sum + while_loop_v2(
       lambda c, _: c > 0, lambda c, v: (c - 1., v * n), [i, prod])[1]