Example #1
  def testContToUpdateA(self):
    stepper = NodeStepper(self.sess, "optim")

    result = stepper.cont("a:0")
    self.assertAllClose(1.0, result)
    self.assertEqual({}, stepper.last_feed_types())

    result = stepper.cont("optim/learning_rate:0")
    self.assertAllClose(0.01, result)
    self.assertEqual({}, stepper.last_feed_types())

    # Before any cont calls on ApplyGradientDescent, there should be no "dirty"
    # variables.
    self.assertEqual(set(), stepper.dirty_variables())

    # First, call cont() on one of the control inputs to optim: update_a.
    result = stepper.cont("optim/update_a/ApplyGradientDescent")

    # Now variable a should have been marked as dirty due to the update
    # by optim/update_a/ApplyGradientDescent.
    self.assertEqual({"a:0"}, stepper.dirty_variables())
    self.assertIsNone(result)
    self.assertEqual({
        "optim/learning_rate:0": NodeStepper.FEED_TYPE_HANDLE
    }, stepper.last_feed_types())

    # Check that Variable "a" has been updated properly, while "b" and "c"
    # remain the same.
    # For backprop on Variable a:
    #   Because f = a * b * b * c, df / da = b * b * c.
    #   1.0 - learning_rate * b * b * c
    #     = 1.0 - 0.01 * 2.0 * 2.0 * 4.0 = 0.84.
    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(2.0, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
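
All of these examples assert against a fixture (self.a, self.b, self.c, self.sess and the "optim" op) that the excerpts do not show. NodeStepper is the tfdbg node stepper from the TensorFlow 1.x line (tensorflow.python.debug). A minimal reconstruction of the fixture that is consistent with the asserted values — an inference from the assertions, not code from the source — looks like this:

import tensorflow as tf

# Hypothetical fixture, inferred from the assertions in the examples below.
a = tf.Variable(1.0, name="a")
b = tf.Variable(2.0, name="b")
c = tf.Variable(4.0, name="c")
d = tf.multiply(a, b, name="d")   # d = 2.0
e = tf.multiply(b, c, name="e")   # e = 8.0
f = tf.multiply(d, e, name="f")   # f = a * b * b * c = 16.0

# minimize() creates the optim/update_{a,b,c}/ApplyGradientDescent nodes and
# the optim/learning_rate:0 tensor that the examples step to.
optim = tf.train.GradientDescentOptimizer(0.01).minimize(f, name="optim")

sess = tf.Session()
sess.run(tf.global_variables_initializer())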
Example #4
  def testSelectiveHandleUsageDependingOnTransitiveCleanliness(self):
    """Test tensor handlers are using only during clean transitive closure.

    "clean" means no Variables have been updated by preceding cont() calls.
    """

    stepper = NodeStepper(self.sess, "optim")

    # First, call cont() on the two tensors on the intermediate level: d and e.
    result = stepper.cont("d:0")
    self.assertAllClose(2.0, result)
    self.assertEqual({}, stepper.last_feed_types())
    self.assertEqual(set(), stepper.dirty_variables())

    # Next, call cont() on e:0; again, no feeds should be needed and no
    # variables should be dirty.
    result = stepper.cont("e:0")
    self.assertAllClose(8.0, result)
    self.assertEqual({}, stepper.last_feed_types())
    self.assertEqual(set(), stepper.dirty_variables())

    # Now run update_a, so as to make Variable a dirty.
    result = stepper.cont("optim/update_a/ApplyGradientDescent",
                          restore_variable_values=True)
    self.assertIsNone(result)
    self.assertEqual({"a:0"}, stepper.dirty_variables())

    # Now, run update_b.
    result = stepper.cont("optim/update_b/ApplyGradientDescent",
                          restore_variable_values=True)
    self.assertIsNone(result)

    # The last cont() call should have used the handle of tensor e, but not the
    # handle of tensor d, because the transitive closure of e is clean, whereas
    # that of d is dirty due to the update to a in the previous cont() call.
    self.assertEqual({
        "e:0": NodeStepper.FEED_TYPE_HANDLE
    }, stepper.last_feed_types())

    # The result of update_b should be identical to what it would be if no
    # other update_* cont() calls had occurred before.
    self.assertAllClose(1.0, self.sess.run(self.a))
    self.assertAllClose(1.84, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
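
The two feed-type assertions above encode the stepper's handle-reuse rule: a cached tensor handle may be fed back into the graph only if that tensor's transitive closure is clean, i.e. it contains no dirty variables and no overridden tensors. A toy model of that decision — an illustration of the rule, not NodeStepper's actual implementation:

def handle_usable(name, closures, dirty_variables, override_names):
  """Toy model: a cached handle for tensor `name` is reusable only if the
  tensor's transitive closure contains no dirty variables and no overridden
  tensors; otherwise the tensor must be recomputed."""
  closure = closures[name]
  return not (closure & dirty_variables) and not (closure & set(override_names))

# In this test's terms: d depends on a (dirty after update_a), e does not.
closures = {"d:0": {"a:0", "b:0"}, "e:0": {"b:0", "c:0"}}
assert not handle_usable("d:0", closures, {"a:0"}, [])  # d must be recomputed
assert handle_usable("e:0", closures, {"a:0"}, [])      # e's handle is fed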
Example #7
  def testUpdateTwiceRestoreVariable(self):
    stepper = NodeStepper(self.sess, "optim")

    result = stepper.cont("optim/update_a/ApplyGradientDescent",
                          restore_variable_values=True)
    self.assertIsNone(result)
    self.assertEqual({"a:0"}, stepper.dirty_variables())

    result = stepper.cont("optim/update_b/ApplyGradientDescent",
                          restore_variable_values=True)
    self.assertIsNone(result)
    # Variable a should have been restored and hence is no longer dirty.
    # Variable b should have been marked as dirty.
    self.assertEqual({"b:0"}, stepper.dirty_variables())

    # The result of the update should be identical to what it would be if only
    # update_b had run.
    self.assertAllClose(1.0, self.sess.run(self.a))
    self.assertAllClose(1.84, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
Example #9
  def testContToUpdateB(self):
    stepper = NodeStepper(self.sess, "optim")

    result = stepper.cont("optim/update_b/ApplyGradientDescent")
    self.assertIsNone(result)
    self.assertEqual(set(["b:0"]), stepper.dirty_variables())

    # For backprop on Variable b:
    #   Because f = a * b * b * c, df / db = 2 * a * b * c.
    #   2.0 - learning_rate * 2 * a * b * c
    #     = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
    self.assertAllClose(1.0, self.sess.run(self.a))
    self.assertAllClose(1.84, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
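
The hand arithmetic in these comments is easy to verify outside the tests. A standalone plain-Python check of the update formulas for f = a * b * b * c:

# Plain-Python check of the SGD update arithmetic in the comments.
a, b, c, lr = 1.0, 2.0, 4.0, 0.01

df_da = b * b * c      # 16.0
df_db = 2 * a * b * c  # 16.0

assert abs((a - lr * df_da) - 0.84) < 1e-6  # Example #1: update_a alone
assert abs((b - lr * df_db) - 1.84) < 1e-6  # Examples #7, #9: update_b alone

# Example #11 (below): update_b after update_a, without restoring a.
a_updated = a - lr * df_da                            # 0.84
assert abs((b - lr * 2 * a_updated * b * c) - 1.8656) < 1e-6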
Example #11
  def testContAfterUpdateWithoutRestoringVariableValue(self):
    stepper = NodeStepper(self.sess, "optim")

    # First, update Variable a from 1.0 to 0.84.
    result = stepper.cont("optim/update_a/ApplyGradientDescent",
                          restore_variable_values=True)
    self.assertIsNone(result)
    self.assertEqual(set(["a:0"]), stepper.dirty_variables())
    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(2.0, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))

    # Second, update Variable b with restore_variable_values=False, so the
    # updated value of a is not restored first.
    result = stepper.cont(
        "optim/update_b/ApplyGradientDescent", restore_variable_values=False)
    self.assertIsNone(result)
    # For the backprop on Variable b under the updated value of a:
    #   2.0 - learning_rate * 2 * a' * b * c
    #     = 2.0 - 0.01 * 2 * 0.84 * 2.0 * 4.0 = 1.8656
    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(1.8656, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
Example #12
  def testOverrideThenContToUpdate(self):
    """Test cont() to update nodes after overriding tensor values."""

    stepper = NodeStepper(self.sess, "optim")

    result = stepper.cont("d:0")
    self.assertAllClose(2.0, result)
    self.assertEqual({}, stepper.last_feed_types())
    self.assertEqual(set(), stepper.dirty_variables())
    self.assertEqual(["d:0"], stepper.handle_names())

    # Override the value of a/read:0 from 1.0 to 10.0.
    stepper.override_tensor("a/read:0", 10.0)

    self.assertEqual(["a/read:0"], stepper.override_names())

    result = stepper.cont("optim/update_c/ApplyGradientDescent",
                          restore_variable_values=True)
    self.assertIsNone(result)

    # The last cont() call should not have used the tensor handle to d:0,
    # because the transitive closure of d:0 contains an override tensor.
    self.assertEqual({
        "a/read:0": NodeStepper.FEED_TYPE_OVERRIDE
    }, stepper.last_feed_types())

    # The tensor handle to d:0 should have been removed, because the transitive
    # closure of d:0 contained an overridden tensor.
    self.assertEqual([], stepper.handle_names())

    # For this backprop on c, the overriding value of a/read:0 should have been
    # used:
    #   4.0 - learning_rate * a * b * b
    #     = 4.0 - 0.01 * 10.0 * 2.0 * 2.0 = 3.6.
    self.assertAllClose(3.6, self.sess.run(self.c))

    # Now remove the overriding value of a/read:0.
    stepper.remove_override("a/read:0")
    self.assertEqual([], stepper.override_names())

    # Obtain the tensor handle to d:0 again.
    result = stepper.cont("d:0")
    self.assertAllClose(2.0, result)
    self.assertEqual(["d:0"], stepper.handle_names())

    # Then call update_c again, without restoring c.
    result = stepper.cont(
        "optim/update_c/ApplyGradientDescent", restore_variable_values=False)
    self.assertIsNone(result)

    # This time, the d:0 tensor handle should have been used, because its
    # transitive closure is clean.
    self.assertEqual({
        "d:0": NodeStepper.FEED_TYPE_HANDLE
    }, stepper.last_feed_types())

    # For this backprop on c, the original (pre-override) value of a/read:0
    # should have been used:
    #   3.6 - learning_rate * a * b * b
    #     = 3.6 - 0.01 * 1.0 * 2.0 * 2.0 = 3.56.
    self.assertAllClose(3.56, self.sess.run(self.c))
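
The same kind of standalone check confirms the two update_c steps in this example — the first uses the overriding value a = 10.0, the second the original a = 1.0 after the override has been removed:

# Plain-Python check of the two update_c steps above (df/dc = a * b * b).
b, lr = 2.0, 0.01

c1 = 4.0 - lr * 10.0 * b * b  # with the a/read:0 override: 3.6
assert abs(c1 - 3.6) < 1e-6

c2 = c1 - lr * 1.0 * b * b    # override removed, original a = 1.0: 3.56
assert abs(c2 - 3.56) < 1e-6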