Example #1
    def testIdentifyGradientWorksOnMultipleLosses(self):
        grad_debugger_1 = debug_gradients.GradientsDebugger()
        grad_debugger_2 = debug_gradients.GradientsDebugger()

        y = math_ops.add(self.w, -1.0, name="y")
        debug_y = grad_debugger_1.identify_gradient(y)
        z1 = math_ops.square(debug_y, name="z1")

        debug_y = grad_debugger_2.identify_gradient(y)
        z2 = math_ops.sqrt(debug_y, name="z2")

        with grad_debugger_1:
            gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
        with grad_debugger_2:
            gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)

        dz1_dy = grad_debugger_1.gradient_tensor(y)
        dz2_dy = grad_debugger_2.gradient_tensor(y)
        self.assertIsInstance(dz1_dy, ops.Tensor)
        self.assertIsInstance(dz2_dy, ops.Tensor)
        self.assertIsNot(dz1_dy, dz2_dy)

        self.sess.run(variables.global_variables_initializer())
        self.assertAllClose(5.0**2, self.sess.run(z1))
        self.assertAllClose(5.0**0.5, self.sess.run(z2))
        self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
        self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))
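All of the examples reference self.u, self.v, self.w, and self.sess from a
shared test fixture that this listing omits. Below is a minimal sketch of a
fixture consistent with the asserted values (u = 2.0, v = 3.0, w = u * v =
6.0, hence y = w - 1.0 = 5.0); the real setUp() may differ.

    # Hypothetical fixture, reconstructed from the assertions in the examples.
    from tensorflow.python.client import session
    from tensorflow.python.ops import math_ops
    from tensorflow.python.ops import variables

    def setUp(self):
        self.sess = session.Session()
        with self.sess.as_default():
            self.u = variables.Variable(2.0, name="u")
            self.v = variables.Variable(3.0, name="v")
            # w = u * v, so for y = w - 1.0 we get dy/du = v = 3.0 and
            # dy/dv = u = 2.0, matching the gradient assertions below.
            self.w = math_ops.multiply(self.u, self.v, name="w")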
Example #2
    def testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self):
        grad_debugger = debug_gradients.GradientsDebugger()
        id_grad_w = grad_debugger.identify_gradient(self.w)
        y = math_ops.add(id_grad_w, -1.0, name="y")

        with grad_debugger:
            grads = gradients_impl.gradients(y, [self.u, self.v])
        self.assertEqual(2, len(grads))
        u_grad = grads[0]
        v_grad = grads[1]

        self.sess.run(variables.global_variables_initializer())
        self.assertAllClose(5.0, self.sess.run(y))
        self.assertAllClose(3.0, self.sess.run(u_grad))
        self.assertAllClose(2.0, self.sess.run(v_grad))

        # Fetch the gradient tensor with the x-tensor object.
        w_grad = grad_debugger.gradient_tensor(self.w)
        self.assertIsInstance(w_grad, ops.Tensor)
        self.assertAllClose(1.0, self.sess.run(w_grad))

        # Fetch the gradient tensor with the x-tensor's name.
        w_grad = grad_debugger.gradient_tensor(self.w.name)
        self.assertIsInstance(w_grad, ops.Tensor)
        self.assertAllClose(1.0, self.sess.run(w_grad))

        # Fetch the gradient tensor with the x-tensor name string.
        w_grad = grad_debugger.gradient_tensor("w:0")
        self.assertIsInstance(w_grad, ops.Tensor)
        self.assertAllClose(1.0, self.sess.run(w_grad))
Example #3
    def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self):
        y = math_ops.add(self.w, -1.0, name="y")

        # The construction of the forward graph has completed, but we can
        # still get the gradient tensors by using
        # watch_gradients_by_tensor_names().
        grad_debugger = debug_gradients.GradientsDebugger()
        grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$")
        grads = gradients_impl.gradients(y, [self.u, self.v])
        self.assertEqual(2, len(grads))
        u_grad = grads[0]
        v_grad = grads[1]

        self.sess.run(variables.global_variables_initializer())
        self.assertAllClose(5.0, self.sess.run(y))
        self.assertAllClose(3.0, self.sess.run(u_grad))
        self.assertAllClose(2.0, self.sess.run(v_grad))

        w_grad = grad_debugger.gradient_tensor(self.w)
        self.assertIsInstance(w_grad, ops.Tensor)
        self.assertAllClose(1.0, self.sess.run(w_grad))

        w_grad = grad_debugger.gradient_tensor("w:0")
        self.assertIsInstance(w_grad, ops.Tensor)
        self.assertAllClose(1.0, self.sess.run(w_grad))
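Note that the second argument to watch_gradients_by_tensor_names() is a
regular expression matched against tensor names: the trailing "$" in "w:0$"
anchors the pattern so that only the tensor named w:0 is watched (Example #10
below watches u:0 the same way).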
Example #4
    def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(
            self):
        grad_debugger = debug_gradients.GradientsDebugger()
        grad_debugger.identify_gradient(self.w)
        with self.assertRaisesRegexp(
                ValueError, "The graph already contains an op named .*"):
            grad_debugger.identify_gradient(self.w)
Example #5
 def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):
   grad_debugger = debug_gradients.GradientsDebugger()
   with self.assertRaisesRegexp(
       TypeError,
       r"x_tensor must be a str or tf\.Tensor or tf\.Variable, but instead "
       r"has type .*Operation.*"):
     grad_debugger.gradient_tensor(variables.global_variables_initializer())
Example #6
  def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self):
    grad_debugger_1 = debug_gradients.GradientsDebugger()
    grad_debugger_2 = debug_gradients.GradientsDebugger()
    id_grad_w = grad_debugger_1.identify_gradient(self.w)
    y = math_ops.add(id_grad_w, -1.0, name="y")

    # More than one GradientsDebugger has been created, and neither is used
    # as a context manager here, so the gradient w.r.t. self.w will not be
    # registered with either debugger.
    gradients_impl.gradients(y, [self.u, self.v])

    with self.assertRaisesRegexp(
        LookupError,
        r"This GradientsDebugger has not received any gradient tensor for "):
      grad_debugger_1.gradient_tensor(self.w)
    with self.assertRaisesRegexp(
        LookupError,
        r"This GradientsDebugger has not received any gradient tensor for "):
      grad_debugger_2.gradient_tensor(self.w)
Example #7
    def testGradientsValuesFromDumpWorks(self):
        y = math_ops.add(self.w, -1.0, name="y")
        z = math_ops.square(y, name="z")

        grad_debugger = debug_gradients.GradientsDebugger()
        with grad_debugger.watch_gradients_by_tensors(self.sess.graph,
                                                      [self.w, self.u, y]):
            train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(
                z)

        self.sess.run(variables.global_variables_initializer())

        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        dump_dir = tempfile.mkdtemp()
        debug_url = "file://" + dump_dir
        debug_utils.watch_graph(run_options,
                                self.sess.graph,
                                debug_urls=debug_url)
        run_metadata = config_pb2.RunMetadata()
        self.assertAllClose(2.0, self.sess.run(self.u))
        self.sess.run(train_op, options=run_options, run_metadata=run_metadata)
        self.assertAllClose(-1.0, self.sess.run(self.u))

        dump = debug_data.DebugDumpDir(
            dump_dir, partition_graphs=run_metadata.partition_graphs)
        dump.set_python_graph(self.sess.graph)

        y_grad_values = debug_gradients.gradient_values_from_dump(
            grad_debugger, y, dump)
        self.assertEqual(1, len(y_grad_values))
        self.assertAllClose(10.0, y_grad_values[0])

        w_grad_values = debug_gradients.gradient_values_from_dump(
            grad_debugger, self.w, dump)
        self.assertEqual(1, len(w_grad_values))
        self.assertAllClose(10.0, w_grad_values[0])

        u_grad_values = debug_gradients.gradient_values_from_dump(
            grad_debugger, self.u, dump)
        self.assertEqual(1, len(u_grad_values))
        self.assertAllClose(30.0, u_grad_values[0])

        with self.assertRaisesRegexp(
                LookupError,
                r"This GradientsDebugger has not received any gradient tensor for "
                r"x-tensor v:0"):
            debug_gradients.gradient_values_from_dump(grad_debugger, self.v,
                                                      dump)

        # Cleanup.
        shutil.rmtree(dump_dir)
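For reference, the dumped values follow from the chain rule under the assumed
fixture: z = y**2 gives dz/dy = 2 * y = 10.0 at y = 5.0; y = w - 1.0 gives
dz/dw = dz/dy = 10.0; and w = u * v gives dz/du = dz/dw * v = 30.0, which
also explains the training update u <- 2.0 - 0.1 * 30.0 = -1.0.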
Example #8
    def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self):
        grad_debugger = debug_gradients.GradientsDebugger()
        id_grad_w = grad_debugger.identify_gradient(self.w)
        y = math_ops.add(id_grad_w, -1.0, name="y")

        with grad_debugger:
            gradient_descent.GradientDescentOptimizer(0.1).minimize(y)

        self.sess.run(variables.global_variables_initializer())

        # Fetch the gradient tensor with the x-tensor object.
        w_grad = grad_debugger.gradient_tensor(self.w)
        self.assertIsInstance(w_grad, ops.Tensor)
        self.assertAllClose(1.0, self.sess.run(w_grad))
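Here the expected gradient is dy/dw = 1.0 because y = w - 1.0. Calling
minimize() is what builds the backward graph and lets the debugger intercept
the gradient tensor; the returned train op itself is never run.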
Example #9
    def testWatchGradientsByTensorCanWorkOnMultipleLosses(self):
        y = math_ops.add(self.w, -1.0, name="y")
        z1 = math_ops.square(y, name="z1")
        z2 = math_ops.sqrt(y, name="z2")

        grad_debugger_1 = debug_gradients.GradientsDebugger()
        with grad_debugger_1.watch_gradients_by_tensors(self.sess.graph, y):
            gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)

        grad_debugger_2 = debug_gradients.GradientsDebugger()
        with grad_debugger_2.watch_gradients_by_tensors(self.sess.graph, y):
            gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)

        dz1_dy = grad_debugger_1.gradient_tensor(y)
        dz2_dy = grad_debugger_2.gradient_tensor(y)
        self.assertIsInstance(dz1_dy, ops.Tensor)
        self.assertIsInstance(dz2_dy, ops.Tensor)
        self.assertIsNot(dz1_dy, dz2_dy)

        self.sess.run(variables.global_variables_initializer())
        self.assertAllClose(5.0**2, self.sess.run(z1))
        self.assertAllClose(5.0**0.5, self.sess.run(z2))
        self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
        self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))
Example #10
  def testWatchGradientsWorksOnRefTensor(self):
    y = math_ops.add(self.w, -1.0, name="y")

    grad_debugger = debug_gradients.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "u:0$"):
      grads = gradients_impl.gradients(y, [self.u, self.v])
    self.assertEqual(2, len(grads))
    u_grad = grads[0]
    v_grad = grads[1]

    self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(3.0, self.sess.run(u_grad))
    self.assertAllClose(2.0, self.sess.run(v_grad))
    self.assertAllClose(
        3.0, self.sess.run(grad_debugger.gradient_tensor("u:0")))
Example #11
    def testWatchGradientsByXTensorsWorks(self):
        y = math_ops.add(self.w, -1.0, name="foo/y")
        z = math_ops.square(y, name="foo/z")

        # The construction of the forward graph has completed, but we can
        # still get the gradient tensors by using
        # watch_gradients_by_tensors().
        grad_debugger = debug_gradients.GradientsDebugger()
        with grad_debugger.watch_gradients_by_tensors(self.sess.graph,
                                                      [self.w, self.u, y]):
            gradient_descent.GradientDescentOptimizer(0.1).minimize(z)

        self.assertEqual(3, len(grad_debugger.gradient_tensors()))
        u_grad = grad_debugger.gradient_tensor(self.u)
        w_grad = grad_debugger.gradient_tensor(self.w)
        y_grad = grad_debugger.gradient_tensor(y)

        self.sess.run(variables.global_variables_initializer())
        self.assertAllClose(10.0, self.sess.run(y_grad))
        self.assertAllClose(10.0, self.sess.run(w_grad))
        self.assertAllClose(30.0, self.sess.run(u_grad))