Example #1
0
    def testAddDebugTensorWatches_defaultDebugOp(self):
        """Watches added without debug_ops should default to DebugIdentity."""
        debug_utils.add_debug_tensor_watch(
            self._run_options, "foo/node_a", 1, debug_urls="file:///tmp/tfdbg_1")
        debug_utils.add_debug_tensor_watch(
            self._run_options, "foo/node_b", 0, debug_urls="file:///tmp/tfdbg_2")

        watches = self._run_options.debug_tensor_watch_opts
        self.assertEqual(2, len(watches))

        first, second = watches

        self.assertEqual("foo/node_a", first.node_name)
        self.assertEqual(1, first.output_slot)
        self.assertEqual("foo/node_b", second.node_name)
        self.assertEqual(0, second.output_slot)

        # With no debug_ops argument, the op defaults to DebugIdentity.
        self.assertEqual(["DebugIdentity"], first.debug_ops)
        self.assertEqual(["DebugIdentity"], second.debug_ops)

        # Each watch carries exactly the URL that was passed for it.
        self.assertEqual(["file:///tmp/tfdbg_1"], first.debug_urls)
        self.assertEqual(["file:///tmp/tfdbg_2"], second.debug_urls)
  def testDumpStringTensorsWorks(self):
    """Watched string tensors should be dumped and retrievable from the dump."""
    with session.Session() as sess:
      init_val_1 = np.array(b"abc")
      init_val_2 = np.array(b"def")

      name_1 = "str1"
      name_2 = "str2"
      var_1 = variables.Variable(constant_op.constant(init_val_1), name=name_1)
      var_2 = variables.Variable(constant_op.constant(init_val_2), name=name_2)
      # Concatenate the two string variables.
      concat = math_ops.add(var_1, var_2, name="str_concat")

      var_1.initializer.run()
      var_2.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Watch the read endpoint of each string variable.
      for var_name in (name_1, name_2):
        debug_utils.add_debug_tensor_watch(
            run_options, "%s/read" % var_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      sess.run(concat, options=run_options, run_metadata=run_metadata)

      # String ops are located on CPU, so a single partition graph is expected.
      self.assertEqual(1, len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertIn(name_1, dump.nodes())
      self.assertIn(name_2, dump.nodes())

      self.assertEqual(2, dump.size)

      self.assertEqual([init_val_1],
                       dump.get_tensors("%s/read" % name_1, 0, "DebugIdentity"))
      self.assertEqual([init_val_2],
                       dump.get_tensors("%s/read" % name_2, 0, "DebugIdentity"))

      # Relative timestamps are measured from the start of the run, so they
      # must be non-negative.
      for var_name in (name_1, name_2):
        self.assertGreaterEqual(
            dump.get_rel_timestamps("%s/read" % var_name, 0,
                                    "DebugIdentity")[0], 0)
  def testDumpToFileOverlappingParentDir(self):
    """Dumping two tensors whose node names share a parent directory works."""
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])

      # Node names share the "testDumpToFile" namespace (i.e., parent
      # directory) to exercise concurrent, non-racing directory creation.
      u_name = "testDumpToFile/u"
      v_name = "testDumpToFile/v"

      u = variables.Variable(
          constant_op.constant(u_init_val, shape=[2, 2]), name=u_name)
      v = variables.Variable(
          constant_op.constant(v_init_val, shape=[2, 1]), name=v_name)
      w = math_ops.matmul(u, v, name="testDumpToFile/matmul")

      u.initializer.run()
      v.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = "file://%s" % self._dump_root

      # Watch the read endpoints of both variables.
      for node in (u_name, v_name):
        debug_utils.add_debug_tensor_watch(
            run_options, "%s/read" % node, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Invoke Session.run().
      sess.run(w, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())

      # Both watched tensors should have been dumped with their initial values.
      self.assertEqual(2, dump.size)

      self.assertAllClose(
          [u_init_val],
          dump.get_tensors("%s/read" % u_name, 0, "DebugIdentity"))
      self.assertAllClose(
          [v_init_val],
          dump.get_tensors("%s/read" % v_name, 0, "DebugIdentity"))

      # Relative timestamps must be non-negative.
      for node in (u_name, v_name):
        self.assertGreaterEqual(
            dump.get_rel_timestamps("%s/read" % node, 0, "DebugIdentity")[0], 0)
    def testDifferentWatchesOnDifferentRuns(self):
        """Test watching different tensors on different runs of the same graph."""

        with session.Session() as sess:
            u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
            v_init_val = np.array([[2.0], [-1.0]])

            # Use node names with overlapping namespace (i.e., parent directory) to
            # test concurrent, non-racing directory creation.
            u_name = "diff_Watch/u"
            v_name = "diff_Watch/v"

            u_init = constant_op.constant(u_init_val, shape=[2, 2])
            u = variables.Variable(u_init, name=u_name)
            v_init = constant_op.constant(v_init_val, shape=[2, 1])
            v = variables.Variable(v_init, name=v_name)

            w = math_ops.matmul(u, v, name="diff_Watch/matmul")

            u.initializer.run()
            v.initializer.run()

            # Run the same graph twice, watching a different tensor each time
            # and dumping to a separate, per-run directory.
            for i in xrange(2):
                run_options = config_pb2.RunOptions(output_partition_graphs=True)

                # Per-run dump root and debug URLs.
                run_dump_root = self._debug_dump_dir(run_number=i)
                debug_urls = self._debug_urls(run_number=i)

                if i == 0:
                    # First debug run: Add debug tensor watch for u.
                    debug_utils.add_debug_tensor_watch(run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
                else:
                    # Second debug run: Add debug tensor watch for v.
                    debug_utils.add_debug_tensor_watch(run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

                run_metadata = config_pb2.RunMetadata()

                # Invoke Session.run().
                sess.run(w, options=run_options, run_metadata=run_metadata)

                self.assertEqual(self._expected_partition_graph_count, len(run_metadata.partition_graphs))

                dump = debug_data.DebugDumpDir(run_dump_root, partition_graphs=run_metadata.partition_graphs)
                self.assertTrue(dump.loaded_partition_graphs())

                # Each run should have generated only one dumped tensor, not two.
                self.assertEqual(1, dump.size)

                # The dumped tensor must match the initial value of whichever
                # variable was watched on this run, and carry a non-negative
                # relative timestamp.
                if i == 0:
                    self.assertAllClose([u_init_val], dump.get_tensors("%s/read" % u_name, 0, "DebugIdentity"))
                    self.assertGreaterEqual(dump.get_rel_timestamps("%s/read" % u_name, 0, "DebugIdentity")[0], 0)
                else:
                    self.assertAllClose([v_init_val], dump.get_tensors("%s/read" % v_name, 0, "DebugIdentity"))
                    self.assertGreaterEqual(dump.get_rel_timestamps("%s/read" % v_name, 0, "DebugIdentity")[0], 0)
    def testDumpUninitializedVariable(self):
        """Watching variables during the initializer run dumps None values."""
        op_namespace = "testDumpUninitializedVariable"
        with session.Session() as sess:
            u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
            s_init_val = b"str1"

            u_name = "%s/u" % op_namespace
            s_name = "%s/s" % op_namespace

            u_init = constant_op.constant(u_init_val, shape=[2, 2])
            u = variables.Variable(u_init, name=u_name)
            s_init = constant_op.constant(s_init_val)
            s = variables.Variable(s_init, name=s_name)

            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            debug_urls = self._debug_urls()

            # Add debug tensor watch for u.
            debug_utils.add_debug_tensor_watch(run_options,
                                               "%s" % u_name,
                                               0,
                                               debug_urls=debug_urls)
            # Add debug tensor watch for s.
            debug_utils.add_debug_tensor_watch(run_options,
                                               "%s" % s_name,
                                               0,
                                               debug_urls=debug_urls)

            run_metadata = config_pb2.RunMetadata()

            # Initialize u and s.
            sess.run(variables.global_variables_initializer(),
                     options=run_options,
                     run_metadata=run_metadata)

            # Verify the dump file for the uninitialized value of u.
            dump = debug_data.DebugDumpDir(
                self._dump_root,
                partition_graphs=run_metadata.partition_graphs)

            self.assertEqual(2, dump.size)
            self.assertEqual(self._expected_partition_graph_count,
                             len(run_metadata.partition_graphs))

            # Verify that the variable is properly initialized by the run() call.
            # The tensors were watched before initialization completed, so the
            # dumped values are None.
            u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
            s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
            self.assertEqual(1, len(u_vals))
            self.assertIsNone(u_vals[0])
            self.assertEqual(1, len(s_vals))
            self.assertIsNone(s_vals[0])

            # Call run() again, to check that u is initialized properly.
            self.assertAllClose(u_init_val, sess.run(u))
            self.assertEqual(s_init_val, sess.run(s))
  def testDumpToFileOverlappingParentDir(self):
    """Dumping two tensors whose node names share a parent directory works."""
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])

      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "testDumpToFile/u"
      v_name = "testDumpToFile/v"

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.Variable(v_init, name=v_name)

      w = math_ops.matmul(u, v, name="testDumpToFile/matmul")

      u.initializer.run()
      v.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = "file://%s" % self._dump_root

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Invoke Session.run().
      sess.run(w, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())

      # Verify the dumped tensor values for u and v.
      self.assertEqual(2, dump.size)

      self.assertAllClose([u_init_val], dump.get_tensors("%s/read" % u_name, 0,
                                                         "DebugIdentity"))
      self.assertAllClose([v_init_val], dump.get_tensors("%s/read" % v_name, 0,
                                                         "DebugIdentity"))

      # Relative timestamps must be non-negative.
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % u_name, 0, "DebugIdentity")[0], 0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % v_name, 0, "DebugIdentity")[0], 0)
  def testDumpStringTensorsToFileSystem(self):
    """Watched string tensors should be dumped to the file system."""
    with session.Session() as sess:
      str1_init_val = np.array(b"abc")
      str2_init_val = np.array(b"def")

      str1_init = constant_op.constant(str1_init_val)
      str2_init = constant_op.constant(str2_init_val)

      str1_name = "str1"
      str2_name = "str2"
      str1 = variables.Variable(str1_init, name=str1_name)
      str2 = variables.Variable(str2_init, name=str2_name)
      # Concatenate str1 and str2
      str_concat = math_ops.add(str1, str2, name="str_concat")

      str1.initializer.run()
      str2.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for str1.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for str2.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      sess.run(str_concat, options=run_options, run_metadata=run_metadata)

      # String ops are located on CPU.
      self.assertEqual(1, len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertIn(str1_name, dump.nodes())
      self.assertIn(str2_name, dump.nodes())

      self.assertEqual(2, dump.size)

      self.assertEqual([str1_init_val], dump.get_tensors("%s/read" % str1_name,
                                                         0, "DebugIdentity"))
      self.assertEqual([str2_init_val], dump.get_tensors("%s/read" % str2_name,
                                                         0, "DebugIdentity"))

      # Relative timestamps must be non-negative.
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
          0)
Example #8
0
    def _generate_dump_from_simple_addition_graph(self):
        """Run a small two-variable graph with tfdbg watches and collect the dump.

        NOTE(review): despite the name, the graph computes w = matmul(u, v),
        not an addition — confirm whether the name is historical.

        Returns:
          A SimpleAddResults namedtuple bundling the initial values
          (u_init_val, v_init_val), the graph objects (u, v, w), their node
          names (u_name, v_name, w_name), and the DebugDumpDir of the run.
        """
        with session.Session() as sess:
            u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
            v_init_val = np.array([[2.0], [-1.0]])

            # Short top-level names for the two inputs and the product node.
            u_name = "u"
            v_name = "v"
            w_name = "w"

            u_init = constant_op.constant(u_init_val, shape=[2, 2])
            u = variables.Variable(u_init, name=u_name)
            v_init = constant_op.constant(v_init_val, shape=[2, 1])
            v = variables.Variable(v_init, name=v_name)

            w = math_ops.matmul(u, v, name=w_name)

            u.initializer.run()
            v.initializer.run()

            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            debug_urls = "file://%s" % self._dump_root

            # Add debug tensor watch for u.
            debug_utils.add_debug_tensor_watch(run_options,
                                               "%s/read" % u_name,
                                               0,
                                               debug_urls=debug_urls)
            # Add debug tensor watch for v.
            debug_utils.add_debug_tensor_watch(run_options,
                                               "%s/read" % v_name,
                                               0,
                                               debug_urls=debug_urls)

            run_metadata = config_pb2.RunMetadata()

            # Invoke Session.run().
            sess.run(w, options=run_options, run_metadata=run_metadata)

            self.assertEqual(self._expected_partition_graph_count,
                             len(run_metadata.partition_graphs))

            dump = debug_data.DebugDumpDir(
                self._dump_root,
                partition_graphs=run_metadata.partition_graphs)

        simple_add_results = collections.namedtuple("SimpleAddResults", [
            "u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name",
            "w_name", "dump"
        ])
        return simple_add_results(u_init_val, v_init_val, u, v, w, u_name,
                                  v_name, w_name, dump)
  def testDumpUninitializedVariable(self):
    """Watching variables during the initializer run dumps None values."""
    op_namespace = "testDumpUninitializedVariable"
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      s_init_val = b"str1"

      u_name = "%s/u" % op_namespace
      s_name = "%s/s" % op_namespace

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      s_init = constant_op.constant(s_init_val)
      s = variables.Variable(s_init, name=s_name)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watches for u and s.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s" % u_name, 0, debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, "%s" % s_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Initialize u and s.
      sess.run(variables.global_variables_initializer(),
               options=run_options,
               run_metadata=run_metadata)

      # Verify the dump file for the uninitialized value of u.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertEqual(2, dump.size)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      # Verify that the variable is properly initialized by the run() call.
      # The tensors were watched before initialization completed, so the dumped
      # values are None.
      u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
      s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
      self.assertEqual(1, len(u_vals))
      self.assertIsNone(u_vals[0])
      self.assertEqual(1, len(s_vals))
      self.assertIsNone(s_vals[0])

      # Call run() again, to check that u is initialized properly.
      self.assertAllClose(u_init_val, sess.run(u))
      self.assertEqual(s_init_val, sess.run(s))
  def _generate_dump_from_simple_addition_graph(self):
    """Run a small two-variable graph with tfdbg watches and collect the dump.

    NOTE(review): despite the name, the graph computes w = matmul(u, v), not
    an addition — confirm whether the name is historical.

    Returns:
      A SimpleAddResults namedtuple bundling the initial values (u_init_val,
      v_init_val), the graph objects (u, v, w), their node names (u_name,
      v_name, w_name), and the DebugDumpDir of the run.
    """
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])

      # Short top-level names for the two inputs and the product node.
      u_name = "u"
      v_name = "v"
      w_name = "w"

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.Variable(v_init, name=v_name)

      w = math_ops.matmul(u, v, name=w_name)

      u.initializer.run()
      v.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = "file://%s" % self._dump_root

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Invoke Session.run().
      sess.run(w, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

    simple_add_results = collections.namedtuple("SimpleAddResults", [
        "u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
        "dump"
    ])
    return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
                              w_name, dump)
Example #11
0
    def testAddDebugTensorWatches_explicitDebugOp(self):
        """A debug op passed as a single string should be recorded as given."""
        debug_utils.add_debug_tensor_watch(self._run_options,
                                           "foo/node_a",
                                           0,
                                           debug_ops="DebugNanCount",
                                           debug_urls="file:///tmp/tfdbg_1")

        self.assertEqual(1, len(self._run_options.debug_tensor_watch_opts))

        watch_0 = self._run_options.debug_tensor_watch_opts[0]

        self.assertEqual("foo/node_a", watch_0.node_name)
        self.assertEqual(0, watch_0.output_slot)

        # Verify the explicitly specified debug op name.
        self.assertEqual(["DebugNanCount"], watch_0.debug_ops)

        # Verify debug URLs.
        self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
  def testAddDebugTensorWatches_multipleDebugOps(self):
    """A list of debug ops should all be attached to the single watch."""
    debug_utils.add_debug_tensor_watch(
        self._run_options,
        "foo/node_a",
        0,
        debug_ops=["DebugNanCount", "DebugIdentity"],
        debug_urls="file:///tmp/tfdbg_1")

    self.assertEqual(1, len(self._run_options.debug_tensor_watch_opts))

    watch_0 = self._run_options.debug_tensor_watch_opts[0]

    self.assertEqual("foo/node_a", watch_0.node_name)
    self.assertEqual(0, watch_0.output_slot)

    # Verify the explicitly specified debug op names, in order.
    self.assertEqual(["DebugNanCount", "DebugIdentity"], watch_0.debug_ops)

    # Verify debug URLs.
    self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
Example #13
0
  def _prepare_cont_call_dump_path_and_run_options(self):
    """Prepare the dump path and RunOptions for next cont() call.

    Returns:
      dump_path: (str) Directory path to which the intermediate tensor will be
        dumped.
      run_options: (config_pb2.RunOptions) The RunOptions containing the tensor
        watch options for this graph.
    """
    run_options = config_pb2.RunOptions()
    dump_path = self._cont_call_dump_path()
    for element_name in self._closure_elements:
      # Only tensor names of the form "node:slot" receive watches; plain node
      # names are skipped.
      if ":" not in element_name:
        continue
      debug_utils.add_debug_tensor_watch(
          run_options,
          debug_data.get_node_name(element_name),
          output_slot=debug_data.get_output_slot(element_name),
          debug_urls=["file://" + dump_path])

    return dump_path, run_options
Example #14
0
    def _prepare_cont_call_dump_path_and_run_options(self):
        """Prepare the dump path and RunOptions for next cont() call.

        Returns:
          dump_path: (str) Directory path to which the intermediate tensor will
            be dumped.
          run_options: (config_pb2.RunOptions) The RunOptions containing the
            tensor watch options for this graph.
        """
        run_options = config_pb2.RunOptions()
        dump_path = self._cont_call_dump_path()
        for element_name in self._closure_elements:
            # Only tensor names of the form "node:slot" receive watches; plain
            # node names are skipped.
            if ":" in element_name:
                debug_utils.add_debug_tensor_watch(
                    run_options,
                    debug_data.get_node_name(element_name),
                    output_slot=debug_data.get_output_slot(element_name),
                    debug_urls=["file://" + dump_path])

        return dump_path, run_options
  def testAddDebugTensorWatches_defaultDebugOp(self):
    """Omitting debug_ops should fall back to the DebugIdentity op."""
    debug_utils.add_debug_tensor_watch(
        self._run_options, "foo/node_a", 1, debug_urls="file:///tmp/tfdbg_1")
    debug_utils.add_debug_tensor_watch(
        self._run_options, "foo/node_b", 0, debug_urls="file:///tmp/tfdbg_2")

    watches = self._run_options.debug_tensor_watch_opts
    self.assertEqual(2, len(watches))

    # Watches are recorded in insertion order.
    self.assertEqual("foo/node_a", watches[0].node_name)
    self.assertEqual(1, watches[0].output_slot)
    self.assertEqual("foo/node_b", watches[1].node_name)
    self.assertEqual(0, watches[1].output_slot)

    # With no debug_ops argument, the op defaults to DebugIdentity.
    self.assertEqual(["DebugIdentity"], watches[0].debug_ops)
    self.assertEqual(["DebugIdentity"], watches[1].debug_ops)

    # Each watch carries the URL that was passed for it.
    self.assertEqual(["file:///tmp/tfdbg_1"], watches[0].debug_urls)
    self.assertEqual(["file:///tmp/tfdbg_2"], watches[1].debug_urls)
Example #16
0
    def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
        """Watching one of a node's output slots must not fail causality checks.

        unique_with_counts produces multiple outputs; only slot 0 of u is
        watched while downstream nodes consume slot 1. Constructing the
        DebugDumpDir with validate=True exercises the causality validation.
        """
        with session.Session() as sess:
            x_name = "oneOfTwoSlots/x"
            u_name = "oneOfTwoSlots/u"
            v_name = "oneOfTwoSlots/v"
            w_name = "oneOfTwoSlots/w"
            y_name = "oneOfTwoSlots/y"

            x = variables.Variable([1, 3, 3, 7],
                                   dtype=dtypes.int32,
                                   name=x_name)
            sess.run(x.initializer)

            unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)

            v = math_ops.add(unique_x, unique_x, name=v_name)
            w = math_ops.add(indices, indices, name=w_name)
            y = math_ops.add(w, w, name=y_name)

            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            # Watch only the first output slot of u, even though it has two output
            # slots.
            debug_utils.add_debug_tensor_watch(run_options,
                                               u_name,
                                               0,
                                               debug_urls=self._debug_urls())
            debug_utils.add_debug_tensor_watch(run_options,
                                               w_name,
                                               0,
                                               debug_urls=self._debug_urls())
            debug_utils.add_debug_tensor_watch(run_options,
                                               y_name,
                                               0,
                                               debug_urls=self._debug_urls())

            run_metadata = config_pb2.RunMetadata()
            sess.run([v, y], options=run_options, run_metadata=run_metadata)

            # validate=True triggers causality validation when loading the dump.
            dump = debug_data.DebugDumpDir(
                self._dump_root,
                partition_graphs=run_metadata.partition_graphs,
                validate=True)

            # The dumped slot-0 output of u is the unique values of x.
            self.assertAllClose([1, 3, 7],
                                dump.get_tensors(u_name, 0,
                                                 "DebugIdentity")[0])
  def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
    """Watching one of a node's output slots must not fail causality checks.

    unique_with_counts produces multiple outputs; only slot 0 of u is watched
    while downstream nodes consume slot 1. Constructing the DebugDumpDir with
    validate=True exercises the causality validation.
    """
    with session.Session() as sess:
      x_name = "oneOfTwoSlots/x"
      u_name = "oneOfTwoSlots/u"
      v_name = "oneOfTwoSlots/v"
      w_name = "oneOfTwoSlots/w"
      y_name = "oneOfTwoSlots/y"

      # Use dtypes.int32 (not tf.int32): the sibling version of this test uses
      # dtypes.int32 and `tf` is not referenced anywhere else in this file.
      x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
      sess.run(x.initializer)

      unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)

      v = math_ops.add(unique_x, unique_x, name=v_name)
      w = math_ops.add(indices, indices, name=w_name)
      y = math_ops.add(w, w, name=y_name)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Watch only the first output slot of u, even though it has two output
      # slots.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, w_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, y_name, 0, debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run([v, y], options=run_options, run_metadata=run_metadata)

      # validate=True triggers causality validation when loading the dump.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=True)

      # The dumped slot-0 output of u is the unique values of x.
      self.assertAllClose([1, 3, 7],
                          dump.get_tensors(u_name, 0, "DebugIdentity")[0])
  def testDumpToFileWhileLoop(self):
    """Watched tensors inside a while_loop are dumped once per iteration."""
    with session.Session() as sess:
      num_iter = 10

      # "u" is the Variable being updated in the loop.
      u_name = "testDumpToFileWhileLoop/u"
      u_namespace = u_name.split("/")[0]

      u_init_val = np.array(11.0)
      u_init = constant_op.constant(u_init_val)
      u = variables.Variable(u_init, name=u_name)

      # "v" is the increment.
      v_name = "testDumpToFileWhileLoop/v"
      v_namespace = v_name.split("/")[0]

      v_init_val = np.array(2.0)
      v_init = constant_op.constant(v_init_val)
      v = variables.Variable(v_init, name=v_name)

      u.initializer.run()
      v.initializer.run()

      i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")

      # Loop condition: iterate num_iter times.
      def cond(i):
        return math_ops.less(i, num_iter)

      # Loop body: u += v, then increment i (ordered after the assign via a
      # control dependency).
      def body(i):
        new_u = state_ops.assign_add(u, v)
        new_i = math_ops.add(i, 1)
        op = control_flow_ops.group(new_u)
        new_i = control_flow_ops.with_dependencies([op], new_i)
        return [new_i]

      loop = control_flow_ops.while_loop(cond, body, [i], parallel_iterations=1)

      # Create RunOptions for debug-watching tensors
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Identity.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Identity", 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Add/y.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Add/y", 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      r = sess.run(loop, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      self.assertEqual(num_iter, r)

      u_val_final = sess.run(u)
      self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)

      # Verify dump files
      self.assertTrue(os.path.isdir(self._dump_root))

      # The dump directory layout mirrors the node-name namespaces.
      self.assertTrue(os.path.isdir(os.path.join(self._dump_root, u_namespace)))
      self.assertTrue(
          os.path.isdir(os.path.join(self._dump_root, v_namespace, "v")))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
      # and 10 iterations of while/Add/y.
      self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)

      # Verify tensor values.
      self.assertAllClose([u_init_val], dump.get_tensors(u_name, 0,
                                                         "DebugIdentity"))
      self.assertAllClose([v_init_val], dump.get_tensors("%s/read" % v_name, 0,
                                                         "DebugIdentity"))

      # The loop counter takes values 0..9, one dump per iteration.
      while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
      self.assertEqual(10, len(while_id_tensors))
      for k in xrange(len(while_id_tensors)):
        self.assertAllClose(np.array(k), while_id_tensors[k])

      # Verify ascending timestamps from the while loops.
      while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
                                                        "DebugIdentity")
      self.assertEqual(10, len(while_id_rel_timestamps))
      prev_rel_time = 0
      for rel_time in while_id_rel_timestamps:
        self.assertGreaterEqual(rel_time, prev_rel_time)
        prev_rel_time = rel_time

      # Test querying debug watch keys from node name.
      watch_keys = dump.debug_watch_keys("while/Identity")
      self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)

      # Test querying debug datum instances from debug watch key.
      self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
      self.assertEqual([], dump.watch_key_to_data("foo"))
    def testDumpToFileWhileLoop(self):
        """Dump debug tensors from a while loop and verify the dump contents.

        Builds a 10-iteration ``while_loop`` that repeatedly adds Variable v
        into Variable u, watches u, v/read, while/Identity, and while/Add/y,
        then checks the dump directory layout, the total dump size, the
        dumped tensor values, the relative timestamps of the loop iterations,
        and the watch-key query APIs of DebugDumpDir.
        """
        with session.Session() as sess:
            num_iter = 10

            # "u" is the Variable being updated in the loop.
            u_name = "testDumpToFileWhileLoop/u"
            u_namespace = u_name.split("/")[0]

            u_init_val = np.array(11.0)
            u_init = constant_op.constant(u_init_val)
            u = variables.Variable(u_init, name=u_name)

            # "v" is the increment.
            v_name = "testDumpToFileWhileLoop/v"
            v_namespace = v_name.split("/")[0]

            v_init_val = np.array(2.0)
            v_init = constant_op.constant(v_init_val)
            v = variables.Variable(v_init, name=v_name)

            u.initializer.run()
            v.initializer.run()

            i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")

            def cond(i):
                return math_ops.less(i, num_iter)

            def body(i):
                new_u = state_ops.assign_add(u, v)
                new_i = math_ops.add(i, 1)
                # Force the u-update to happen before new_i is produced, so
                # every loop iteration performs exactly one assign_add.
                op = control_flow_ops.group(new_u)
                new_i = control_flow_ops.with_dependencies([op], new_i)
                return [new_i]

            # parallel_iterations=1 keeps the iterations strictly sequential,
            # which makes the dumped per-iteration values deterministic.
            loop = control_flow_ops.while_loop(cond,
                                               body, [i],
                                               parallel_iterations=1)

            # Create RunOptions for debug-watching tensors.
            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            debug_urls = self._debug_urls()

            # Add debug tensor watch for u.
            debug_utils.add_debug_tensor_watch(run_options,
                                               u_name,
                                               0,
                                               debug_urls=debug_urls)
            # Add debug tensor watch for v.
            debug_utils.add_debug_tensor_watch(run_options,
                                               "%s/read" % v_name,
                                               0,
                                               debug_urls=debug_urls)
            # Add debug tensor watch for while/Identity.
            debug_utils.add_debug_tensor_watch(run_options,
                                               "while/Identity",
                                               0,
                                               debug_urls=debug_urls)
            # Add debug tensor watch for while/Add/y.
            debug_utils.add_debug_tensor_watch(run_options,
                                               "while/Add/y",
                                               0,
                                               debug_urls=debug_urls)

            run_metadata = config_pb2.RunMetadata()
            r = sess.run(loop, options=run_options, run_metadata=run_metadata)

            self.assertEqual(self._expected_partition_graph_count,
                             len(run_metadata.partition_graphs))

            # The loop returns the final value of the counter i.
            self.assertEqual(num_iter, r)

            u_val_final = sess.run(u)
            self.assertAllClose(u_init_val + num_iter * v_init_val,
                                u_val_final)

            # Verify dump files.
            self.assertTrue(os.path.isdir(self._dump_root))

            self.assertTrue(
                os.path.isdir(os.path.join(self._dump_root, u_namespace)))
            self.assertTrue(
                os.path.isdir(os.path.join(self._dump_root, v_namespace, "v")))

            dump = debug_data.DebugDumpDir(
                self._dump_root,
                partition_graphs=run_metadata.partition_graphs)

            # Expected dumped tensors: u, v/read, num_iter iterations of
            # while/Identity, and num_iter iterations of while/Add/y.
            self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)

            # Verify tensor values.
            self.assertAllClose([u_init_val],
                                dump.get_tensors(u_name, 0, "DebugIdentity"))
            self.assertAllClose([v_init_val],
                                dump.get_tensors("%s/read" % v_name, 0,
                                                 "DebugIdentity"))

            # while/Identity carries the loop counter, so iteration k should
            # have dumped the value k.
            while_id_tensors = dump.get_tensors("while/Identity", 0,
                                                "DebugIdentity")
            self.assertEqual(num_iter, len(while_id_tensors))
            for k, id_tensor in enumerate(while_id_tensors):
                self.assertAllClose(np.array(k), id_tensor)

            # Verify ascending timestamps from the while loops.
            while_id_rel_timestamps = dump.get_rel_timestamps(
                "while/Identity", 0, "DebugIdentity")
            self.assertEqual(num_iter, len(while_id_rel_timestamps))
            prev_rel_time = 0
            for rel_time in while_id_rel_timestamps:
                self.assertGreaterEqual(rel_time, prev_rel_time)
                prev_rel_time = rel_time

            # Test querying debug watch keys from node name.
            watch_keys = dump.debug_watch_keys("while/Identity")
            self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)

            # Test querying debug datum instances from debug watch key:
            # one datum per iteration, and an empty list for an unknown key.
            self.assertEqual(num_iter,
                             len(dump.watch_key_to_data(watch_keys[0])))
            self.assertEqual([], dump.watch_key_to_data("foo"))
    def testDifferentWatchesOnDifferentRuns(self):
        """Test watching different tensors on different runs of the same graph."""

        with session.Session() as sess:
            u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
            v_init_val = np.array([[2.0], [-1.0]])

            # The two node names share a namespace (i.e., parent directory)
            # so that concurrent, non-racing directory creation gets
            # exercised by the debug dumper.
            u_name = "diff_Watch/u"
            v_name = "diff_Watch/v"

            u = variables.Variable(
                constant_op.constant(u_init_val, shape=[2, 2]), name=u_name)
            v = variables.Variable(
                constant_op.constant(v_init_val, shape=[2, 1]), name=v_name)

            w = math_ops.matmul(u, v, name="diff_Watch/matmul")

            u.initializer.run()
            v.initializer.run()

            for run_number in xrange(2):
                run_options = config_pb2.RunOptions(
                    output_partition_graphs=True)

                run_dump_root = self._debug_dump_dir(run_number=run_number)
                debug_urls = self._debug_urls(run_number=run_number)

                # First debug run watches u; second debug run watches v.
                if run_number == 0:
                    watched_name = "%s/read" % u_name
                    expected_value = u_init_val
                else:
                    watched_name = "%s/read" % v_name
                    expected_value = v_init_val

                debug_utils.add_debug_tensor_watch(run_options,
                                                   watched_name,
                                                   0,
                                                   debug_urls=debug_urls)

                run_metadata = config_pb2.RunMetadata()

                # Invoke Session.run().
                sess.run(w, options=run_options, run_metadata=run_metadata)

                self.assertEqual(self._expected_partition_graph_count,
                                 len(run_metadata.partition_graphs))

                dump = debug_data.DebugDumpDir(
                    run_dump_root,
                    partition_graphs=run_metadata.partition_graphs)
                self.assertTrue(dump.loaded_partition_graphs())

                # Each run should have generated only one dumped tensor, not two.
                self.assertEqual(1, dump.size)

                # The dumped tensor must match the one watched on this run,
                # and its relative timestamp must be non-negative.
                self.assertAllClose(
                    [expected_value],
                    dump.get_tensors(watched_name, 0, "DebugIdentity"))
                self.assertGreaterEqual(
                    dump.get_rel_timestamps(watched_name, 0,
                                            "DebugIdentity")[0], 0)