Пример #1
0
    def testGetNonexistentTensorFilter(self):
        """Requesting an unregistered filter name raises ValueError."""
        dump_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)

        # Register one filter, then ask for a different, unknown name.
        dump_analyzer.add_tensor_filter(
            "foo_filter", lambda unused_datum, unused_tensor: True)
        with self.assertRaisesRegexp(
                ValueError, "There is no tensor filter named \"bar\""):
            dump_analyzer.get_tensor_filter("bar")
Пример #2
0
    def setUpClass(cls):
        """Dump one large tensor to disk and set up a print_tensor CLI."""
        cls._dump_root = tempfile.mkdtemp()

        with session.Session() as sess:
            # 2400 elements should exceed the default threshold (2000).
            big = constant_op.constant(np.zeros([300, 8]),
                                       name="large_tensors/x")

            opts = config_pb2.RunOptions(output_partition_graphs=True)
            debug_utils.watch_graph(opts,
                                    sess.graph,
                                    debug_ops=["DebugIdentity"],
                                    debug_urls="file://%s" % cls._dump_root)

            # Invoke Session.run().
            metadata = config_pb2.RunMetadata()
            sess.run(big, options=opts, run_metadata=metadata)

        cls._debug_dump = debug_data.DebugDumpDir(
            cls._dump_root, partition_graphs=metadata.partition_graphs)

        # Construct the analyzer backed by the freshly written dump.
        cls._analyzer = analyzer_cli.DebugAnalyzer(cls._debug_dump)

        # Construct the handler registry and register the print_tensor
        # command under its "pt" alias.
        cls._registry = debugger_cli_common.CommandHandlerRegistry()
        cls._registry.register_command_handler(
            "print_tensor",
            cls._analyzer.print_tensor,
            cls._analyzer.get_help("print_tensor"),
            prefix_aliases=["pt"])
Пример #3
0
    def testAddTensorFilterNonStrName(self):
        """A non-str filter name must be rejected with a TypeError."""
        dump_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)

        with self.assertRaisesRegexp(
                TypeError, "Input argument filter_name is expected to be str, "
                "but is not"):
            dump_analyzer.add_tensor_filter(
                1, lambda unused_datum, unused_tensor: True)
Пример #4
0
    def setUpClass(cls):
        """Dump a graph with control dependencies and wire up CLI handlers."""
        cls._dump_root = tempfile.mkdtemp()

        # Record device availability so tests can adapt expected device names.
        cls._is_gpu_available = test.is_gpu_available()
        if cls._is_gpu_available:
            cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
        else:
            cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"

        with session.Session() as sess:
            init_val = np.array([5.0, 3.0])
            x_init = constant_op.constant(init_val, shape=[2])
            x = variables.Variable(x_init, name="control_deps/x")

            y = math_ops.add(x, x, name="control_deps/y")
            y = control_flow_ops.with_dependencies(
                [x], y, name="control_deps/ctrl_dep_y")

            z = math_ops.mul(x, y, name="control_deps/z")
            z = control_flow_ops.with_dependencies(
                [x, y], z, name="control_deps/ctrl_dep_z")

            x.initializer.run()

            opts = config_pb2.RunOptions(output_partition_graphs=True)
            debug_utils.watch_graph(opts,
                                    sess.graph,
                                    debug_ops=["DebugIdentity"],
                                    debug_urls="file://%s" % cls._dump_root)

            # Invoke Session.run().
            metadata = config_pb2.RunMetadata()
            sess.run(z, options=opts, run_metadata=metadata)

        debug_dump = debug_data.DebugDumpDir(
            cls._dump_root, partition_graphs=metadata.partition_graphs)

        # Construct the analyzer over the freshly written dump.
        analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

        # Construct the handler registry and register the commands under
        # test, each with its two-letter alias.
        cls._registry = debugger_cli_common.CommandHandlerRegistry()
        for command, handler, alias in (
                ("node_info", analyzer.node_info, "ni"),
                ("list_inputs", analyzer.list_inputs, "li"),
                ("list_outputs", analyzer.list_outputs, "lo")):
            cls._registry.register_command_handler(
                command,
                handler,
                analyzer.get_help(command),
                prefix_aliases=[alias])
Пример #5
0
    def testAddGetTensorFilterNonCallable(self):
        """Registering a non-callable filter must raise a TypeError."""
        dump_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)

        with self.assertRaisesRegexp(
                TypeError,
                "Input argument filter_callable is expected to be callable, "
                "but is not."):
            dump_analyzer.add_tensor_filter("foo_filter", "bar")
Пример #6
0
    def testAddGetTensorFilterNestedFunction(self):
        """A named (non-lambda) function works as a tensor filter too."""
        dump_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)

        def _always_true(unused_arg_0, unused_arg_1):
            return True

        dump_analyzer.add_tensor_filter("foo_filter", _always_true)
        self.assertTrue(
            dump_analyzer.get_tensor_filter("foo_filter")(None, None))
Пример #7
0
    def setUpClass(cls):
        """Dump the simple_mul_add graph and wire up CLI handlers."""
        cls._dump_root = tempfile.mkdtemp()

        # Record device availability so tests can adapt expected device names.
        cls._is_gpu_available = test.is_gpu_available()
        if cls._is_gpu_available:
            cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
        else:
            cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"

        with session.Session() as sess:
            u_name = "simple_mul_add/u"
            v_name = "simple_mul_add/v"

            u_init = constant_op.constant(
                np.array([[5.0, 3.0], [-1.0, 0.0]]), shape=[2, 2])
            u = variables.Variable(u_init, name=u_name)
            v_init = constant_op.constant(
                np.array([[2.0], [-1.0]]), shape=[2, 1])
            v = variables.Variable(v_init, name=v_name)

            w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
            x = math_ops.add(w, w, name="simple_mul_add/add")

            u.initializer.run()
            v.initializer.run()

            opts = config_pb2.RunOptions(output_partition_graphs=True)
            debug_utils.watch_graph(opts,
                                    sess.graph,
                                    debug_ops=["DebugIdentity"],
                                    debug_urls="file://%s" % cls._dump_root)

            # Invoke Session.run().
            metadata = config_pb2.RunMetadata()
            sess.run(x, options=opts, run_metadata=metadata)

        debug_dump = debug_data.DebugDumpDir(
            cls._dump_root, partition_graphs=metadata.partition_graphs)

        # Construct the analyzer over the freshly written dump.
        analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

        # Construct the handler registry and register the commands under
        # test, each with its two-letter alias.
        cls._registry = debugger_cli_common.CommandHandlerRegistry()
        for command, handler, alias in (
                ("list_tensors", analyzer.list_tensors, "lt"),
                ("node_info", analyzer.node_info, "ni")):
            cls._registry.register_command_handler(
                command,
                handler,
                analyzer.get_help(command),
                prefix_aliases=[alias])
Пример #8
0
    def setUpClass(cls):
        """Run a while loop with a targeted tensor watch and set up the CLI."""
        cls._dump_root = tempfile.mkdtemp()

        with session.Session() as sess:
            loop_var = constant_op.constant(0, name="while_loop_test/loop_var")
            cond = lambda lv: math_ops.less(lv, 10)
            body = lambda lv: math_ops.add(lv, 1)
            while_loop = control_flow_ops.while_loop(cond,
                                                     body, [loop_var],
                                                     parallel_iterations=1)

            opts = config_pb2.RunOptions(output_partition_graphs=True)

            # Watch only "while/Identity" rather than the whole graph.
            watch = opts.debug_options.debug_tensor_watch_opts.add()
            watch.node_name = "while/Identity"
            watch.output_slot = 0
            watch.debug_ops.append("DebugIdentity")
            watch.debug_urls.append("file://%s" % cls._dump_root)

            # Invoke Session.run().
            metadata = config_pb2.RunMetadata()
            sess.run(while_loop,
                     options=opts,
                     run_metadata=metadata)

        cls._debug_dump = debug_data.DebugDumpDir(
            cls._dump_root, partition_graphs=metadata.partition_graphs)

        # Analyzer plus the registry with the commands under test, each
        # registered with its two-letter alias.
        cls._analyzer = analyzer_cli.DebugAnalyzer(cls._debug_dump)
        cls._registry = debugger_cli_common.CommandHandlerRegistry()
        for command, handler, alias in (
                ("list_tensors", cls._analyzer.list_tensors, "lt"),
                ("print_tensor", cls._analyzer.print_tensor, "pt")):
            cls._registry.register_command_handler(
                command,
                handler,
                cls._analyzer.get_help(command),
                prefix_aliases=[alias])
Пример #9
0
    def on_run_end(self, request):
        """Overrides on-run-end callback.

    Actions taken:
      1) Load the debug dump.
      2) Bring up the Analyzer CLI.

    Args:
      request: An instance of OnRunEndRequest.

    Returns:
      An instance of OnRunEndResponse.
    """

        # A CLI session only makes sense after a debug-mode run; any other
        # run action leaves no dump directory behind to analyze.
        if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
            debug_dump = debug_data.DebugDumpDir(
                self._dump_root,
                partition_graphs=request.run_metadata.partition_graphs)

            init_command = "lt"
            title_color = "green"
            if self._run_till_filter_pass:
                if not debug_dump.find(
                        self._tensor_filters[self._run_till_filter_pass],
                        first_n=1):
                    # No dumped tensor passes the filter in this run. Clean up the dump
                    # directory and move on.
                    shutil.rmtree(self._dump_root)
                    return framework.OnRunEndResponse()
                else:
                    # Some dumped tensor(s) from this run passed the filter.
                    # Start the CLI filtered on that tensor filter.
                    init_command = "lt -f %s" % self._run_till_filter_pass
                    title_color = "red"
                    # Reset so subsequent runs pause normally again.
                    self._run_till_filter_pass = None

            analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

            # Supply all the available tensor filters.
            for filter_name in self._tensor_filters:
                analyzer.add_tensor_filter(filter_name,
                                           self._tensor_filters[filter_name])

            # Register analyzer commands (and their two-letter aliases) on a
            # fresh curses UI instance.
            run_end_cli = curses_ui.CursesUI()
            run_end_cli.register_command_handler(
                "list_tensors",
                analyzer.list_tensors,
                analyzer.get_help("list_tensors"),
                prefix_aliases=["lt"])
            run_end_cli.register_command_handler(
                "node_info",
                analyzer.node_info,
                analyzer.get_help("node_info"),
                prefix_aliases=["ni"])
            run_end_cli.register_command_handler(
                "list_inputs",
                analyzer.list_inputs,
                analyzer.get_help("list_inputs"),
                prefix_aliases=["li"])
            run_end_cli.register_command_handler(
                "list_outputs",
                analyzer.list_outputs,
                analyzer.get_help("list_outputs"),
                prefix_aliases=["lo"])
            run_end_cli.register_command_handler(
                "print_tensor",
                analyzer.print_tensor,
                analyzer.get_help("print_tensor"),
                prefix_aliases=["pt"])

            # "run" typed at the run-end prompt is a common mistake; route it
            # to a helper that explains the situation to the user.
            run_end_cli.register_command_handler(
                "run",
                self._run_end_run_command_handler,
                "Helper command for incorrectly entered run command at the run-end "
                "prompt.",
                prefix_aliases=["r"])

            title = "run-end: " + self._run_description
            run_end_cli.run_ui(init_command=init_command,
                               title=title,
                               title_color=title_color)

            # Clean up the dump directory.
            shutil.rmtree(self._dump_root)
        else:
            print(
                "No debug information to show following a non-debug run() call."
            )

        # Return placeholder response that currently holds no additional
        # information.
        return framework.OnRunEndResponse()
    def on_run_end(self, request):
        """Overrides on-run-end callback.

    Actions taken:
      1) Load the debug dump.
      2) Bring up the Analyzer CLI.

    Args:
      request: An instance of OnRunEndRequest.

    Returns:
      An instance of OnRunEndResponse.
    """

        # A CLI session only makes sense after a debug-mode run; any other
        # run action leaves no dump directory behind to analyze.
        if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
            # Prefer partition graphs from the run metadata; fall back to the
            # client graph def when the metadata carries none.
            partition_graphs = None
            if request.run_metadata and request.run_metadata.partition_graphs:
                partition_graphs = request.run_metadata.partition_graphs
            elif request.client_graph_def:
                partition_graphs = [request.client_graph_def]

            debug_dump = debug_data.DebugDumpDir(
                self._dump_root, partition_graphs=partition_graphs)

            if request.tf_error:
                # The run raised an op error: open on the help screen with an
                # error intro and an alarm color scheme.
                help_intro = cli_shared.get_error_intro(request.tf_error)

                init_command = "help"
                title_color = "red_on_white"
            else:
                help_intro = None
                init_command = "lt"

                title_color = "black_on_white"
                if self._run_till_filter_pass:
                    if not debug_dump.find(
                            self._tensor_filters[self._run_till_filter_pass],
                            first_n=1):
                        # No dumped tensor passes the filter in this run. Clean up the dump
                        # directory and move on.
                        shutil.rmtree(self._dump_root)
                        return framework.OnRunEndResponse()
                    else:
                        # Some dumped tensor(s) from this run passed the filter.
                        # Start the CLI filtered on that tensor filter and
                        # reset so subsequent runs pause normally again.
                        init_command = "lt -f %s" % self._run_till_filter_pass
                        title_color = "red_on_white"
                        self._run_till_filter_pass = None

            analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

            # Supply all the available tensor filters.
            for filter_name in self._tensor_filters:
                analyzer.add_tensor_filter(filter_name,
                                           self._tensor_filters[filter_name])

            # Register analyzer commands (and their two-letter aliases) on a
            # fresh curses UI instance.
            run_end_cli = curses_ui.CursesUI()
            run_end_cli.register_command_handler(
                "list_tensors",
                analyzer.list_tensors,
                analyzer.get_help("list_tensors"),
                prefix_aliases=["lt"])
            run_end_cli.register_command_handler(
                "node_info",
                analyzer.node_info,
                analyzer.get_help("node_info"),
                prefix_aliases=["ni"])
            run_end_cli.register_command_handler(
                "list_inputs",
                analyzer.list_inputs,
                analyzer.get_help("list_inputs"),
                prefix_aliases=["li"])
            run_end_cli.register_command_handler(
                "list_outputs",
                analyzer.list_outputs,
                analyzer.get_help("list_outputs"),
                prefix_aliases=["lo"])
            run_end_cli.register_command_handler(
                "print_tensor",
                analyzer.print_tensor,
                analyzer.get_help("print_tensor"),
                prefix_aliases=["pt"])

            # "run" typed at the run-end prompt is a common mistake; route it
            # to a helper that explains the situation to the user.
            run_end_cli.register_command_handler(
                "run",
                self._run_end_run_command_handler,
                "Helper command for incorrectly entered run command at the run-end "
                "prompt.",
                prefix_aliases=["r"])

            # Get names of all dumped tensors.
            dumped_tensor_names = []
            for datum in debug_dump.dumped_tensor_data:
                dumped_tensor_names.append(
                    "%s:%d" % (datum.node_name, datum.output_slot))

            # Tab completions for command "print_tensors".
            run_end_cli.register_tab_comp_context(["print_tensor", "pt"],
                                                  dumped_tensor_names)

            # Tab completion for commands "node_info", "list_inputs" and
            # "list_outputs". The list comprehension is used below because nodes()
            # output can be unicodes and they need to be converted to strs.
            run_end_cli.register_tab_comp_context(
                ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
                [str(node_name) for node_name in debug_dump.nodes()])
            # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
            #    completion contexts and registered command handlers.

            title = "run-end: " + self._run_description
            if help_intro:
                run_end_cli.set_help_intro(help_intro)
            run_end_cli.run_ui(init_command=init_command,
                               title=title,
                               title_color=title_color)

            # Clean up the dump directory.
            shutil.rmtree(self._dump_root)
        else:
            print(
                "No debug information to show following a non-debug run() call."
            )

        # Return placeholder response that currently holds no additional
        # information.
        return framework.OnRunEndResponse()
Пример #11
0
  def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
    """Prepare (but not launch) CLI for run-end, with debug dump from the run.

    Args:
      debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this
        run.
      tf_error: (None or OpError) OpError that happened during the run() call
        (if any).
      passed_filter: (None or str) Name of the tensor filter that just passed
        and caused the preparation of this run-end CLI (if any).
    """

    # A TF error takes priority: open on the help screen with an error intro
    # and an alarm color scheme.
    if tf_error:
      help_intro = cli_shared.get_error_intro(tf_error)
      self._init_command = "help"
      self._title_color = "red_on_white"
    else:
      help_intro = None
      self._init_command = "lt"
      self._title_color = "black_on_white"
      if passed_filter is not None:
        # Some dumped tensor(s) from this run passed the filter.
        self._init_command = "lt -f %s" % passed_filter
        self._title_color = "red_on_white"

    analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

    # Supply all the available tensor filters.
    for filter_name, filter_callable in self._tensor_filters.items():
      analyzer.add_tensor_filter(filter_name, filter_callable)

    # Register the analyzer commands, each with its two-letter alias.
    self._run_cli = curses_ui.CursesUI()
    for command, handler, alias in (
        ("list_tensors", analyzer.list_tensors, "lt"),
        ("node_info", analyzer.node_info, "ni"),
        ("list_inputs", analyzer.list_inputs, "li"),
        ("list_outputs", analyzer.list_outputs, "lo"),
        ("print_tensor", analyzer.print_tensor, "pt")):
      self._run_cli.register_command_handler(
          command,
          handler,
          analyzer.get_help(command),
          prefix_aliases=[alias])

    # Tab completions for command "print_tensors": names of all dumped
    # tensors, in "node_name:output_slot" form.
    dumped_tensor_names = [
        "%s:%d" % (datum.node_name, datum.output_slot)
        for datum in debug_dump.dumped_tensor_data]
    self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
                                            dumped_tensor_names)

    # Tab completion for commands "node_info", "list_inputs" and
    # "list_outputs". str() is applied because nodes() output can be
    # unicodes that need to be converted to strs.
    self._run_cli.register_tab_comp_context(
        ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
        [str(node_name) for node_name in debug_dump.nodes()])
    # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
    #    completion contexts and registered command handlers.

    self._title = "run-end: " + self._run_description

    if help_intro:
      self._run_cli.set_help_intro(help_intro)
Пример #12
0
    def testAddTensorFilterEmptyName(self):
        """An empty filter name must be rejected with a ValueError."""
        dump_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)

        with self.assertRaisesRegexp(
                ValueError, "Input argument filter_name cannot be empty."):
            dump_analyzer.add_tensor_filter(
                "", lambda unused_datum, unused_tensor: True)
Пример #13
0
 def testAddGetTensorFilterLambda(self):
     """A lambda can be registered and retrieved as a tensor filter."""
     dump_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
     dump_analyzer.add_tensor_filter("foo_filter", lambda a, b: True)
     self.assertTrue(
         dump_analyzer.get_tensor_filter("foo_filter")(None, None))