Code example #1
    def testShapeError(self):
        """Checks the formatted error intro produced for a run-time OpError."""
        op_error = errors.OpError(
            None, self.var_a.initializer, "foo description", None)

        intro = cli_shared.get_error_intro(op_error)

        self.assertEqual("!!! An error occurred during the run !!!",
                         intro.lines[1])
        self.assertEqual([(0, len(intro.lines[1]), "blink")],
                         intro.font_attr_segs[1])

        # Each suggested command is rendered at column 2 as a bold,
        # menu-item-carrying font attribute segment.
        for row, command in ((4, "ni -a -d -t a/Assign"),
                             (6, "li -r a/Assign"),
                             (8, "lt")):
            self.assertEqual(2, intro.lines[row].index(command))
            segment = intro.font_attr_segs[row][0]
            self.assertEqual(2, segment[0])
            self.assertEqual(2 + len(command), segment[1])
            self.assertEqual(command, segment[2][0].content)
            self.assertEqual("bold", segment[2][1])

        self.assertStartsWith(intro.lines[11], "Op name:")
        self.assertTrue(intro.lines[11].endswith("a/Assign"))

        self.assertStartsWith(intro.lines[12], "Error type:")
        self.assertTrue(intro.lines[12].endswith(str(type(op_error))))

        self.assertEqual("Details:", intro.lines[14])
        self.assertStartsWith(intro.lines[15], "foo description")
Code example #2
    def testShapeError(self):
        """Verifies the error-intro layout for an initializer OpError."""
        op_error = errors.OpError(None, self.var_a.initializer,
                                  "foo description", None)

        intro = cli_shared.get_error_intro(op_error)

        self.assertEqual("!!! An error occurred during the run !!!",
                         intro.lines[1])
        self.assertEqual([(0, len(intro.lines[1]), "blink")],
                         intro.font_attr_segs[1])

        # Suggested commands appear at column 2, highlighted in bold, and the
        # bold segment spans exactly the command text.
        for row, command in ((4, "ni a/Assign"),
                             (6, "li -r a/Assign"),
                             (8, "lt")):
            self.assertEqual(2, intro.lines[row].index(command))
            self.assertEqual([(2, 2 + len(command), "bold")],
                             intro.font_attr_segs[row])

        self.assertStartsWith(intro.lines[11], "Op name:")
        self.assertTrue(intro.lines[11].endswith("a/Assign"))

        self.assertStartsWith(intro.lines[12], "Error type:")
        self.assertTrue(intro.lines[12].endswith(str(type(op_error))))

        self.assertEqual("Details:", intro.lines[14])
        self.assertStartsWith(intro.lines[15], "foo description")
Code example #3
    def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
        """Configure (without launching) the run-end CLI for a debug dump.

    Args:
      debug_dump: (debug_data.DebugDumpDir) Debug dump directory produced by
        the just-finished run.
      tf_error: (None or OpError) Error raised by the run() call, if any.
      passed_filter: (None or str) Name of the tensor filter whose passing
        caused this run-end CLI to be prepared, if any.
    """

        if tf_error:
            # The run raised an error: open on the help screen (which carries
            # the error intro) with a red title.
            help_intro = cli_shared.get_error_intro(tf_error)
            self._init_command = "help"
            self._title_color = "red_on_white"
        elif passed_filter is not None:
            # Some dumped tensor(s) from this run passed the filter.
            help_intro = None
            self._init_command = "lt -f %s" % passed_filter
            self._title_color = "red_on_white"
        else:
            help_intro = None
            self._init_command = "lt"
            self._title_color = "black_on_white"

        self._run_cli = analyzer_cli.create_analyzer_ui(
            debug_dump,
            self._tensor_filters,
            ui_type=self._ui_type,
            on_ui_exit=self._remove_dump_root)

        # "node_name:output_slot" identifiers for every dumped tensor, used
        # for tab completion of the print_tensor command.
        dumped_tensor_names = [
            "%s:%d" % (datum.node_name, datum.output_slot)
            for datum in debug_dump.dumped_tensor_data]
        self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
                                                dumped_tensor_names)

        # Tab completion for the node-oriented commands. nodes() can yield
        # unicode strings, hence the str() conversion.
        node_names = [str(node_name) for node_name in debug_dump.nodes()]
        self._run_cli.register_tab_comp_context(
            ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
            node_names)
        # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
        #    completion contexts and registered command handlers.

        self._title = "run-end: " + self._run_description

        if help_intro:
            self._run_cli.set_help_intro(help_intro)
Code example #4
  def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
    """Set up (but do not launch) the CLI shown at the end of a run.

    Args:
      debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this
        run.
      tf_error: (None or OpError) OpError that happened during the run() call
        (if any).
      passed_filter: (None or str) Name of the tensor filter that just passed
        and caused the preparation of this run-end CLI (if any).
    """

    help_intro = None
    if tf_error:
      # Start on the help screen so the error intro is immediately visible.
      help_intro = cli_shared.get_error_intro(tf_error)
      self._init_command = "help"
      self._title_color = "red_on_white"
    else:
      self._init_command = "lt"
      self._title_color = "black_on_white"
      if passed_filter is not None:
        # Some dumped tensor(s) from this run passed the filter.
        self._init_command = "lt -f %s" % passed_filter
        self._title_color = "red_on_white"

    self._run_cli = analyzer_cli.create_analyzer_ui(
        debug_dump, self._tensor_filters, ui_type=self._ui_type,
        on_ui_exit=self._remove_dump_root)

    # Collect "node_name:output_slot" names of all dumped tensors; used for
    # tab completion of the print_tensor command.
    dumped_tensor_names = ["%s:%d" % (datum.node_name, datum.output_slot)
                           for datum in debug_dump.dumped_tensor_data]
    self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
                                            dumped_tensor_names)

    # Tab completion for the node-oriented commands. The comprehension
    # converts possibly-unicode node names to strs.
    self._run_cli.register_tab_comp_context(
        ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
        [str(node_name) for node_name in debug_dump.nodes()])
    # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
    #    completion contexts and registered command handlers.

    self._title = "run-end: " + self._run_description
    if help_intro:
      self._run_cli.set_help_intro(help_intro)
Code example #5
    def on_run_end(self, request):
        """Overrides on-run-end callback.

    Loads the debug dump produced by the run and brings up the Analyzer CLI
    so the user can inspect the dumped tensors and graph structure.

    Args:
      request: Run-end request object; this method reads its
        performed_action, run_metadata, client_graph_def and tf_error
        attributes. NOTE(review): the original docstring called this an
        OnSessionInitRequest, which looks like a copy-paste slip --
        presumably it is an OnRunEndRequest; confirm against the caller.

    Returns:
      An instance of framework.OnRunEndResponse (currently carries no
      additional information).
    """

        if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
            # Prefer the partition graphs recorded in run_metadata; fall back
            # to the client-side GraphDef when they are unavailable.
            partition_graphs = None
            if request.run_metadata and request.run_metadata.partition_graphs:
                partition_graphs = request.run_metadata.partition_graphs
            elif request.client_graph_def:
                partition_graphs = [request.client_graph_def]

            debug_dump = debug_data.DebugDumpDir(self._dump_root, partition_graphs=partition_graphs)

            if request.tf_error:
                # The run raised an error: open the CLI on the help screen,
                # which carries the error intro, with a red title.
                help_intro = cli_shared.get_error_intro(request.tf_error)

                init_command = "help"
                title_color = "red_on_white"
            else:
                help_intro = None
                init_command = "lt"

                title_color = "black_on_white"
                if self._run_till_filter_pass:
                    if not debug_dump.find(self._tensor_filters[self._run_till_filter_pass], first_n=1):
                        # No dumped tensor passes the filter in this run. Clean up the dump
                        # directory and move on.
                        shutil.rmtree(self._dump_root)
                        return framework.OnRunEndResponse()
                    else:
                        # Some dumped tensor(s) from this run passed the filter.
                        init_command = "lt -f %s" % self._run_till_filter_pass
                        title_color = "red_on_white"
                        self._run_till_filter_pass = None

            analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

            # Supply all the available tensor filters.
            for filter_name in self._tensor_filters:
                analyzer.add_tensor_filter(filter_name, self._tensor_filters[filter_name])

            # Register the analyzer commands, with their short prefix aliases,
            # on a fresh curses UI instance.
            run_end_cli = curses_ui.CursesUI()
            run_end_cli.register_command_handler(
                "list_tensors", analyzer.list_tensors, analyzer.get_help("list_tensors"), prefix_aliases=["lt"]
            )
            run_end_cli.register_command_handler(
                "node_info", analyzer.node_info, analyzer.get_help("node_info"), prefix_aliases=["ni"]
            )
            run_end_cli.register_command_handler(
                "list_inputs", analyzer.list_inputs, analyzer.get_help("list_inputs"), prefix_aliases=["li"]
            )
            run_end_cli.register_command_handler(
                "list_outputs", analyzer.list_outputs, analyzer.get_help("list_outputs"), prefix_aliases=["lo"]
            )
            run_end_cli.register_command_handler(
                "print_tensor", analyzer.print_tensor, analyzer.get_help("print_tensor"), prefix_aliases=["pt"]
            )

            # "run"/"r" is not valid at the run-end prompt; this handler tells
            # the user so instead of failing opaquely.
            run_end_cli.register_command_handler(
                "run",
                self._run_end_run_command_handler,
                "Helper command for incorrectly entered run command at the run-end " "prompt.",
                prefix_aliases=["r"],
            )

            # Get names of all dumped tensors, as "node_name:output_slot".
            dumped_tensor_names = []
            for datum in debug_dump.dumped_tensor_data:
                dumped_tensor_names.append("%s:%d" % (datum.node_name, datum.output_slot))

            # Tab completions for command "print_tensors".
            run_end_cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names)

            # Tab completion for commands "node_info", "list_inputs" and
            # "list_outputs". The list comprehension is used below because nodes()
            # output can be unicodes and they need to be converted to strs.
            run_end_cli.register_tab_comp_context(
                ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
                [str(node_name) for node_name in debug_dump.nodes()],
            )
            # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
            #    completion contexts and registered command handlers.

            title = "run-end: " + self._run_description
            if help_intro:
                run_end_cli.set_help_intro(help_intro)
            # Blocks until the user exits the CLI.
            run_end_cli.run_ui(init_command=init_command, title=title, title_color=title_color)

            # Clean up the dump directory.
            shutil.rmtree(self._dump_root)
        else:
            print("No debug information to show following a non-debug run() call.")

        # Return placeholder response that currently holds no additional
        # information.
        return framework.OnRunEndResponse()
Code example #6
 def testGetErrorIntroForNoOpName(self):
   """An OpError without an op yields an 'unknown op' line in the intro."""
   intro = cli_shared.get_error_intro(
       errors.OpError(None, None, "Fake OpError", -1))
   self.assertIn("Cannot determine the name of the op", intro.lines[3])
Code example #7
 def testGetErrorIntroForNoOpName(self):
     """The intro flags that the op name cannot be determined."""
     fake_error = errors.OpError(None, None, "Fake OpError", -1)
     intro_lines = cli_shared.get_error_intro(fake_error).lines
     self.assertIn("Cannot determine the name of the op", intro_lines[3])
Code example #8
    def on_run_end(self, request):
        """Overrides on-run-end callback.

    Loads the debug dump produced by the run and brings up the Analyzer CLI
    so the user can inspect the dumped tensors and graph structure.

    Args:
      request: Run-end request object; this method reads its
        performed_action, run_metadata, client_graph_def and tf_error
        attributes. NOTE(review): the original docstring called this an
        OnSessionInitRequest, which looks like a copy-paste slip --
        presumably it is an OnRunEndRequest; confirm against the caller.

    Returns:
      An instance of framework.OnRunEndResponse (currently carries no
      additional information).
    """

        if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
            # Prefer the partition graphs recorded in run_metadata; fall back
            # to the client-side GraphDef when they are unavailable.
            partition_graphs = None
            if request.run_metadata and request.run_metadata.partition_graphs:
                partition_graphs = request.run_metadata.partition_graphs
            elif request.client_graph_def:
                partition_graphs = [request.client_graph_def]

            debug_dump = debug_data.DebugDumpDir(
                self._dump_root, partition_graphs=partition_graphs)

            if request.tf_error:
                # The run raised an error: open the CLI on the help screen,
                # which carries the error intro, with a red title.
                help_intro = cli_shared.get_error_intro(request.tf_error)

                init_command = "help"
                title_color = "red_on_white"
            else:
                help_intro = None
                init_command = "lt"

                title_color = "black_on_white"
                if self._run_till_filter_pass:
                    if not debug_dump.find(
                            self._tensor_filters[self._run_till_filter_pass],
                            first_n=1):
                        # No dumped tensor passes the filter in this run. Clean up the dump
                        # directory and move on.
                        shutil.rmtree(self._dump_root)
                        return framework.OnRunEndResponse()
                    else:
                        # Some dumped tensor(s) from this run passed the filter.
                        init_command = "lt -f %s" % self._run_till_filter_pass
                        title_color = "red_on_white"
                        self._run_till_filter_pass = None

            analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

            # Supply all the available tensor filters.
            for filter_name in self._tensor_filters:
                analyzer.add_tensor_filter(filter_name,
                                           self._tensor_filters[filter_name])

            # Register the analyzer commands, with their short prefix aliases,
            # on a fresh curses UI instance.
            run_end_cli = curses_ui.CursesUI()
            run_end_cli.register_command_handler(
                "list_tensors",
                analyzer.list_tensors,
                analyzer.get_help("list_tensors"),
                prefix_aliases=["lt"])
            run_end_cli.register_command_handler(
                "node_info",
                analyzer.node_info,
                analyzer.get_help("node_info"),
                prefix_aliases=["ni"])
            run_end_cli.register_command_handler(
                "list_inputs",
                analyzer.list_inputs,
                analyzer.get_help("list_inputs"),
                prefix_aliases=["li"])
            run_end_cli.register_command_handler(
                "list_outputs",
                analyzer.list_outputs,
                analyzer.get_help("list_outputs"),
                prefix_aliases=["lo"])
            run_end_cli.register_command_handler(
                "print_tensor",
                analyzer.print_tensor,
                analyzer.get_help("print_tensor"),
                prefix_aliases=["pt"])

            # "run"/"r" is not valid at the run-end prompt; this handler tells
            # the user so instead of failing opaquely.
            run_end_cli.register_command_handler(
                "run",
                self._run_end_run_command_handler,
                "Helper command for incorrectly entered run command at the run-end "
                "prompt.",
                prefix_aliases=["r"])

            # Get names of all dumped tensors, as "node_name:output_slot".
            dumped_tensor_names = []
            for datum in debug_dump.dumped_tensor_data:
                dumped_tensor_names.append(
                    "%s:%d" % (datum.node_name, datum.output_slot))

            # Tab completions for command "print_tensors".
            run_end_cli.register_tab_comp_context(["print_tensor", "pt"],
                                                  dumped_tensor_names)

            # Tab completion for commands "node_info", "list_inputs" and
            # "list_outputs". The list comprehension is used below because nodes()
            # output can be unicodes and they need to be converted to strs.
            run_end_cli.register_tab_comp_context(
                ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
                [str(node_name) for node_name in debug_dump.nodes()])
            # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
            #    completion contexts and registered command handlers.

            title = "run-end: " + self._run_description
            if help_intro:
                run_end_cli.set_help_intro(help_intro)
            # Blocks until the user exits the CLI.
            run_end_cli.run_ui(init_command=init_command,
                               title=title,
                               title_color=title_color)

            # Clean up the dump directory.
            shutil.rmtree(self._dump_root)
        else:
            print(
                "No debug information to show following a non-debug run() call."
            )

        # Return placeholder response that currently holds no additional
        # information.
        return framework.OnRunEndResponse()