def on_run_end(self, request):
    """Overrides on-run-end callback.

    Loads the debug dump produced by the run, brings up the Analyzer CLI
    for inspection, and cleans up the dump directory afterwards.

    Args:
      request: An instance of OnRunEndRequest.

    Returns:
      An instance of OnRunEndResponse.
    """
    self._is_run_start = False

    if request.performed_action != framework.OnRunStartAction.DEBUG_RUN:
        # A non-debug run() call produced no dump, hence nothing to show.
        self._run_start_response = None
        return framework.OnRunEndResponse()

    # Prefer partition graphs from run metadata; fall back to the client
    # graph def when metadata carries none.
    if request.run_metadata and request.run_metadata.partition_graphs:
        graphs = request.run_metadata.partition_graphs
    elif request.client_graph_def:
        graphs = [request.client_graph_def]
    else:
        graphs = None

    if request.tf_error and not os.path.isdir(self._dump_root):
        # The dump root may not exist due to errors that occurred prior to
        # graph execution (e.g., invalid device assignments); re-raise the
        # exception exactly as the unwrapped Session would.
        raise request.tf_error

    dump = debug_data.DebugDumpDir(self._dump_root, partition_graphs=graphs)
    dump.set_python_graph(self._sess.graph)

    passed_filter = None
    if self._active_tensor_filter:
        filter_fn = self._tensor_filters[self._active_tensor_filter]
        if dump.find(filter_fn, first_n=1):
            # Some dumped tensor(s) from this run passed the filter.
            passed_filter = self._active_tensor_filter
            self._active_tensor_filter = None
        else:
            # No dumped tensor passes the filter in this run. Clean up the
            # dump directory and move on.
            self._remove_dump_root()
            return framework.OnRunEndResponse()

    self._prep_cli_for_run_end(dump, request.tf_error, passed_filter)
    self._run_start_response = self._launch_cli()

    # Clean up the dump generated by this run.
    self._remove_dump_root()

    # Placeholder response that currently holds no additional information.
    return framework.OnRunEndResponse()
    def on_run_end(self, request):
        """Overrides on-run-end callback.

        Actions taken:
          1) Load the debug dump.
          2) Bring up the Analyzer CLI.

        Args:
          request: An instance of OnRunEndRequest.

        Returns:
          An instance of OnRunEndResponse.
        """

        self._is_run_start = False
        if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
            # Prefer partition graphs from run metadata; fall back to the
            # client graph def when metadata carries none.
            partition_graphs = None
            if request.run_metadata and request.run_metadata.partition_graphs:
                partition_graphs = request.run_metadata.partition_graphs
            elif request.client_graph_def:
                partition_graphs = [request.client_graph_def]

            if request.tf_error and not os.path.isdir(self._dump_root):
                # The dump root may not exist due to errors that occurred
                # prior to graph execution (e.g., invalid device
                # assignments). Re-raise the exception exactly as the
                # unwrapped Session would, instead of letting DebugDumpDir
                # fail on the missing directory.
                raise request.tf_error

            debug_dump = debug_data.DebugDumpDir(
                self._dump_root, partition_graphs=partition_graphs)
            debug_dump.set_python_graph(self._sess.graph)

            passed_filter = None
            if self._active_tensor_filter:
                if not debug_dump.find(
                        self._tensor_filters[self._active_tensor_filter],
                        first_n=1):
                    # No dumped tensor passes the filter in this run. Clean
                    # up the dump directory and move on.
                    self._remove_dump_root()
                    return framework.OnRunEndResponse()
                else:
                    # Some dumped tensor(s) from this run passed the filter.
                    passed_filter = self._active_tensor_filter
                    self._active_tensor_filter = None

            self._prep_cli_for_run_end(debug_dump, request.tf_error,
                                       passed_filter)

            self._run_start_response = self._launch_cli()

            # Clean up the dump generated by this run.
            self._remove_dump_root()
        else:
            # No debug information to show following a non-debug run() call.
            self._run_start_response = None

        # Return placeholder response that currently holds no additional
        # information.
        return framework.OnRunEndResponse()
  def on_run_end(self, request):
    """Override abstract on-run-end callback method.

    Records run-end observations: bumps the run-end call counter and
    stores the action that was performed during the run.
    """
    observations = self._obs
    observations["on_run_end_count"] = observations["on_run_end_count"] + 1
    observations["performed_action"] = request.performed_action
    return framework.OnRunEndResponse()
Example #4
0
 def on_run_end(self, request):
     """No-op run-end callback; returns an empty response."""
     response = framework.OnRunEndResponse()
     return response
Example #5
0
    def on_run_end(self, request):
        """See doc of BaseDebugWrapperSession.on_run_end."""
        # This wrapper takes no action at run end; emit an empty response.
        response = framework.OnRunEndResponse()
        return response
    def on_run_end(self, request):
        """Overrides on-run-end callback.

    Actions taken:
      1) Load the debug dump.
      2) Bring up the Analyzer CLI.

    Args:
      request: An instance of OnRunEndRequest.

    Returns:
      An instance of OnRunEndResponse.
    """

        if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
            # Prefer partition graphs from run metadata; fall back to the
            # client graph def when metadata carries none.
            partition_graphs = None
            if request.run_metadata and request.run_metadata.partition_graphs:
                partition_graphs = request.run_metadata.partition_graphs
            elif request.client_graph_def:
                partition_graphs = [request.client_graph_def]

            debug_dump = debug_data.DebugDumpDir(
                self._dump_root, partition_graphs=partition_graphs)

            if request.tf_error:
                # The run raised an error: open the CLI on the help screen,
                # with an intro describing the error and a red title bar.
                help_intro = cli_shared.get_error_intro(request.tf_error)

                init_command = "help"
                title_color = "red_on_white"
            else:
                # Normal run: start the CLI by listing the dumped tensors.
                help_intro = None
                init_command = "lt"

                title_color = "black_on_white"
                if self._run_till_filter_pass:
                    if not debug_dump.find(
                            self._tensor_filters[self._run_till_filter_pass],
                            first_n=1):
                        # No dumped tensor passes the filter in this run. Clean up the dump
                        # directory and move on.
                        shutil.rmtree(self._dump_root)
                        return framework.OnRunEndResponse()
                    else:
                        # Some dumped tensor(s) from this run passed the filter.
                        init_command = "lt -f %s" % self._run_till_filter_pass
                        title_color = "red_on_white"
                        self._run_till_filter_pass = None

            analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

            # Supply all the available tensor filters.
            for filter_name in self._tensor_filters:
                analyzer.add_tensor_filter(filter_name,
                                           self._tensor_filters[filter_name])

            # Build the curses UI and wire up the analyzer commands with
            # their short prefix aliases.
            run_end_cli = curses_ui.CursesUI()
            run_end_cli.register_command_handler(
                "list_tensors",
                analyzer.list_tensors,
                analyzer.get_help("list_tensors"),
                prefix_aliases=["lt"])
            run_end_cli.register_command_handler(
                "node_info",
                analyzer.node_info,
                analyzer.get_help("node_info"),
                prefix_aliases=["ni"])
            run_end_cli.register_command_handler(
                "list_inputs",
                analyzer.list_inputs,
                analyzer.get_help("list_inputs"),
                prefix_aliases=["li"])
            run_end_cli.register_command_handler(
                "list_outputs",
                analyzer.list_outputs,
                analyzer.get_help("list_outputs"),
                prefix_aliases=["lo"])
            run_end_cli.register_command_handler(
                "print_tensor",
                analyzer.print_tensor,
                analyzer.get_help("print_tensor"),
                prefix_aliases=["pt"])

            # "run" is not valid at the run-end prompt; register a helper
            # handler to respond to it gracefully.
            run_end_cli.register_command_handler(
                "run",
                self._run_end_run_command_handler,
                "Helper command for incorrectly entered run command at the run-end "
                "prompt.",
                prefix_aliases=["r"])

            # Get names of all dumped tensors.
            dumped_tensor_names = []
            for datum in debug_dump.dumped_tensor_data:
                dumped_tensor_names.append(
                    "%s:%d" % (datum.node_name, datum.output_slot))

            # Tab completions for command "print_tensors".
            run_end_cli.register_tab_comp_context(["print_tensor", "pt"],
                                                  dumped_tensor_names)

            # Tab completion for commands "node_info", "list_inputs" and
            # "list_outputs". The list comprehension is used below because nodes()
            # output can be unicodes and they need to be converted to strs.
            run_end_cli.register_tab_comp_context(
                ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
                [str(node_name) for node_name in debug_dump.nodes()])
            # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
            #    completion contexts and registered command handlers.

            title = "run-end: " + self._run_description
            if help_intro:
                run_end_cli.set_help_intro(help_intro)
            # Blocks until the user exits the CLI.
            run_end_cli.run_ui(init_command=init_command,
                               title=title,
                               title_color=title_color)

            # Clean up the dump directory.
            shutil.rmtree(self._dump_root)
        else:
            print(
                "No debug information to show following a non-debug run() call."
            )

        # Return placeholder response that currently holds no additional
        # information.
        return framework.OnRunEndResponse()