Example #1
0
def create_analyzer_curses_cli(debug_dump, tensor_filters=None):
    """Construct a curses CLI wired with the standard analyzer commands.

    Args:
      debug_dump: (debug_data.DebugDumpDir) The debug dump to analyze.
      tensor_filters: (dict) Optional mapping from tensor filter name (str) to
        the tensor filter callable itself.

    Returns:
      (curses_ui.CursesUI) A curses CLI object with the standard analyzer
        commands and tab-completions registered.
    """
    analyzer = DebugAnalyzer(debug_dump)
    for filter_name, filter_callable in (tensor_filters or {}).items():
        analyzer.add_tensor_filter(filter_name, filter_callable)

    cli = curses_ui.CursesUI()

    # (command, handler, prefix aliases) for each standard analyzer command.
    command_specs = [
        ("list_tensors", analyzer.list_tensors, ["lt"]),
        ("node_info", analyzer.node_info, ["ni"]),
        ("list_inputs", analyzer.list_inputs, ["li"]),
        ("list_outputs", analyzer.list_outputs, ["lo"]),
        ("print_tensor", analyzer.print_tensor, ["pt"]),
    ]
    for command, handler, aliases in command_specs:
        cli.register_command_handler(command,
                                     handler,
                                     analyzer.get_help(command),
                                     prefix_aliases=aliases)

    # Tab completions for command "print_tensors".
    dumped_tensor_names = [
        "%s:%d" % (datum.node_name, datum.output_slot)
        for datum in debug_dump.dumped_tensor_data
    ]
    cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names)

    return cli
Example #2
0
def get_ui(ui_type,
           on_ui_exit=None,
           available_ui_types=None,
           config=None):
  """Create a `base_ui.BaseUI` subtype.

  This factory method attempts to fallback to other available ui_types on
  ImportError. For example, if `ui_type` is `curses`, but `curses` cannot be
  imported properly, e.g., on Windows, will fallback to `readline`.

  Args:
    ui_type: (`str`) requested UI type. Currently supported:
      (curses | readline)
    on_ui_exit: (`Callable`) the callback to be called when the UI exits.
    available_ui_types: (`None` or `list` of `str`) Manually-set available
      ui_types.
    config: An instance of `cli_config.CLIConfig()` carrying user-facing
      configurations.

  Returns:
    A `base_ui.BaseUI` subtype object.

  Raises:
    ValueError: on invalid ui_type or on exhausting or fallback ui_types.
  """
  if available_ui_types is None:
    available_ui_types = copy.deepcopy(SUPPORTED_UI_TYPES)

  if ui_type and (ui_type not in available_ui_types):
    raise ValueError("Invalid ui_type: '%s'" % ui_type)

  try:
    # pylint: disable=g-import-not-at-top
    if not ui_type or ui_type == "curses":
      from tensorflow.python.debug.cli import curses_ui
      return curses_ui.CursesUI(on_ui_exit=on_ui_exit, config=config)
    elif ui_type == "readline":
      from tensorflow.python.debug.cli import readline_ui
      return readline_ui.ReadlineUI(on_ui_exit=on_ui_exit, config=config)
    # pylint: enable=g-import-not-at-top
  except ImportError:
    available_ui_types.remove(ui_type)
    if not available_ui_types:
      raise ValueError("Exhausted all fallback ui_types.")
    return get_ui(available_ui_types[0],
                  available_ui_types=available_ui_types)
Example #3
0
  def _prep_cli_for_run_start(self):
    """Set up (without launching) the curses CLI for the run-start prompt."""
    self._run_cli = curses_ui.CursesUI()

    intro = debugger_cli_common.RichTextLines([])
    if self._run_call_count == 1:
      # The tfdbg logo is shown only once, before the very first run.
      intro.extend(cli_shared.get_tfdbg_logo())
    intro.extend(debugger_cli_common.RichTextLines("Upcoming run:"))
    intro.extend(self._run_info)
    self._run_cli.set_help_intro(intro)

    # Initial screen state describing the upcoming run.
    self._title = "run-start: " + self._run_description
    self._init_command = "help"
    self._title_color = "blue_on_white"
Example #4
0
  def invoke_node_stepper(self,
                          node_stepper,
                          restore_variable_values_on_exit=True):
    """Overrides method in base class to implement interactive node stepper.

    Args:
      node_stepper: (stepper.NodeStepper) The underlying NodeStepper API
        object.
      restore_variable_values_on_exit: (bool) Whether any variables whose
        values were altered during this node-stepper invocation should be
        restored to their old values when this invocation ends.

    Returns:
      The same return values as the `Session.run()` call on the same fetches
        as the NodeStepper.
    """
    stepper = stepper_cli.NodeStepperCLI(node_stepper)

    # On exiting the node-stepper CLI, the finalize method of the
    # node_stepper object will be called, ensuring that the state of the
    # graph will be the same as if the stepping did not happen.
    # TODO(cais): Perhaps some users will want the effect of the interactive
    # stepping and value injection to persist. When that happens, make the
    # call to finalize optional.
    exit_callback = (node_stepper.restore_variable_values
                     if restore_variable_values_on_exit else None)
    stepper_ui = curses_ui.CursesUI(on_ui_exit=exit_callback)

    # (command, handler, prefix aliases) for every stepper command; the help
    # text comes from the matching argument parser.
    handler_specs = (
        ("list_sorted_nodes", stepper.list_sorted_nodes, ["lt", "lsn"]),
        ("cont", stepper.cont, ["ct", "c"]),
        ("step", stepper.step, ["st", "s"]),
        ("print_tensor", stepper.print_tensor, ["pt"]),
        ("inject_value", stepper.inject_value,
         ["inject", "override_value", "override"]),
    )
    for command, handler, aliases in handler_specs:
      stepper_ui.register_command_handler(
          command,
          handler,
          stepper.arg_parsers[command].format_help(),
          prefix_aliases=aliases)

    # Register tab completion candidates.
    stepper_ui.register_tab_comp_context([
        "cont", "ct", "c", "pt", "inject_value", "inject", "override_value",
        "override"
    ], [str(elem) for elem in node_stepper.sorted_nodes()])
    # TODO(cais): Tie up register_tab_comp_context to a single alias to
    # shorten calls like this.

    return stepper_ui.run_ui(
        init_command="lt",
        title="Node Stepper: " + self._run_description,
        title_color="blue_on_white")
Example #5
0
    def on_run_end(self, request):
        """Overrides on-run-end callback.

        Actions taken:
          1) Load the debug dump.
          2) Bring up the Analyzer CLI.

        Args:
          request: An instance of OnSessionInitRequest.

        Returns:
          An instance of OnSessionInitResponse.
        """
        if request.performed_action != framework.OnRunStartAction.DEBUG_RUN:
            print(
                "No debug information to show following a non-debug run() call."
            )
            # Placeholder response that currently holds no additional
            # information.
            return framework.OnRunEndResponse()

        debug_dump = debug_data.DebugDumpDir(
            self._dump_root,
            partition_graphs=request.run_metadata.partition_graphs)

        init_command = "lt"
        title_color = "green"
        if self._run_till_filter_pass:
            filter_callable = self._tensor_filters[self._run_till_filter_pass]
            if not debug_dump.find(filter_callable, first_n=1):
                # No dumped tensor passes the filter in this run. Clean up
                # the dump directory and move on.
                shutil.rmtree(self._dump_root)
                return framework.OnRunEndResponse()
            # Some dumped tensor(s) from this run passed the filter.
            init_command = "lt -f %s" % self._run_till_filter_pass
            title_color = "red"
            self._run_till_filter_pass = None

        analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

        # Supply all the available tensor filters.
        for filter_name, filter_callable in self._tensor_filters.items():
            analyzer.add_tensor_filter(filter_name, filter_callable)

        run_end_cli = curses_ui.CursesUI()
        # Register the standard analyzer commands from a spec table.
        for command, handler, aliases in (
                ("list_tensors", analyzer.list_tensors, ["lt"]),
                ("node_info", analyzer.node_info, ["ni"]),
                ("list_inputs", analyzer.list_inputs, ["li"]),
                ("list_outputs", analyzer.list_outputs, ["lo"]),
                ("print_tensor", analyzer.print_tensor, ["pt"])):
            run_end_cli.register_command_handler(
                command,
                handler,
                analyzer.get_help(command),
                prefix_aliases=aliases)

        run_end_cli.register_command_handler(
            "run",
            self._run_end_run_command_handler,
            "Helper command for incorrectly entered run command at the run-end "
            "prompt.",
            prefix_aliases=["r"])

        run_end_cli.run_ui(init_command=init_command,
                           title="run-end: " + self._run_description,
                           title_color=title_color)

        # Clean up the dump directory.
        shutil.rmtree(self._dump_root)

        # Return placeholder response that currently holds no additional
        # information.
        return framework.OnRunEndResponse()
Example #6
0
    def on_run_start(self, request):
        """Overrides on-run-start callback.

        Invoke the CLI to let user choose what action to take:
          run / run --no_debug / step.

        Args:
          request: An instance of OnSessionInitRequest.

        Returns:
          An instance of OnSessionInitResponse.

        Raises:
          RuntimeError: If user chooses to prematurely exit the debugger.
        """
        self._update_run_calls_state(request.run_call_count, request.fetches,
                                     request.feed_dict)

        if self._run_till_filter_pass:
            # If we are running till a filter passes, we just need to keep running
            # with the DEBUG_RUN option.
            return framework.OnRunStartResponse(
                framework.OnRunStartAction.DEBUG_RUN,
                self._get_run_debug_urls())

        run_start_cli = curses_ui.CursesUI()

        run_start_cli.register_command_handler(
            "run",
            self._on_run_start_run_handler,
            self._on_run_start_parsers["run"].format_help(),
            prefix_aliases=["r"])
        run_start_cli.register_command_handler(
            "invoke_stepper",
            self._on_run_start_step_handler,
            self._on_run_start_parsers["invoke_stepper"].format_help(),
            prefix_aliases=["s"])

        # Fetches may be a single fetchable or a list/tuple of them.
        if isinstance(request.fetches, (list, tuple)):
            fetch_lines = [fetch.name for fetch in request.fetches]
        else:
            fetch_lines = [repr(request.fetches)]

        if not request.feed_dict:
            feed_dict_lines = ["(Empty)"]
        else:
            feed_dict_lines = [feed_key.name for feed_key in request.feed_dict]

        # TODO(cais): Refactor into its own function.
        help_intro = [
            "======================================",
            "About to enter Session run() call #%d:" % request.run_call_count,
            "", "Fetch(es):"
        ]
        help_intro.extend(["  " + line for line in fetch_lines])
        help_intro.extend(["", "Feed dict(s):"])
        help_intro.extend(["  " + line for line in feed_dict_lines])
        help_intro.extend([
            "======================================", "",
            "Select one of the following commands to proceed ---->", "  run:",
            "      Execute the run() call with the debug tensor-watching",
            "  run -n:",
            "      Execute the run() call without the debug tensor-watching",
            "  run -f <filter_name>:",
            "      Keep executing run() calls until a dumped tensor passes ",
            "      a given, registered filter emerge. Registered filter(s):"
        ])

        if self._tensor_filters:
            for filter_name in self._tensor_filters:
                help_intro.append("        * " + filter_name)
        else:
            help_intro.append("        (None)")

        # Bug fix: a missing comma previously caused implicit string-literal
        # concatenation of "For more details, see help below:" with "",
        # silently dropping the intended trailing blank line.
        help_intro.extend([
            "",
            "For more details, see help below:",
            "",
        ])
        run_start_cli.set_help_intro(help_intro)

        # Create initial screen output detailing the run.
        title = "run-start: " + self._run_description
        response = run_start_cli.run_ui(init_command="help",
                                        title=title,
                                        title_color="yellow")
        if response == debugger_cli_common.EXPLICIT_USER_EXIT:
            # Explicit user "exit" command leads to sys.exit(1).
            print("Note: user exited from debugger CLI: sys.exit(1) called.",
                  file=sys.stderr)
            sys.exit(1)

        return response
    def on_run_end(self, request):
        """Overrides on-run-end callback.

        Actions taken:
          1) Load the debug dump.
          2) Bring up the Analyzer CLI.

        Args:
          request: An instance of OnSessionInitRequest.

        Returns:
          An instance of OnSessionInitResponse.
        """
        if request.performed_action != framework.OnRunStartAction.DEBUG_RUN:
            print(
                "No debug information to show following a non-debug run() call."
            )
            # Placeholder response that currently holds no additional
            # information.
            return framework.OnRunEndResponse()

        # Prefer partition graphs from run metadata; fall back to the client
        # graph def if present.
        partition_graphs = None
        if request.run_metadata and request.run_metadata.partition_graphs:
            partition_graphs = request.run_metadata.partition_graphs
        elif request.client_graph_def:
            partition_graphs = [request.client_graph_def]

        debug_dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=partition_graphs)

        if request.tf_error:
            help_intro = cli_shared.get_error_intro(request.tf_error)
            init_command = "help"
            title_color = "red_on_white"
        else:
            help_intro = None
            init_command = "lt"
            title_color = "black_on_white"
            if self._run_till_filter_pass:
                if not debug_dump.find(
                        self._tensor_filters[self._run_till_filter_pass],
                        first_n=1):
                    # No dumped tensor passes the filter in this run. Clean
                    # up the dump directory and move on.
                    shutil.rmtree(self._dump_root)
                    return framework.OnRunEndResponse()
                # Some dumped tensor(s) from this run passed the filter.
                init_command = "lt -f %s" % self._run_till_filter_pass
                title_color = "red_on_white"
                self._run_till_filter_pass = None

        analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

        # Supply all the available tensor filters.
        for filter_name, filter_callable in self._tensor_filters.items():
            analyzer.add_tensor_filter(filter_name, filter_callable)

        run_end_cli = curses_ui.CursesUI()
        # Register the standard analyzer commands from a spec table.
        for command, handler, aliases in (
                ("list_tensors", analyzer.list_tensors, ["lt"]),
                ("node_info", analyzer.node_info, ["ni"]),
                ("list_inputs", analyzer.list_inputs, ["li"]),
                ("list_outputs", analyzer.list_outputs, ["lo"]),
                ("print_tensor", analyzer.print_tensor, ["pt"])):
            run_end_cli.register_command_handler(
                command,
                handler,
                analyzer.get_help(command),
                prefix_aliases=aliases)

        run_end_cli.register_command_handler(
            "run",
            self._run_end_run_command_handler,
            "Helper command for incorrectly entered run command at the run-end "
            "prompt.",
            prefix_aliases=["r"])

        # Tab completions for command "print_tensors".
        dumped_tensor_names = [
            "%s:%d" % (datum.node_name, datum.output_slot)
            for datum in debug_dump.dumped_tensor_data
        ]
        run_end_cli.register_tab_comp_context(["print_tensor", "pt"],
                                              dumped_tensor_names)

        # Tab completion for commands "node_info", "list_inputs" and
        # "list_outputs". The list comprehension is used below because nodes()
        # output can be unicodes and they need to be converted to strs.
        run_end_cli.register_tab_comp_context(
            ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
            [str(node_name) for node_name in debug_dump.nodes()])
        # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
        #    completion contexts and registered command handlers.

        if help_intro:
            run_end_cli.set_help_intro(help_intro)
        run_end_cli.run_ui(init_command=init_command,
                           title="run-end: " + self._run_description,
                           title_color=title_color)

        # Clean up the dump directory.
        shutil.rmtree(self._dump_root)

        # Return placeholder response that currently holds no additional
        # information.
        return framework.OnRunEndResponse()
    def on_run_start(self, request):
        """Overrides on-run-start callback.

        Invoke the CLI to let user choose what action to take:
          run / run --no_debug / step.

        Args:
          request: An instance of OnSessionInitRequest.

        Returns:
          An instance of OnSessionInitResponse.

        Raises:
          RuntimeError: If user chooses to prematurely exit the debugger.
        """
        self._update_run_calls_state(request.run_call_count, request.fetches,
                                     request.feed_dict)

        if self._run_till_filter_pass:
            # If we are running till a filter passes, we just need to keep
            # running with the DEBUG_RUN option.
            return framework.OnRunStartResponse(
                framework.OnRunStartAction.DEBUG_RUN,
                self._get_run_debug_urls())

        run_start_cli = curses_ui.CursesUI()

        # Both run-start commands take their help text from the matching
        # argument parser.
        for command, handler, aliases in (
                ("run", self._on_run_start_run_handler, ["r"]),
                ("invoke_stepper", self._on_run_start_step_handler, ["s"])):
            run_start_cli.register_command_handler(
                command,
                handler,
                self._on_run_start_parsers[command].format_help(),
                prefix_aliases=aliases)

        if self._tensor_filters:
            # Register tab completion for the filter names.
            run_start_cli.register_tab_comp_context(
                ["run", "r"], list(self._tensor_filters.keys()))

        run_start_cli.set_help_intro(
            cli_shared.get_run_start_intro(request.run_call_count,
                                           request.fetches, request.feed_dict,
                                           self._tensor_filters))

        # Create initial screen output detailing the run.
        response = run_start_cli.run_ui(
            init_command="help",
            title="run-start: " + self._run_description,
            title_color="blue_on_white")
        if response == debugger_cli_common.EXPLICIT_USER_EXIT:
            # Explicit user "exit" command leads to sys.exit(1).
            print("Note: user exited from debugger CLI: Calling sys.exit(1).",
                  file=sys.stderr)
            sys.exit(1)

        return response
Example #9
0
  def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
    """Prepare (but not launch) CLI for run-end, with debug dump from the run.

    Args:
      debug_dump: (debug_data.DebugDumpDir) The debug dump directory from
        this run.
      tf_error: (None or OpError) OpError that happened during the run()
        call (if any).
      passed_filter: (None or str) Name of the tensor filter that just
        passed and caused the preparation of this run-end CLI (if any).
    """
    if tf_error:
      help_intro = cli_shared.get_error_intro(tf_error)
      self._init_command = "help"
      self._title_color = "red_on_white"
    else:
      help_intro = None
      self._init_command = "lt"
      self._title_color = "black_on_white"
      if passed_filter is not None:
        # Some dumped tensor(s) from this run passed the filter.
        self._init_command = "lt -f %s" % passed_filter
        self._title_color = "red_on_white"

    analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

    # Supply all the available tensor filters.
    for filter_name, filter_callable in self._tensor_filters.items():
      analyzer.add_tensor_filter(filter_name, filter_callable)

    self._run_cli = curses_ui.CursesUI()
    # Register the standard analyzer commands from a spec table.
    for command, handler, aliases in (
        ("list_tensors", analyzer.list_tensors, ["lt"]),
        ("node_info", analyzer.node_info, ["ni"]),
        ("list_inputs", analyzer.list_inputs, ["li"]),
        ("list_outputs", analyzer.list_outputs, ["lo"]),
        ("print_tensor", analyzer.print_tensor, ["pt"])):
      self._run_cli.register_command_handler(
          command,
          handler,
          analyzer.get_help(command),
          prefix_aliases=aliases)

    # Tab completions for command "print_tensors".
    dumped_tensor_names = [
        "%s:%d" % (datum.node_name, datum.output_slot)
        for datum in debug_dump.dumped_tensor_data
    ]
    self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
                                            dumped_tensor_names)

    # Tab completion for commands "node_info", "list_inputs" and
    # "list_outputs". The list comprehension is used below because nodes()
    # output can be unicodes and they need to be converted to strs.
    self._run_cli.register_tab_comp_context(
        ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
        [str(node_name) for node_name in debug_dump.nodes()])
    # TODO(cais): Reduce API surface area for aliases vis-a-vis tab
    #    completion contexts and registered command handlers.

    self._title = "run-end: " + self._run_description

    if help_intro:
      self._run_cli.set_help_intro(help_intro)