Exemple #1
0
    def step(self, args, screen_info=None):
        """Step once along the sorted transitive closure.

        Args:
          args: (list of str) command-line arguments for the "step" command.
          screen_info: Information about screen.

        Returns:
          (RichTextLines) Screen output for the result of the stepping action.
        """

        parsed = self.arg_parsers["step"].parse_args(args)

        if parsed.num_times < 0:
            return debugger_cli_common.RichTextLines(
                "ERROR: Invalid number of times to step: %d" %
                parsed.num_times)

        # BUGFIX: initialize so that stepping 0 times returns empty output
        # instead of raising UnboundLocalError on the final return.
        screen_output = debugger_cli_common.RichTextLines([])

        # BUGFIX: range() replaces the Python-2-only xrange().
        for _ in range(parsed.num_times):
            if self._next >= len(self._sorted_nodes):
                return debugger_cli_common.RichTextLines(
                    "ERROR: Cannot step any further because the end of the sorted "
                    "transitive closure has been reached.")
            screen_output = self.cont([self._sorted_nodes[self._next]],
                                      screen_info)

        return screen_output
Exemple #2
0
    def cont(self, args, screen_info=None):
        """Continue-to action on the graph."""

        _ = screen_info

        parsed = self.arg_parsers["cont"].parse_args(args)

        # Work out which node is the continuation target, so the _next pointer
        # can be positioned correctly.
        node_name = parsed.target_name.split(":")[0]
        if node_name not in self._sorted_nodes:
            return cli_shared.error(self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] %
                                    parsed.target_name)
        self._next = self._sorted_nodes.index(node_name)

        cont_result = self._node_stepper.cont(
            parsed.target_name,
            invalidate_from_updated_variables=(
                parsed.invalidate_from_updated_variables),
            restore_variable_values=parsed.restore_variable_values)
        self._completed_nodes.add(node_name)

        result_lines = debugger_cli_common.RichTextLines(
            ["Continued to %s:" % parsed.target_name, ""])
        result_lines.extend(self._report_last_input_types())
        result_lines.extend(self._report_last_updated())
        result_lines.extend(
            tensor_format.format_tensor(cont_result,
                                        parsed.target_name,
                                        include_metadata=True))

        # Show a small window of the sorted transitive closure centered on the
        # node just continued to.
        window_start = max(0, self._next - 2)
        window_end = min(len(self._sorted_nodes), self._next + 3)

        final_output = self.list_sorted_nodes(
            ["-l", str(window_start), "-u", str(window_end)])
        final_output.extend(debugger_cli_common.RichTextLines([""]))
        final_output.extend(result_lines)

        # Determine the target of the next "step" action.
        self._calculate_next()

        return final_output
Exemple #3
0
    def _prep_cli_for_run_start(self):
        """Prepare (but do not launch) the CLI for the run-start phase."""

        self._run_cli = ui_factory.get_ui(self._ui_type)

        intro = debugger_cli_common.RichTextLines([])
        # The logo is only shown once, on the very first run call.
        if self._run_call_count == 1:
            intro.extend(cli_shared.get_tvmdbg_logo())
        intro.extend(debugger_cli_common.RichTextLines("Upcoming run:"))
        intro.extend(self._run_info)

        self._run_cli.set_help_intro(intro)

        # Initial screen output describing the upcoming run.
        self._title = "run-start: " + self._run_description
        self._init_command = "run_info"
        self._title_color = "blue_on_white"
Exemple #4
0
    def inject_value(self, args, screen_info=None):
        """Inject value to a given tensor.

        Args:
          args: (list of str) command-line arguments for the "inject_value"
            command.
          screen_info: Information about screen; when it carries a "cols" key,
            the value is used as the numpy print line width.

        Returns:
          (RichTextLines) Screen output for the result of the injection action.
        """

        # BUGFIX: the former "_ = screen_info  # Currently unused." line was
        # wrong -- screen_info IS used right here to size the printout.
        if screen_info and "cols" in screen_info:
            np_printoptions = {"linewidth": screen_info["cols"]}
        else:
            np_printoptions = {}

        parsed = self.arg_parsers["inject_value"].parse_args(args)

        tensor_names = self._resolve_tensor_names(parsed.tensor_name)
        if not tensor_names:
            return cli_shared.error(self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] %
                                    parsed.tensor_name)
        elif len(tensor_names) > 1:
            return cli_shared.error(
                self._MESSAGE_TEMPLATES["MULTIPLE_TENSORS"] %
                parsed.tensor_name)
        else:
            tensor_name = tensor_names[0]

        # SECURITY NOTE: eval() executes arbitrary Python from the CLI user.
        # This is tolerable only because the debugger CLI is an interactive,
        # trusted local tool; never route untrusted input through this path.
        tensor_value = eval(parsed.tensor_value_str)  # pylint: disable=eval-used

        try:
            self._node_stepper.override_tensor(tensor_name, tensor_value)
            lines = [
                "Injected value \"%s\"" % parsed.tensor_value_str,
                "  to tensor \"%s\":" % tensor_name, ""
            ]

            tensor_lines = tensor_format.format_tensor(
                tensor_value,
                tensor_name,
                include_metadata=True,
                np_printoptions=np_printoptions).lines
            lines.extend(tensor_lines)

        except ValueError:
            lines = [
                "ERROR: Failed to inject value to tensor %s" %
                parsed.tensor_name
            ]

        return debugger_cli_common.RichTextLines(lines)
def get_error_intro(tvm_error):
    """Generate formatted intro for TVM run-time error.

    Args:
      tvm_error: (errors.OpError) TVM run-time error object.

    Returns:
      (RichTextLines) Formatted intro message about the run-time OpError, with
        sample commands for debugging.
    """

    op_name = tvm_error.op.name

    out = debugger_cli_common.rich_text_lines_from_rich_line_list([
        "--------------------------------------",
        RL("!!! An error occurred during the run !!!", "blink"),
        "",
        "You may use the following commands to debug:",
    ])

    # Recommended commands, each rendered as a clickable link.
    recommendations = (
        ("ni -a -d -t %s" % op_name,
         "Inspect information about the failing op."),
        ("li -r %s" % op_name,
         "List inputs to the failing op, recursively."),
        ("lt",
         "List all tensors dumped during the failing run() call."),
    )
    for command, description in recommendations:
        out.extend(
            _recommend_command(command, description, create_link=True))

    out.extend(debugger_cli_common.RichTextLines([
        "",
        "Op name:    " + op_name,
        "Error type: " + str(type(tvm_error)),
        "",
        "Details:",
        str(tvm_error),
        "",
        "WARNING: Using client GraphDef due to the error, instead of "
        "executor GraphDefs.",
        "--------------------------------------",
        "",
    ]))

    return out
def get_tvmdbg_logo():
    """Make an ASCII representation of the tvmdbg logo."""

    # The art lines are fixed-width and must be reproduced verbatim.
    logo_rows = [
        "",
        " TTTTTTTT V     V MM   MM DDDD  BBBB   GGGG ",
        "    TT    V     V M M M M D   D B   B G    ",
        "    TT     V   V  M  M  M D   D BBBB  G   GG",
        "    TT      V V   M     M D   D B   B G    G",
        "    TT       V    M     M DDDD  BBBB   GGGG ",
        "",
    ]
    return debugger_cli_common.RichTextLines(logo_rows)
Exemple #7
0
    def _run_handler(self, args, screen_info=None):
        """Command handler for "run" command during on-run-start.

        Args:
          args: (list of str) command-line arguments for the "run" command.
          screen_info: Information about screen. Currently unused.

        Returns:
          (RichTextLines) Error output, only when an invalid tensor-filter
            name was supplied with -f.

        Raises:
          debugger_cli_common.CommandLineExit: to exit the CLI, carrying an
            OnRunStartResponse exit token that describes the requested run.
        """

        del screen_info  # Currently unused.

        parsed = self._argparsers["run"].parse_args(args)
        # Normalize empty-string filters to None.
        parsed.node_name_filter = parsed.node_name_filter or None
        parsed.op_type_filter = parsed.op_type_filter or None
        parsed.tensor_dtype_filter = parsed.tensor_dtype_filter or None

        if parsed.profile:
            raise debugger_cli_common.CommandLineExit(
                exit_token=framework.OnRunStartResponse(
                    framework.OnRunStartAction.PROFILE_RUN, []))

        self._skip_debug = parsed.no_debug
        self._run_through_times = parsed.times

        if parsed.times > 1 or parsed.no_debug:
            # If requested -t times > 1, the very next run will be a non-debug run.
            action = framework.OnRunStartAction.NON_DEBUG_RUN
            debug_urls = []
        else:
            action = framework.OnRunStartAction.DEBUG_RUN
            debug_urls = self._get_run_debug_urls()

        if parsed.till_filter_pass:
            # For the run-till-filter-pass (run -f) mode, use the DEBUG_RUN
            # option to access the intermediate tensors, and set the corresponding
            # state flag of the class itself to True.
            if parsed.till_filter_pass in self._tensor_filters:
                action = framework.OnRunStartAction.DEBUG_RUN
                self._active_tensor_filter = parsed.till_filter_pass
            else:
                # Handle invalid filter name.
                return debugger_cli_common.RichTextLines([
                    "ERROR: tensor filter \"%s\" does not exist." %
                    parsed.till_filter_pass
                ])

        # BUGFIX: build the response only after the till-filter-pass branch may
        # have upgraded `action` to DEBUG_RUN. Previously the response was
        # constructed before that branch, so the upgrade was silently dropped
        # and "run -f" combined with -t/-n produced a NON_DEBUG_RUN response.
        run_start_response = framework.OnRunStartResponse(
            action,
            debug_urls,
            node_name_regex_whitelist=parsed.node_name_filter,
            op_type_regex_whitelist=parsed.op_type_filter,
            tensor_dtype_regex_whitelist=parsed.tensor_dtype_filter)
        if parsed.till_filter_pass:
            # Only reached when the filter name was valid.
            self._active_tensor_filter_run_start_response = run_start_response

        # Raise CommandLineExit exception to cause the CLI to exit.
        raise debugger_cli_common.CommandLineExit(
            exit_token=run_start_response)
Exemple #8
0
    def _report_last_updated(self):
        """Generate a report of the variables updated in the last cont/step call.

        Returns:
          (debugger_cli_common.RichTextLines) A RichTextLines representation of
            the variables updated in the last cont/step call.
        """

        last_updated = self._node_stepper.last_updated()
        if not last_updated:
            return debugger_cli_common.RichTextLines([])

        report = [RL("Updated:", self._UPDATED_ATTRIBUTE)]
        # One indented line per updated variable, in sorted order.
        report.extend("  %s" % name for name in sorted(last_updated))
        report.append("")
        return debugger_cli_common.rich_text_lines_from_rich_line_list(report)
Exemple #9
0
    def _run_info_handler(self, args, screen_info=None):
        """Command handler for "run_info": describe the upcoming run."""
        del args  # Currently unused.
        del screen_info  # Currently unused.

        output = debugger_cli_common.RichTextLines([])
        # Show the logo only on the very first run call.
        if self._run_call_count == 1:
            output.extend(cli_shared.get_tvmdbg_logo())
        output.extend(self._run_info)

        has_main_menu = (
            not self._is_run_start
            and debugger_cli_common.MAIN_MENU_KEY in output.annotations)
        if has_main_menu:
            menu = output.annotations[debugger_cli_common.MAIN_MENU_KEY]
            # Ensure a "list_tensors" shortcut is present at the front.
            if "list_tensors" not in menu.captions():
                menu.insert(
                    0,
                    debugger_cli_common.MenuItem("list_tensors",
                                                 "list_tensors"))

        return output
Exemple #10
0
    def print_tensor(self, args, screen_info=None):
        """Print the value of a tensor that the stepper has access to."""

        parsed = self.arg_parsers["print_tensor"].parse_args(args)

        np_printoptions = {}
        if screen_info and "cols" in screen_info:
            # Wrap the numpy printout at the available screen width.
            np_printoptions["linewidth"] = screen_info["cols"]

        # Determine if any range-highlighting is required.
        highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

        tensor_name, tensor_slicing = (
            command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

        matches = self._resolve_tensor_names(tensor_name)
        if not matches:
            return cli_shared.error(
                self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] % tensor_name)
        if len(matches) > 1:
            return cli_shared.error(
                self._MESSAGE_TEMPLATES["MULTIPLE_TENSORS"] % tensor_name)
        tensor_name = matches[0]

        try:
            tensor_value = self._node_stepper.get_tensor_value(tensor_name)
        except ValueError as ex:
            return debugger_cli_common.RichTextLines([str(ex)])

        return cli_shared.format_tensor(
            tensor_value,
            tensor_name,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
    def print_source(self, args, screen_info=None):
        """Print a Python source file with line-level profile information.

        Args:
          args: Command-line arguments, excluding the command prefix, as a list of
            str.
          screen_info: Optional dict input containing screen information such as
            cols.

        Returns:
          Output text lines as a RichTextLines object.
        """
        del screen_info

        parsed = self._arg_parsers["print_source"].parse_args(args)

        # Optional device filter; None means "include all devices".
        device_name_regex = (re.compile(parsed.device_name_filter)
                             if parsed.device_name_filter else None)

        # Collect profile data from every device that passes the filter.
        profile_data = []
        data_generator = self._get_profile_data_generator()
        device_count = len(self._run_metadata.step_stats.dev_stats)
        for index in range(device_count):
            device_stats = self._run_metadata.step_stats.dev_stats[index]
            if device_name_regex and not device_name_regex.match(
                    device_stats.device):
                continue
            profile_data.extend(
                [datum for datum in data_generator(device_stats)])

        # Map line numbers of the requested source file to profile information.
        source_annotation = source_utils.annotate_source_against_profile(
            profile_data,
            os.path.expanduser(parsed.source_file_path),
            node_name_filter=parsed.node_name_filter,
            op_type_filter=parsed.op_type_filter)
        if not source_annotation:
            return debugger_cli_common.RichTextLines([
                "The source file %s does not contain any profile information for "
                "the previous Session run under the following "
                "filters:" % parsed.source_file_path,
                "  --%s: %s" %
                (_DEVICE_NAME_FILTER_FLAG, parsed.device_name_filter),
                "  --%s: %s" %
                (_NODE_NAME_FILTER_FLAG, parsed.node_name_filter),
                "  --%s: %s" % (_OP_TYPE_FILTER_FLAG, parsed.op_type_filter)
            ])

        # Find the most expensive line; it normalizes the cost bars below.
        max_total_cost = 0
        for line_index in source_annotation:
            total_cost = _get_total_cost(source_annotation[line_index],
                                         parsed.cost_type)
            max_total_cost = max(max_total_cost, total_cost)

        source_lines, line_num_width = source_utils.load_source(
            parsed.source_file_path)

        # Fixed column layout: cost bar | total cost | node(exec) counts |
        # line number | source text.
        cost_bar_max_length = 10
        total_cost_head = parsed.cost_type
        column_widths = {
            "cost_bar": cost_bar_max_length + 3,
            "total_cost": len(total_cost_head) + 3,
            "num_nodes_execs": len(self._NUM_EXECS_SUB_HEAD) + 1,
            "line_number": line_num_width,
        }

        # Two header rows (heads and sub-heads), padded to the column widths.
        head = RL(
            " " * column_widths["cost_bar"] + total_cost_head + " " *
            (column_widths["total_cost"] - len(total_cost_head)) +
            self._NUM_NODES_HEAD + " " *
            (column_widths["num_nodes_execs"] - len(self._NUM_NODES_HEAD)),
            font_attr=self._LINE_COST_ATTR)
        head += RL(self._LINENO_HEAD, font_attr=self._LINE_NUM_ATTR)
        sub_head = RL(
            " " * (column_widths["cost_bar"] + column_widths["total_cost"]) +
            self._NUM_EXECS_SUB_HEAD + " " *
            (column_widths["num_nodes_execs"] - len(self._NUM_EXECS_SUB_HEAD))
            + " " * column_widths["line_number"],
            font_attr=self._LINE_COST_ATTR)
        sub_head += RL(self._SOURCE_HEAD, font_attr="bold")
        lines = [head, sub_head]

        output_annotations = {}
        for i, line in enumerate(source_lines):
            lineno = i + 1
            if lineno in source_annotation:
                annotation = source_annotation[lineno]
                # Cost bar, normalized against the most expensive line.
                cost_bar = self._render_normalized_cost_bar(
                    _get_total_cost(annotation, parsed.cost_type),
                    max_total_cost, cost_bar_max_length)
                annotated_line = cost_bar
                annotated_line += " " * (column_widths["cost_bar"] -
                                         len(cost_bar))

                total_cost = RL(cli_shared.time_to_readable_str(
                    _get_total_cost(annotation, parsed.cost_type),
                    force_time_unit=parsed.time_unit),
                                font_attr=self._LINE_COST_ATTR)
                total_cost += " " * (column_widths["total_cost"] -
                                     len(total_cost))
                annotated_line += total_cost

                # Build an "lp" command scoped to exactly this source line so
                # the node/exec count cell doubles as a clickable menu item.
                file_path_filter = re.escape(parsed.source_file_path) + "$"
                command = "lp --file_path_filter %s --min_lineno %d --max_lineno %d" % (
                    file_path_filter, lineno, lineno + 1)
                if parsed.device_name_filter:
                    command += " --%s %s" % (_DEVICE_NAME_FILTER_FLAG,
                                             parsed.device_name_filter)
                if parsed.node_name_filter:
                    command += " --%s %s" % (_NODE_NAME_FILTER_FLAG,
                                             parsed.node_name_filter)
                if parsed.op_type_filter:
                    command += " --%s %s" % (_OP_TYPE_FILTER_FLAG,
                                             parsed.op_type_filter)
                menu_item = debugger_cli_common.MenuItem(None, command)
                num_nodes_execs = RL(
                    "%d(%d)" %
                    (annotation.node_count, annotation.node_exec_count),
                    font_attr=[self._LINE_COST_ATTR, menu_item])
                num_nodes_execs += " " * (column_widths["num_nodes_execs"] -
                                          len(num_nodes_execs))
                annotated_line += num_nodes_execs
            else:
                # Unprofiled line: blank padding for every column except the
                # line number.
                annotated_line = RL(" " * sum(column_widths[col_name]
                                              for col_name in column_widths
                                              if col_name != "line_number"))

            line_num_column = RL(" L%d" % (lineno), self._LINE_NUM_ATTR)
            line_num_column += " " * (column_widths["line_number"] -
                                      len(line_num_column))
            annotated_line += line_num_column
            annotated_line += line
            lines.append(annotated_line)

            # Record the initial scroll position when --init_line matches.
            if parsed.init_line == lineno:
                output_annotations[
                    debugger_cli_common.INIT_SCROLL_POS_KEY] = len(lines) - 1

        return debugger_cli_common.rich_text_lines_from_rich_line_list(
            lines, annotations=output_annotations)
    def list_profile(self, args, screen_info=None):
        """Command handler for list_profile.

        List per-operation profile information.

        Args:
          args: Command-line arguments, excluding the command prefix, as a list of
            str.
          screen_info: Optional dict input containing screen information such as
            cols.

        Returns:
          Output text lines as a RichTextLines object.
        """
        screen_cols = screen_info["cols"] if (
            screen_info and "cols" in screen_info) else 80

        parsed = self._arg_parsers["list_profile"].parse_args(args)

        def _interval(spec):
            # Parse a time-interval spec; empty/None means "no filter".
            return command_parser.parse_time_interval(spec) if spec else None

        def _regex(pattern):
            # Compile a regex filter; empty/None means "no filter".
            return re.compile(pattern) if pattern else None

        op_time_interval = _interval(parsed.op_time)
        exec_time_interval = _interval(parsed.execution_time)
        node_name_regex = _regex(parsed.node_name_filter)
        file_path_regex = _regex(parsed.file_path_filter)
        op_type_regex = _regex(parsed.op_type_filter)
        device_name_regex = _regex(parsed.device_name_filter)

        output = debugger_cli_common.RichTextLines([""])
        data_generator = self._get_profile_data_generator()
        all_device_stats = self._run_metadata.step_stats.dev_stats
        device_count = len(all_device_stats)
        for index, device_stats in enumerate(all_device_stats):
            if device_name_regex and not device_name_regex.match(
                    device_stats.device):
                continue
            # Keep only the data points that pass every active filter.
            profile_data = [
                datum for datum in data_generator(device_stats)
                if _list_profile_filter(datum,
                                        node_name_regex,
                                        file_path_regex,
                                        op_type_regex,
                                        op_time_interval,
                                        exec_time_interval,
                                        min_lineno=parsed.min_lineno,
                                        max_lineno=parsed.max_lineno)
            ]
            profile_data.sort(
                key=lambda datum: _list_profile_sort_key(
                    datum, parsed.sort_by),
                reverse=parsed.reverse)
            output.extend(
                _get_list_profile_lines(
                    device_stats.device,
                    index,
                    device_count,
                    profile_data,
                    parsed.sort_by,
                    parsed.reverse,
                    parsed.time_unit,
                    device_name_filter=parsed.device_name_filter,
                    node_name_filter=parsed.node_name_filter,
                    op_type_filter=parsed.op_type_filter,
                    screen_cols=screen_cols))
        return output
Exemple #13
0
def format_tensor(tensor,
                  tensor_label,
                  include_metadata=False,
                  auxiliary_message=None,
                  include_numeric_summary=False,
                  np_printoptions=None,
                  highlight_options=None):
    """Generate a RichTextLines object showing a tensor in formatted style.

    Args:
      tensor: The tensor to be displayed, as a numpy ndarray or other
        appropriate format (e.g., None representing uninitialized tensors).
      tensor_label: A label for the tensor, as a string. If set to None, will
        suppress the tensor name line in the return value.
      include_metadata: Whether metadata such as dtype and shape are to be
        included in the formatted text.
      auxiliary_message: An auxiliary message to display under the tensor label,
        dtype and shape information lines.
      include_numeric_summary: Whether a text summary of the numeric values (if
        applicable) will be included.
      np_printoptions: A dictionary of keyword arguments that are passed to a
        call of np.set_printoptions() to set the text format for display numpy
        ndarrays.
      highlight_options: (HighlightOptions) options for highlighting elements
        of the tensor.

    Returns:
      A RichTextLines object. Its annotation field has line-by-line markups to
      indicate which indices in the array the first element of each line
      corresponds to.
    """
    lines = []
    font_attr_segs = {}

    if tensor_label is not None:
        lines.append("Tensor \"%s\":" % tensor_label)
        suffix = tensor_label.split(":")[-1]
        if suffix.isdigit():
            # Suffix is a number. Assume it is the output slot index.
            font_attr_segs[0] = [(8, 8 + len(tensor_label), "bold")]
        else:
            # Suffix is not a number. It is auxiliary information such as the debug
            # op type. In this case, highlight the suffix with a different color.
            debug_op_len = len(suffix)
            proper_len = len(tensor_label) - debug_op_len - 1
            font_attr_segs[0] = [(8, 8 + proper_len, "bold"),
                                 (8 + proper_len + 1,
                                  8 + proper_len + 1 + debug_op_len, "yellow")]

    if isinstance(tensor, debug_data.InconvertibleTensorProto):
        if lines:
            lines.append("")
        lines.extend(str(tensor).split("\n"))
        return debugger_cli_common.RichTextLines(lines)
    elif not isinstance(tensor, np.ndarray):
        # If tensor is not a np.ndarray, return simple text-line representation of
        # the object without annotations.
        if lines:
            lines.append("")
        lines.extend(repr(tensor).split("\n"))
        return debugger_cli_common.RichTextLines(lines)

    if include_metadata:
        lines.append("  dtype: %s" % str(tensor.dtype))
        # Strip the "L" suffix Python 2 longs leave in shape reprs.
        lines.append("  shape: %s" % str(tensor.shape).replace("L", ""))

    if lines:
        lines.append("")
    formatted = debugger_cli_common.RichTextLines(
        lines, font_attr_segs=font_attr_segs)

    if auxiliary_message:
        formatted.extend(auxiliary_message)

    if include_numeric_summary:
        formatted.append("Numeric summary:")
        formatted.extend(numeric_summary(tensor))
        formatted.append("")

    # Apply custom string formatting options for numpy ndarray.
    # NOTE(review): this mutates numpy's *global* print options and does not
    # restore them afterwards; callers relying on default formatting elsewhere
    # should be aware of this side effect.
    if np_printoptions is not None:
        np.set_printoptions(**np_printoptions)

    array_lines = repr(tensor).split("\n")
    # BUGFIX: np.string_ was removed in NumPy 2.0; np.bytes_ is its exact
    # alias and exists in all NumPy versions.
    if tensor.dtype.type is not np.bytes_:
        # Parse array lines to get beginning indices for each line.

        # TODO(cais): Currently, we do not annotate string-type tensors due to
        #   difficulty in escaping sequences. Address this issue.
        annotations = _annotate_ndarray_lines(array_lines,
                                              tensor,
                                              np_printoptions=np_printoptions)
    else:
        annotations = None
    formatted_array = debugger_cli_common.RichTextLines(
        array_lines, annotations=annotations)
    formatted.extend(formatted_array)

    # Perform optional highlighting.
    if highlight_options is not None:
        indices_list = list(np.argwhere(highlight_options.criterion(tensor)))

        total_elements = np.size(tensor)
        # BUGFIX: guard against ZeroDivisionError for zero-sized tensors.
        highlight_percentage = (
            len(indices_list) / float(total_elements) * 100.0
            if total_elements else 0.0)
        highlight_summary = "Highlighted%s: %d of %d element(s) (%.2f%%)" % (
            "(%s)" % highlight_options.description
            if highlight_options.description else "", len(indices_list),
            total_elements, highlight_percentage)

        formatted.lines[0] += " " + highlight_summary

        if indices_list:
            indices_list = [list(indices) for indices in indices_list]

            are_omitted, rows, start_cols, end_cols = locate_tensor_element(
                formatted, indices_list)
            for is_omitted, row, start_col, end_col in zip(
                    are_omitted, rows, start_cols, end_cols):
                if is_omitted or start_col is None or end_col is None:
                    continue

                if row in formatted.font_attr_segs:
                    formatted.font_attr_segs[row].append(
                        (start_col, end_col, highlight_options.font_attr))
                else:
                    formatted.font_attr_segs[row] = [
                        (start_col, end_col, highlight_options.font_attr)
                    ]

    return formatted
Exemple #14
0
def numeric_summary(tensor):
    """Get a text summary of a numeric tensor.

    This summary is only available for numeric (int*, float*, complex*) and
    Boolean tensors.

    Args:
      tensor: (`numpy.ndarray`) the tensor value object to be summarized.

    Returns:
      The summary text as a `RichTextLines` object. If the type of `tensor` is not
      numeric or Boolean, a single-line `RichTextLines` object containing a
      warning message will reflect that.
    """
    def _counts_summary(counts, skip_zeros=True, total_count=None):
        """Format values as a two-row ("|"-delimited) table.

        Args:
          counts: list of (key, value) pairs, one table column each.
          skip_zeros: Whether columns with a zero/falsy value are omitted.
          total_count: If not None, append a trailing "total" column.

        Returns:
          A two-line RichTextLines table (keys row, values row).
        """
        if skip_zeros:
            counts = [(count_key, count_val) for count_key, count_val in counts
                      if count_val]
        # Uniform column width: the widest key or value, plus one space.
        max_common_len = 0
        for count_key, count_val in counts:
            count_val_str = str(count_val)
            common_len = max(len(count_key) + 1, len(count_val_str) + 1)
            max_common_len = max(common_len, max_common_len)

        key_line = debugger_cli_common.RichLine("|")
        val_line = debugger_cli_common.RichLine("|")
        for count_key, count_val in counts:
            count_val_str = str(count_val)
            key_line += _pad_string_to_length(count_key, max_common_len)
            val_line += _pad_string_to_length(count_val_str, max_common_len)
        key_line += " |"
        val_line += " |"

        if total_count is not None:
            total_key_str = "total"
            total_val_str = str(total_count)
            max_common_len = max(len(total_key_str) + 1, len(total_val_str))
            total_key_str = _pad_string_to_length(total_key_str,
                                                  max_common_len)
            total_val_str = _pad_string_to_length(total_val_str,
                                                  max_common_len)
            key_line += total_key_str + " |"
            val_line += total_val_str + " |"

        return debugger_cli_common.rich_text_lines_from_rich_line_list(
            [key_line, val_line])

    if not isinstance(tensor, np.ndarray) or not np.size(tensor):
        return debugger_cli_common.RichTextLines(
            ["No numeric summary available due to empty tensor."])
    elif (np.issubdtype(tensor.dtype, np.floating)
          # BUGFIX: np.complex (alias of the builtin) was removed in modern
          # NumPy; np.complexfloating is the proper abstract complex dtype.
          or np.issubdtype(tensor.dtype, np.complexfloating)
          or np.issubdtype(tensor.dtype, np.integer)):
        # Count special values (nan/inf) and sign classes.
        counts = [("nan", np.sum(np.isnan(tensor))),
                  ("-inf", np.sum(np.isneginf(tensor))),
                  ("-",
                   np.sum(
                       np.logical_and(tensor < 0.0,
                                      np.logical_not(np.isneginf(tensor))))),
                  ("0", np.sum(tensor == 0.0)),
                  ("+",
                   np.sum(
                       np.logical_and(tensor > 0.0,
                                      np.logical_not(np.isposinf(tensor))))),
                  ("+inf", np.sum(np.isposinf(tensor)))]
        output = _counts_summary(counts, total_count=np.size(tensor))

        # Basic statistics over the finite (non-nan, non-inf) elements only.
        valid_array = tensor[np.logical_not(
            np.logical_or(np.isinf(tensor), np.isnan(tensor)))]
        if np.size(valid_array):
            stats = [("min", np.min(valid_array)),
                     ("max", np.max(valid_array)),
                     ("mean", np.mean(valid_array)),
                     ("std", np.std(valid_array))]
            output.extend(_counts_summary(stats, skip_zeros=False))
        return output
    # BUGFIX: np.bool (alias of the builtin) was removed in modern NumPy;
    # np.bool_ is the actual numpy Boolean scalar type.
    elif tensor.dtype == np.bool_:
        counts = [
            ("False", np.sum(tensor == 0)),
            ("True", np.sum(tensor > 0)),
        ]
        return _counts_summary(counts, total_count=np.size(tensor))
    else:
        return debugger_cli_common.RichTextLines([
            "No numeric summary available due to tensor dtype: %s." %
            tensor.dtype
        ])
def get_run_start_intro(run_call_count,
                        fetches,
                        feed_dict,
                        tensor_filters,
                        is_callable_runner=False):
    """Generate formatted intro for run-start UI.

    Builds the screen shown when a `GraphRuntime.run()` call is intercepted:
    a header (call counter or callable-runner notice), the flattened fetch
    names, the feed keys (each linked to a `pf` print-feed command), the
    recommended next commands, the registered tensor filters, and a main
    menu annotation.

    Args:
      run_call_count: (int) Run call counter.
      fetches: Fetches of the `GraphRuntime.run()` call. See doc of `GraphRuntime.run()`
        for more details.
      feed_dict: Feeds to the `GraphRuntime.run()` call. See doc of `GraphRuntime.run()`
        for more details.
      tensor_filters: (dict) A dict from tensor-filter name to tensor-filter
        callable.
      is_callable_runner: (bool) whether a runner returned by
          GraphRuntime.make_callable is being run.

    Returns:
      (RichTextLines) Formatted intro message about the `GraphRuntime.run()` call.
    """

    fetch_lines = common.get_flattened_names(fetches)

    if not feed_dict:
        feed_dict_lines = [debugger_cli_common.RichLine("  (Empty)")]
    else:
        feed_dict_lines = []
        for feed_key in feed_dict:
            feed_key_name = common.get_graph_element_name(feed_key)
            feed_dict_line = debugger_cli_common.RichLine("  ")
            feed_dict_line += debugger_cli_common.RichLine(
                feed_key_name,
                debugger_cli_common.MenuItem(None, "pf '%s'" % feed_key_name))
            # Surround the name string with quotes, because feed_key_name may contain
            # spaces in some cases, e.g., SparseTensors.
            feed_dict_lines.append(feed_dict_line)
    feed_dict_lines = debugger_cli_common.rich_text_lines_from_rich_line_list(
        feed_dict_lines)

    out = debugger_cli_common.RichTextLines(_HORIZONTAL_BAR)
    if is_callable_runner:
        # Callable runners carry no fetch/feed info to display.
        out.append(
            " Running a runner returned by GraphRuntime.make_callable()")
    else:
        out.append(" GraphRuntime.run() call #%d:" % run_call_count)
        out.append("")
        out.append(" Output:")
        out.extend(
            debugger_cli_common.RichTextLines(
                ["   " + line for line in fetch_lines]))
        out.append("")
        out.append(" Inputs:")
        out.extend(feed_dict_lines)
    out.append(_HORIZONTAL_BAR)
    out.append("")
    out.append(" Select one of the following commands to proceed ---->")

    out.extend(
        _recommend_command("run",
                           "Execute the run() call with debug tensor-watching",
                           create_link=True))
    out.extend(
        _recommend_command(
            "run -n",
            "Execute the run() call without debug tensor-watching",
            create_link=True))
    out.extend(
        _recommend_command(
            "run -t <T>",
            "Execute run() calls (T - 1) times without debugging, then "
            "execute run() once more with debugging and drop back to the CLI"))
    out.extend(
        _recommend_command(
            "run -f <filter_name>",
            "Keep executing run() calls until a dumped tensor passes a given, "
            "registered filter (conditional breakpoint mode)"))

    # List registered filters; each name is a clickable "run -f <name>" link.
    more_lines = ["    Registered filter(s):"]
    if tensor_filters:
        for filter_name in tensor_filters:
            command_menu_node = debugger_cli_common.MenuItem(
                "", "run -f %s" % filter_name)
            more_lines.append(
                RL("        * ") + RL(filter_name, command_menu_node))
    else:
        more_lines.append("        (None)")

    out.extend(
        debugger_cli_common.rich_text_lines_from_rich_line_list(more_lines))

    # TODO(Pariksheet): Python invoke_stepper implementation not support now.
    #    out.extend(
    #        _recommend_command(
    #            "invoke_stepper",
    #            "Use the node-stepper interface, which allows you to interactively "
    #            "step through nodes involved in the graph run() call and "
    #            "inspect/modify their values", create_link=True))

    out.append("")

    #    out.append_rich_line(RL("For more details, see ") +
    #                         RL("help.", debugger_cli_common.MenuItem("", "help")) +
    #                         ".")
    #    out.append("")

    # Make main menu for the run-start intro.
    menu = debugger_cli_common.Menu()
    menu.append(debugger_cli_common.MenuItem("run", "run"))
    # TODO(Pariksheet): Python invoke_stepper implementation not support now.
    #    menu.append(debugger_cli_common.MenuItem(
    #        "invoke_stepper", "invoke_stepper"))
    menu.append(debugger_cli_common.MenuItem("exit", "exit"))
    out.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu

    return out