def _print_feed_handler(self, args, screen_info=None):
  """Print the value of a tensor supplied via feed_dict in the current run.

  Args:
    args: Command-line arguments, excluding the command prefix, as a list
      of str.
    screen_info: Optional dict with screen information (e.g., cols), used
      to derive numpy print options.

  Returns:
    Screen output from cli_shared.format_tensor for the matched feed, or
    an error from cli_shared.error if the feed_dict is empty or lacks the
    requested key.
  """
  np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
      screen_info)

  if not self._feed_dict:
    return cli_shared.error(
        "The feed_dict of the current run is None or empty.")

  parsed = self._argparsers["print_feed"].parse_args(args)
  tensor_name, tensor_slicing = (
      command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

  # Scan the feed keys, comparing by graph-element name; stop at the first
  # match.
  matched_key = None
  matched_value = None
  for feed in self._feed_dict:
    name = common.get_graph_element_name(feed)
    if name == tensor_name:
      matched_key = name
      matched_value = self._feed_dict[feed]
      break

  if matched_key is None:
    return cli_shared.error(
        "The feed_dict of the current run does not contain the key %s" %
        tensor_name)
  # Annotate the title so the output is identifiable as a feed value.
  return cli_shared.format_tensor(
      matched_value,
      matched_key + " (feed)",
      np_printoptions,
      print_all=parsed.print_all,
      tensor_slicing=tensor_slicing,
      highlight_options=cli_shared.parse_ranges_highlight(parsed.ranges),
      include_numeric_summary=parsed.numeric_summary)
Example #2
0
  def _print_feed_handler(self, args, screen_info=None):
    """Command handler for print_feed.

    Prints the value of a tensor fed through feed_dict in the current run.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list
        of str.
      screen_info: Optional dict containing screen information (e.g., cols),
        used to derive numpy print options.

    Returns:
      Screen output from cli_shared.format_tensor for the matched feed, or
      an error produced by cli_shared.error if the feed_dict is empty or
      does not contain the requested key.
    """
    np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
        screen_info)

    if not self._feed_dict:
      return cli_shared.error(
          "The feed_dict of the current run is None or empty.")

    parsed = self._argparsers["print_feed"].parse_args(args)
    tensor_name, tensor_slicing = (
        command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

    # Linear scan over the feed keys, comparing by graph-element name (keys
    # are presumably graph elements or their names — see
    # common.get_graph_element_name).
    feed_key = None
    feed_value = None
    for key in self._feed_dict:
      key_name = common.get_graph_element_name(key)
      if key_name == tensor_name:
        feed_key = key_name
        feed_value = self._feed_dict[key]
        break

    if feed_key is None:
      return cli_shared.error(
          "The feed_dict of the current run does not contain the key %s" %
          tensor_name)
    else:
      # Annotate the title so the output is identifiable as a feed value.
      return cli_shared.format_tensor(
          feed_value,
          feed_key + " (feed)",
          np_printoptions,
          print_all=parsed.print_all,
          tensor_slicing=tensor_slicing,
          highlight_options=cli_shared.parse_ranges_highlight(parsed.ranges),
          include_numeric_summary=parsed.numeric_summary)
Example #3
0
    def inject_value(self, args, screen_info=None):
        """Inject value to a given tensor.

    Args:
      args: (list of str) command-line arguments for the "inject_value"
        command.
      screen_info: Information about screen.

    Returns:
      (RichTextLines) Screen output for the result of the injection.
    """

        _ = screen_info  # Currently unused.

        if screen_info and "cols" in screen_info:
            np_printoptions = {"linewidth": screen_info["cols"]}
        else:
            np_printoptions = {}

        parsed = self.arg_parsers["inject_value"].parse_args(args)

        # The (possibly abbreviated) name must resolve to exactly one tensor.
        tensor_names = self._resolve_tensor_names(parsed.tensor_name)
        if not tensor_names:
            return cli_shared.error(self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] %
                                    parsed.tensor_name)
        elif len(tensor_names) > 1:
            return cli_shared.error(
                self._MESSAGE_TEMPLATES["MULTIPLE_TENSORS"] %
                parsed.tensor_name)
        else:
            tensor_name = tensor_names[0]

        # SECURITY NOTE(review): eval of a user-supplied string. Tolerable
        # only because this is an interactive debugging CLI; never route
        # untrusted input here.
        tensor_value = eval(parsed.tensor_value_str)  # pylint: disable=eval-used

        try:
            self._node_stepper.override_tensor(tensor_name, tensor_value)
            lines = [
                "Injected value \"%s\"" % parsed.tensor_value_str,
                "  to tensor \"%s\":" % tensor_name, ""
            ]

            # Echo the injected value back, with metadata, for confirmation.
            tensor_lines = tensor_format.format_tensor(
                tensor_value,
                tensor_name,
                include_metadata=True,
                np_printoptions=np_printoptions).lines
            lines.extend(tensor_lines)

        except ValueError:
            # override_tensor rejected the value; report instead of raising.
            lines = [
                "ERROR: Failed to inject value to tensor %s" %
                parsed.tensor_name
            ]

        return debugger_cli_common.RichTextLines(lines)
Example #4
0
  def inject_value(self, args, screen_info=None):
    """Inject value to a given tensor.

    Args:
      args: (list of str) command-line arguments for the "inject_value"
        command.
      screen_info: Information about screen.

    Returns:
      (RichTextLines) Screen output for the result of the injection.
    """

    _ = screen_info  # Currently unused.

    np_printoptions = (
        {"linewidth": screen_info["cols"]}
        if screen_info and "cols" in screen_info else {})

    parsed = self.arg_parsers["inject_value"].parse_args(args)

    # The (possibly abbreviated) name must resolve to exactly one tensor.
    candidates = self._resolve_tensor_names(parsed.tensor_name)
    if not candidates:
      return cli_shared.error(
          self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] % parsed.tensor_name)
    if len(candidates) > 1:
      return cli_shared.error(
          self._MESSAGE_TEMPLATES["MULTIPLE_TENSORS"] % parsed.tensor_name)
    tensor_name = candidates[0]

    # NOTE: eval of a user-supplied string; acceptable only in this
    # interactive debugging CLI.
    tensor_value = eval(parsed.tensor_value_str)  # pylint: disable=eval-used

    try:
      self._node_stepper.override_tensor(tensor_name, tensor_value)
      lines = [
          "Injected value \"%s\"" % parsed.tensor_value_str,
          "  to tensor \"%s\":" % tensor_name, ""
      ]
      # Echo the injected value back, with metadata, for confirmation.
      lines.extend(
          tensor_format.format_tensor(
              tensor_value,
              tensor_name,
              include_metadata=True,
              np_printoptions=np_printoptions).lines)
    except ValueError:
      lines = [
          "ERROR: Failed to inject value to tensor %s" % parsed.tensor_name
      ]

    return debugger_cli_common.RichTextLines(lines)
Example #5
0
  def print_tensor(self, args, screen_info=None):
    """Print the value of a tensor that the stepper has access to."""

    parsed = self.arg_parsers["print_tensor"].parse_args(args)

    # Cap numpy's line width at the screen width, when available.
    np_printoptions = {}
    if screen_info and "cols" in screen_info:
      np_printoptions = {"linewidth": screen_info["cols"]}

    # Range-based highlighting, if any was requested.
    highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

    tensor_name, tensor_slicing = (
        command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

    # The name must resolve to exactly one tensor in the stepper's closure.
    matches = self._resolve_tensor_names(tensor_name)
    if not matches:
      return cli_shared.error(
          self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] % tensor_name)
    if len(matches) > 1:
      return cli_shared.error(
          self._MESSAGE_TEMPLATES["MULTIPLE_TENSORS"] % tensor_name)
    tensor_name = matches[0]

    try:
      tensor_value = self._node_stepper.get_tensor_value(tensor_name)
    except ValueError as e:
      # The stepper has no value for this tensor; surface its message as-is.
      return debugger_cli_common.RichTextLines([str(e)])

    return cli_shared.format_tensor(
        tensor_value,
        tensor_name,
        np_printoptions,
        print_all=parsed.print_all,
        tensor_slicing=tensor_slicing,
        highlight_options=highlight_options)
Example #6
0
  def print_tensor(self, args, screen_info=None):
    """Print the value of a tensor that the stepper has access to.

    Args:
      args: (list of str) command-line arguments for the "print_tensor"
        command.
      screen_info: Optional dict with screen information such as cols.

    Returns:
      Screen output from cli_shared.format_tensor, or an error/message as a
      RichTextLines object.
    """

    parsed = self.arg_parsers["print_tensor"].parse_args(args)

    # Cap numpy's line width at the screen width, when available.
    if screen_info and "cols" in screen_info:
      np_printoptions = {"linewidth": screen_info["cols"]}
    else:
      np_printoptions = {}

    # Determine if any range-highlighting is required.
    highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

    tensor_name, tensor_slicing = (
        command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

    # The name must resolve to exactly one tensor in the stepper's closure.
    tensor_names = self._resolve_tensor_names(tensor_name)
    if not tensor_names:
      return cli_shared.error(
          self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] % tensor_name)
    elif len(tensor_names) > 1:
      return cli_shared.error(
          self._MESSAGE_TEMPLATES["MULTIPLE_TENSORS"] % tensor_name)
    else:
      tensor_name = tensor_names[0]

    try:
      tensor_value = self._node_stepper.get_tensor_value(tensor_name)
    except ValueError as e:
      # The stepper has no value for this tensor; surface its message as-is.
      return debugger_cli_common.RichTextLines([str(e)])

    return cli_shared.format_tensor(
        tensor_value,
        tensor_name,
        np_printoptions,
        print_all=parsed.print_all,
        tensor_slicing=tensor_slicing,
        highlight_options=highlight_options)
Example #7
0
    def cont(self, args, screen_info=None):
        """Continue-to action on the graph.

    Args:
      args: (list of str) command-line arguments for the "cont" command.
      screen_info: Information about screen; currently unused.

    Returns:
      (RichTextLines) A windowed view of the sorted transitive closure
      around the new position, followed by the result of the continuation.
    """

        _ = screen_info

        parsed = self.arg_parsers["cont"].parse_args(args)

        # Determine which node is being continued to, so the _next pointer can be
        # set properly.
        node_name = parsed.target_name.split(":")[0]
        if node_name not in self._sorted_nodes:
            return cli_shared.error(self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] %
                                    parsed.target_name)
        self._next = self._sorted_nodes.index(node_name)

        # Run the continuation on the stepper, then mark the node (sans
        # output slot) as completed.
        cont_result = self._node_stepper.cont(
            parsed.target_name,
            invalidate_from_updated_variables=(
                parsed.invalidate_from_updated_variables),
            restore_variable_values=parsed.restore_variable_values)
        self._completed_nodes.add(parsed.target_name.split(":")[0])

        # Report what happened: feed types, updated variables, and the
        # resulting tensor value with metadata.
        screen_output = debugger_cli_common.RichTextLines(
            ["Continued to %s:" % parsed.target_name, ""])
        screen_output.extend(self._report_last_feed_types())
        screen_output.extend(self._report_last_updated())
        screen_output.extend(
            tensor_format.format_tensor(cont_result,
                                        parsed.target_name,
                                        include_metadata=True))

        # Generate windowed view of the sorted transitive closure on which the
        # stepping is occurring.
        lower_bound = max(0, self._next - 2)
        upper_bound = min(len(self._sorted_nodes), self._next + 3)

        final_output = self.list_sorted_nodes(
            ["-l", str(lower_bound), "-u",
             str(upper_bound)])
        final_output.extend(debugger_cli_common.RichTextLines([""]))
        final_output.extend(screen_output)

        # Re-calculate the target of the next "step" action.
        self._calculate_next()

        return final_output
Example #8
0
  def cont(self, args, screen_info=None):
    """Continue-to action on the graph."""

    _ = screen_info

    parsed = self.arg_parsers["cont"].parse_args(args)

    # The node (without any output slot) being continued to; needed so the
    # _next pointer can be set properly.
    target_node = parsed.target_name.split(":")[0]
    if target_node not in self._sorted_nodes:
      return cli_shared.error(self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] %
                              parsed.target_name)
    self._next = self._sorted_nodes.index(target_node)

    # Run the continuation on the stepper and record the node as completed.
    cont_result = self._node_stepper.cont(
        parsed.target_name,
        invalidate_from_updated_variables=(
            parsed.invalidate_from_updated_variables),
        restore_variable_values=parsed.restore_variable_values)
    self._completed_nodes.add(parsed.target_name.split(":")[0])

    # Report the result: feed types, updated variables, and the tensor value.
    result_lines = debugger_cli_common.RichTextLines(
        ["Continued to %s:" % parsed.target_name, ""])
    result_lines.extend(self._report_last_feed_types())
    result_lines.extend(self._report_last_updated())
    result_lines.extend(
        tensor_format.format_tensor(
            cont_result, parsed.target_name, include_metadata=True))

    # Show a window of the sorted transitive closure around the new position.
    window_lo = max(0, self._next - 2)
    window_hi = min(len(self._sorted_nodes), self._next + 3)

    output = self.list_sorted_nodes(
        ["-l", str(window_lo), "-u", str(window_hi)])
    output.extend(debugger_cli_common.RichTextLines([""]))
    output.extend(result_lines)

    # Re-calculate the target of the next "step" action.
    self._calculate_next()

    return output
Example #9
0
  def _list_inputs_or_outputs(self,
                              recursive,
                              node_name,
                              depth,
                              control,
                              op_type,
                              do_outputs=False):
    """Helper function used by list_inputs and list_outputs.

    Format a list of lines to display the inputs or output recipients of a
    given node.

    Args:
      recursive: Whether the listing is to be done recursively, as a boolean.
      node_name: The name of the node in question, as a str.
      depth: Maximum recursion depth, applies only if recursive == True, as an
        int.
      control: Whether control inputs or control recipients are included, as a
        boolean.
      op_type: Whether the op types of the nodes are to be included, as a
        boolean.
      do_outputs: Whether recipients, instead of input nodes are to be
        listed, as a boolean.

    Returns:
      Input or recipient tree formatted as a RichTextLines object.
    """

    # Select the traversal direction: output recipients vs. inputs.
    if do_outputs:
      tracker = self._debug_dump.node_recipients
      type_str = "Recipients of"
      short_type_str = "recipients"
    else:
      tracker = self._debug_dump.node_inputs
      type_str = "Inputs to"
      short_type_str = "inputs"

    lines = []
    font_attr_segs = {}

    # Check if this is a tensor name, instead of a node name.
    node_name, _ = debug_data.parse_node_or_tensor_name(node_name)

    # Check if node exists.
    if not self._debug_dump.node_exists(node_name):
      return cli_shared.error(
          "There is no node named \"%s\" in the partition graphs" % node_name)

    # Non-recursive listing is simply a depth-1 traversal.
    if recursive:
      max_depth = depth
    else:
      max_depth = 1

    if control:
      include_ctrls_str = ", control %s included" % short_type_str
    else:
      include_ctrls_str = ""

    # Header line; bold the node name, which sits just before the trailing
    # double quote (hence the -1 offsets).
    line = "%s node \"%s\"" % (type_str, node_name)
    font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, "bold")
                        ]
    lines.append(line + " (Depth limit = %d%s):" % (max_depth, include_ctrls_str
                                                   ))

    # Command template the DFS attaches to listed nodes — presumably so that
    # clicking a node re-runs the listing rooted at it (li/lo with -c -r).
    command_template = "lo -c -r %s" if do_outputs else "li -c -r %s"
    self._dfs_from_node(
        lines,
        font_attr_segs,
        node_name,
        tracker,
        max_depth,
        1, [],
        control,
        op_type,
        command_template=command_template)

    # Include legend.
    lines.append("")
    lines.append("Legend:")
    lines.append("  (d): recursion depth = d.")

    if control:
      lines.append("  (Ctrl): Control input.")
    if op_type:
      lines.append("  [Op]: Input node has op type Op.")

    # TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.

    return debugger_cli_common.RichTextLines(
        lines, font_attr_segs=font_attr_segs)
Example #10
0
  def print_tensor(self, args, screen_info=None):
    """Command handler for print_tensor.

    Print value of a given dumped tensor.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    parsed = self._arg_parsers["print_tensor"].parse_args(args)

    # Cap numpy's line width at the screen width, when available.
    if screen_info and "cols" in screen_info:
      np_printoptions = {"linewidth": screen_info["cols"]}
    else:
      np_printoptions = {}

    # Determine if any range-highlighting is required.
    highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

    # Split an optional slicing spec from the tensor name.
    tensor_name, tensor_slicing = (
        command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

    node_name, output_slot = debug_data.parse_node_or_tensor_name(tensor_name)
    # Validate node existence only when partition graphs have been loaded.
    if (self._debug_dump.loaded_partition_graphs() and
        not self._debug_dump.node_exists(node_name)):
      output = cli_shared.error(
          "Node \"%s\" does not exist in partition graphs" % node_name)
      _add_main_menu(
          output,
          node_name=None,
          enable_list_tensors=True,
          enable_print_tensor=False)
      return output

    watch_keys = self._debug_dump.debug_watch_keys(node_name)
    if output_slot is None:
      # A bare node name was given. Infer the output slot if the node dumped
      # from exactly one slot; otherwise ask the user to disambiguate.
      output_slots = set()
      for watch_key in watch_keys:
        # The output slot is the second ":"-delimited field of the watch key.
        output_slots.add(int(watch_key.split(":")[1]))

      if len(output_slots) == 1:
        # There is only one dumped tensor from this node, so there is no
        # ambiguity. Proceed to show the only dumped tensor.
        output_slot = list(output_slots)[0]
      else:
        # There are more than one dumped tensors from this node. Indicate as
        # such.
        # TODO(cais): Provide an output screen with command links for
        # convenience.
        lines = [
            "Node \"%s\" generated debug dumps from %s output slots:" %
            (node_name, len(output_slots)),
            "Please specify the output slot: %s:x." % node_name
        ]
        output = debugger_cli_common.RichTextLines(lines)
        _add_main_menu(
            output,
            node_name=node_name,
            enable_list_tensors=True,
            enable_print_tensor=False)
        return output

    # Find debug dump data that match the tensor name (node name + output
    # slot).
    matching_data = []
    for watch_key in watch_keys:
      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
      for datum in debug_tensor_data:
        if datum.output_slot == output_slot:
          matching_data.append(datum)

    if not matching_data:
      # No dump for this tensor.
      output = cli_shared.error("Tensor \"%s\" did not generate any dumps." %
                                parsed.tensor_name)
    elif len(matching_data) == 1:
      # There is only one dump for this tensor.
      if parsed.number <= 0:
        output = cli_shared.format_tensor(
            matching_data[0].get_tensor(),
            matching_data[0].watch_key,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
      else:
        output = cli_shared.error(
            "Invalid number (%d) for tensor %s, which generated one dump." %
            (parsed.number, parsed.tensor_name))

      _add_main_menu(output, node_name=node_name, enable_print_tensor=False)
    else:
      # There are more than one dumps for this tensor.
      if parsed.number < 0:
        # No dump number was specified: list all dumps with clickable links.
        lines = [
            "Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
                                                   len(matching_data))
        ]
        font_attr_segs = {}

        for i, datum in enumerate(matching_data):
          # Time of the dump relative to run start (t0), in milliseconds.
          rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
          lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))
          # Make the watch key a menu item that prints this specific dump.
          command = "print_tensor %s -n %d" % (parsed.tensor_name, i)
          font_attr_segs[len(lines) - 1] = [(
              len(lines[-1]) - len(datum.watch_key), len(lines[-1]),
              debugger_cli_common.MenuItem(None, command))]

        lines.append("")
        lines.append(
            "You can use the -n (--number) flag to specify which dump to "
            "print.")
        lines.append("For example:")
        lines.append("  print_tensor %s -n 0" % parsed.tensor_name)

        output = debugger_cli_common.RichTextLines(
            lines, font_attr_segs=font_attr_segs)
      elif parsed.number >= len(matching_data):
        output = cli_shared.error(
            "Specified number (%d) exceeds the number of available dumps "
            "(%d) for tensor %s" %
            (parsed.number, len(matching_data), parsed.tensor_name))
      else:
        # Print the dump selected with the -n flag.
        output = cli_shared.format_tensor(
            matching_data[parsed.number].get_tensor(),
            matching_data[parsed.number].watch_key + " (dump #%d)" %
            parsed.number,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
      _add_main_menu(output, node_name=node_name, enable_print_tensor=False)

    return output
Example #11
0
  def node_info(self, args, screen_info=None):
    """Command handler for node_info.

    Query information about a given node: its op type, device, input and
    output neighbors, and optionally attributes, dumps, and traceback.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    # TODO(cais): Add annotation of substrings for node names, to facilitate
    # on-screen highlighting/selection of node names.
    _ = screen_info

    parsed = self._arg_parsers["node_info"].parse_args(args)

    # Accept either a node name or a tensor name (node with an output slot
    # attached); only the node part is used here.
    node_name, unused_slot = debug_data.parse_node_or_tensor_name(
        parsed.node_name)

    if not self._debug_dump.node_exists(node_name):
      output = cli_shared.error(
          "There is no node named \"%s\" in the partition graphs" % node_name)
      _add_main_menu(
          output,
          node_name=None,
          enable_list_tensors=True,
          enable_node_info=False,
          enable_list_inputs=False,
          enable_list_outputs=False)
      return output

    # TODO(cais): Provide UI glossary feature to explain to users what the
    # term "partition graph" means and how it is related to TF graph objects
    # in Python (graphs stripped per feeds/fetches and partitioned across
    # devices/hosts; what the C++ runtime actually executes in a run() call).

    # Header: 'Node <name>', with the node name rendered in bold.
    header = "Node %s" % node_name
    attr_segs = {0: [(len(header) - len(node_name), len(header), "bold")]}
    info_lines = [header, ""]
    info_lines.append("  Op: %s" % self._debug_dump.node_op_type(node_name))
    info_lines.append("  Device: %s" % self._debug_dump.node_device(node_name))
    output = debugger_cli_common.RichTextLines(
        info_lines, font_attr_segs=attr_segs)

    # Node inputs, non-control and control.
    inputs = self._debug_dump.node_inputs(node_name)
    ctrl_inputs = self._debug_dump.node_inputs(node_name, is_control=True)
    output.extend(self._format_neighbors("input", inputs, ctrl_inputs))

    # Node output recipients, non-control and control.
    recs = self._debug_dump.node_recipients(node_name)
    ctrl_recs = self._debug_dump.node_recipients(node_name, is_control=True)
    output.extend(self._format_neighbors("recipient", recs, ctrl_recs))

    # Optional sections, gated by command-line flags.
    if parsed.attributes:
      output.extend(self._list_node_attributes(node_name))
    if parsed.dumps:
      output.extend(self._list_node_dumps(node_name))
    if parsed.traceback:
      output.extend(self._render_node_traceback(node_name))

    _add_main_menu(output, node_name=node_name, enable_node_info=False)
    return output
Example #12
0
  def list_tensors(self, args, screen_info=None):
    """Command handler for list_tensors.

    List tensors dumped during debugged Session.run() call.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.

    Raises:
      re.error: If an invalid regular expression is supplied via the op-type
        or node-name filter flag.
    """

    # TODO(cais): Add annotations of substrings for dumped tensor names, to
    # facilitate on-screen highlighting/selection of node names.
    _ = screen_info

    parsed = self._arg_parsers["list_tensors"].parse_args(args)

    # Compile the optional regex filters and record human-readable
    # descriptions of them for the output header.
    # (Fix: removed a dead `output = []` assignment that was unconditionally
    # overwritten by the RichTextLines construction below.)
    filter_strs = []
    if parsed.op_type_filter:
      op_type_regex = re.compile(parsed.op_type_filter)
      filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter)
    else:
      op_type_regex = None

    if parsed.node_name_filter:
      node_name_regex = re.compile(parsed.node_name_filter)
      filter_strs.append("Node name regex filter: \"%s\"" %
                         parsed.node_name_filter)
    else:
      node_name_regex = None

    output = debugger_cli_common.RichTextLines(filter_strs)
    output.append("")

    if parsed.tensor_filter:
      try:
        filter_callable = self.get_tensor_filter(parsed.tensor_filter)
      except ValueError:
        output = cli_shared.error("There is no tensor filter named \"%s\"." %
                                  parsed.tensor_filter)
        _add_main_menu(output, node_name=None, enable_list_tensors=False)
        return output

      data_to_show = self._debug_dump.find(filter_callable)
    else:
      data_to_show = self._debug_dump.dumped_tensor_data

    # TODO(cais): Implement filter by lambda on tensor value.

    max_timestamp_width, max_dump_size_width, max_op_type_width = (
        self._measure_tensor_list_column_widths(data_to_show))

    # Sort the data.
    data_to_show = self._sort_dump_data_by(
        data_to_show, parsed.sort_by, parsed.reverse)

    output.extend(
        self._tensor_list_column_heads(parsed, max_timestamp_width,
                                       max_dump_size_width, max_op_type_width))

    dump_count = 0
    for dump in data_to_show:
      if node_name_regex and not node_name_regex.match(dump.node_name):
        continue

      if op_type_regex:
        op_type = self._debug_dump.node_op_type(dump.node_name)
        if not op_type_regex.match(op_type):
          continue

      # Assemble one row: timestamp (ms relative to t0), dump size, op type,
      # then the tensor name, space-padded to the measured column widths.
      rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
      dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
      dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot)
      op_type = self._debug_dump.node_op_type(dump.node_name)

      line = "[%.3f]" % rel_time
      line += " " * (max_timestamp_width - len(line))
      line += dump_size_str
      line += " " * (max_timestamp_width + max_dump_size_width - len(line))
      line += op_type
      line += " " * (max_timestamp_width + max_dump_size_width +
                     max_op_type_width - len(line))
      line += " %s" % dumped_tensor_name

      # Make the tensor name a clickable menu item ("pt <tensor>").
      output.append(
          line,
          font_attr_segs=[(
              len(line) - len(dumped_tensor_name), len(line),
              debugger_cli_common.MenuItem("", "pt %s" % dumped_tensor_name))])
      dump_count += 1

    if parsed.tensor_filter:
      output.prepend([
          "%d dumped tensor(s) passing filter \"%s\":" %
          (dump_count, parsed.tensor_filter)
      ])
    else:
      output.prepend(["%d dumped tensor(s):" % dump_count])

    _add_main_menu(output, node_name=None, enable_list_tensors=False)
    return output
Example #13
0
  def print_tensor(self, args, screen_info=None):
    """Command handler for print_tensor.

    Print value of a given dumped tensor.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    parsed = self._arg_parsers["print_tensor"].parse_args(args)

    # Cap numpy's line width at the screen width, when available.
    if screen_info and "cols" in screen_info:
      np_printoptions = {"linewidth": screen_info["cols"]}
    else:
      np_printoptions = {}

    # Determine if any range-highlighting is required.
    highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

    # Split an optional slicing spec from the tensor name.
    tensor_name, tensor_slicing = (
        command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

    node_name, output_slot = debug_data.parse_node_or_tensor_name(tensor_name)
    # This handler requires an explicit output slot (e.g., "node:0") in the
    # tensor name.
    if output_slot is None:
      return cli_shared.error("\"%s\" is not a valid tensor name" %
                              parsed.tensor_name)

    # Validate node existence only when partition graphs have been loaded.
    if (self._debug_dump.loaded_partition_graphs() and
        not self._debug_dump.node_exists(node_name)):
      return cli_shared.error(
          "Node \"%s\" does not exist in partition graphs" % node_name)

    watch_keys = self._debug_dump.debug_watch_keys(node_name)

    # Find debug dump data that match the tensor name (node name + output
    # slot).
    matching_data = []
    for watch_key in watch_keys:
      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
      for datum in debug_tensor_data:
        if datum.output_slot == output_slot:
          matching_data.append(datum)

    if not matching_data:
      # No dump for this tensor.
      return cli_shared.error(
          "Tensor \"%s\" did not generate any dumps." % parsed.tensor_name)
    elif len(matching_data) == 1:
      # There is only one dump for this tensor.
      if parsed.number <= 0:
        return cli_shared.format_tensor(
            matching_data[0].get_tensor(),
            matching_data[0].watch_key,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
      else:
        return cli_shared.error(
            "Invalid number (%d) for tensor %s, which generated one dump." %
            (parsed.number, parsed.tensor_name))
    else:
      # There are more than one dumps for this tensor.
      if parsed.number < 0:
        # No dump number was specified: list all available dumps.
        lines = [
            "Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
                                                   len(matching_data))
        ]

        for i, datum in enumerate(matching_data):
          # Time of the dump relative to run start (t0), in milliseconds.
          rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
          lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))

        lines.append("")
        lines.append(
            "Use the -n (--number) flag to specify which dump to print.")
        lines.append("For example:")
        lines.append("  print_tensor %s -n 0" % parsed.tensor_name)

        return debugger_cli_common.RichTextLines(lines)
      elif parsed.number >= len(matching_data):
        return cli_shared.error(
            "Specified number (%d) exceeds the number of available dumps "
            "(%d) for tensor %s" %
            (parsed.number, len(matching_data), parsed.tensor_name))
      else:
        # Print the dump selected with the -n flag.
        return cli_shared.format_tensor(
            matching_data[parsed.number].get_tensor(),
            matching_data[parsed.number].watch_key + " (dump #%d)" %
            parsed.number,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
Example #14
0
    def list_tensors(self, args, screen_info=None):
        """Command handler for list_tensors.

    List tensors dumped during debugged Session.run() call.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

        # Screen info is not used by this handler.
        _ = screen_info

        parsed = self._arg_parsers["list_tensors"].parse_args(args)

        # Compile the optional regex filters and record a human-readable
        # description of each active filter.
        filter_strs = []
        op_type_regex = None
        if parsed.op_type_filter:
            op_type_regex = re.compile(parsed.op_type_filter)
            filter_strs.append("Op type regex filter: \"%s\"" %
                               parsed.op_type_filter)

        node_name_regex = None
        if parsed.node_name_filter:
            node_name_regex = re.compile(parsed.node_name_filter)
            filter_strs.append("Node name regex filter: \"%s\"" %
                               parsed.node_name_filter)

        filter_output = debugger_cli_common.RichTextLines(filter_strs)

        if parsed.tensor_filter:
            try:
                filter_callable = self.get_tensor_filter(parsed.tensor_filter)
            except ValueError:
                output = cli_shared.error(
                    "There is no tensor filter named \"%s\"." %
                    parsed.tensor_filter)
                _add_main_menu(output,
                               node_name=None,
                               enable_list_tensors=False)
                return output
            data_to_show = self._debug_dump.find(filter_callable)
        else:
            data_to_show = self._debug_dump.dumped_tensor_data

        # One line per dump that passes all active filters; each tensor name
        # is made clickable via a MenuItem that runs "pt <tensor_name>".
        dump_lines = []
        attr_segs = {}
        dump_count = 0
        for dump in data_to_show:
            if node_name_regex and not node_name_regex.match(dump.node_name):
                continue
            if op_type_regex and not op_type_regex.match(
                    self._debug_dump.node_op_type(dump.node_name)):
                continue

            rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
            dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot)
            line = "[%.3f ms] %s" % (rel_time, dumped_tensor_name)
            dump_lines.append(line)
            attr_segs[len(dump_lines) - 1] = [
                (len(line) - len(dumped_tensor_name), len(line),
                 debugger_cli_common.MenuItem("",
                                              "pt %s" % dumped_tensor_name))
            ]
            dump_count += 1

        # Assemble: filter descriptions, blank line, then the dump listing.
        filter_output.append("")
        filter_output.extend(
            debugger_cli_common.RichTextLines(dump_lines,
                                              font_attr_segs=attr_segs))
        output = filter_output

        if parsed.tensor_filter:
            output.prepend([
                "%d dumped tensor(s) passing filter \"%s\":" %
                (dump_count, parsed.tensor_filter)
            ])
        else:
            output.prepend(["%d dumped tensor(s):" % dump_count])

        _add_main_menu(output, node_name=None, enable_list_tensors=False)
        return output
Example #15
0
    def node_info(self, args, screen_info=None):
        """Command handler for node_info.

    Query information about a given node.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

        # Screen info is not used by this handler.
        _ = screen_info

        parsed = self._arg_parsers["node_info"].parse_args(args)

        # Accept either a bare node name or a tensor name (node:slot); only
        # the node-name portion is relevant to this command.
        node_name, unused_slot = debug_data.parse_node_or_tensor_name(
            parsed.node_name)

        if not self._debug_dump.node_exists(node_name):
            output = cli_shared.error(
                "There is no node named \"%s\" in the partition graphs" %
                node_name)
            _add_main_menu(output,
                           node_name=None,
                           enable_list_tensors=True,
                           enable_node_info=False,
                           enable_list_inputs=False,
                           enable_list_outputs=False)
            return output

        # Basic node metadata: name, op type and device placement.
        lines = [
            "Node %s" % node_name,
            "",
            "  Op: %s" % self._debug_dump.node_op_type(node_name),
            "  Device: %s" % self._debug_dump.node_device(node_name),
        ]

        # Node inputs, non-control and control.
        lines.extend(
            self._format_neighbors(
                "input",
                self._debug_dump.node_inputs(node_name),
                self._debug_dump.node_inputs(node_name, is_control=True)))

        # Output recipients, non-control and control.
        lines.extend(
            self._format_neighbors(
                "recipient",
                self._debug_dump.node_recipients(node_name),
                self._debug_dump.node_recipients(node_name, is_control=True)))

        # Optional sections requested via command-line flags.
        if parsed.attributes:
            lines.extend(self._list_node_attributes(node_name))
        if parsed.dumps:
            lines.extend(self._list_node_dumps(node_name))

        output = debugger_cli_common.RichTextLines(lines)
        if parsed.traceback:
            output.extend(self._render_node_traceback(node_name))

        _add_main_menu(output, node_name=node_name, enable_node_info=False)
        return output
Example #16
0
    def _list_inputs_or_outputs(self,
                                recursive,
                                node_name,
                                depth,
                                control,
                                op_type,
                                do_outputs=False):
        """Helper function used by list_inputs and list_outputs.

    Format a list of lines to display the inputs or output recipients of a
    given node.

    Args:
      recursive: Whether the listing is to be done recursively, as a boolean.
      node_name: The name of the node in question, as a str.
      depth: Maximum recursion depth, applies only if recursive == True, as an
        int.
      control: Whether control inputs or control recipients are included, as a
        boolean.
      op_type: Whether the op types of the nodes are to be included, as a
        boolean.
      do_outputs: Whether recipients, instead of input nodes are to be
        listed, as a boolean.

    Returns:
      Input or recipient tree formatted as a RichTextLines object.
    """

        # Select the neighbor tracker and display strings for the requested
        # direction (output recipients vs. inputs).
        if do_outputs:
            tracker = self._debug_dump.node_recipients
            type_str = "Recipients of"
            short_type_str = "recipients"
        else:
            tracker = self._debug_dump.node_inputs
            type_str = "Inputs to"
            short_type_str = "inputs"

        # Tolerate tensor names (node:slot) by stripping the slot part.
        node_name, _ = debug_data.parse_node_or_tensor_name(node_name)

        if not self._debug_dump.node_exists(node_name):
            return cli_shared.error(
                "There is no node named \"%s\" in the partition graphs" %
                node_name)

        # Non-recursive listings are a depth-1 traversal of the same tree.
        max_depth = depth if recursive else 1
        include_ctrls_str = (", control %s included" % short_type_str
                             if control else "")

        lines = [
            "%s node \"%s\" (Depth limit = %d%s):" %
            (type_str, node_name, max_depth, include_ctrls_str)
        ]

        self._dfs_from_node(lines, node_name, tracker, max_depth, 1, [],
                            control, op_type)

        # Append a legend explaining the annotations used above.
        lines.append("")
        lines.append("Legend:")
        lines.append("  (d): recursion depth = d.")
        if control:
            lines.append("  (Ctrl): Control input.")
        if op_type:
            lines.append("  [Op]: Input node has op type Op.")

        # TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.

        return debugger_cli_common.RichTextLines(lines)
Example #17
0
    def print_tensor(self, args, screen_info=None):
        """Command handler for print_tensor.

    Print value of a given dumped tensor.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

        parsed = self._arg_parsers["print_tensor"].parse_args(args)

        # Constrain numpy's print line width to the screen width, if known.
        if screen_info and "cols" in screen_info:
            np_printoptions = {"linewidth": screen_info["cols"]}
        else:
            np_printoptions = {}

        # Determine if any range-highlighting is required.
        highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

        # Split off any slicing spec (e.g. "[0, :]") from the tensor name.
        tensor_name, tensor_slicing = (
            command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

        node_name, output_slot = debug_data.parse_node_or_tensor_name(
            tensor_name)
        # Only validate node existence when partition graphs are available;
        # otherwise fall through and rely on the dump lookup below.
        if (self._debug_dump.loaded_partition_graphs()
                and not self._debug_dump.node_exists(node_name)):
            output = cli_shared.error(
                "Node \"%s\" does not exist in partition graphs" % node_name)
            _add_main_menu(output,
                           node_name=None,
                           enable_list_tensors=True,
                           enable_print_tensor=False)
            return output

        watch_keys = self._debug_dump.debug_watch_keys(node_name)
        # If no output slot was given (bare node name), try to infer it from
        # the watch keys ("<node>:<slot>"); bail out if ambiguous.
        if output_slot is None:
            output_slots = set()
            for watch_key in watch_keys:
                output_slots.add(int(watch_key.split(":")[1]))

            if len(output_slots) == 1:
                # There is only one dumped tensor from this node, so there is no
                # ambiguity. Proceed to show the only dumped tensor.
                output_slot = list(output_slots)[0]
            else:
                # There are more than one dumped tensors from this node. Indicate as
                # such.
                # TODO(cais): Provide an output screen with command links for
                # convenience.
                lines = [
                    "Node \"%s\" generated debug dumps from %s output slots:" %
                    (node_name, len(output_slots)),
                    "Please specify the output slot: %s:x." % node_name
                ]
                output = debugger_cli_common.RichTextLines(lines)
                _add_main_menu(output,
                               node_name=node_name,
                               enable_list_tensors=True,
                               enable_print_tensor=False)
                return output

        # Find debug dump data that match the tensor name (node name + output
        # slot).
        matching_data = []
        for watch_key in watch_keys:
            debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
            for datum in debug_tensor_data:
                if datum.output_slot == output_slot:
                    matching_data.append(datum)

        if not matching_data:
            # No dump for this tensor.
            output = cli_shared.error(
                "Tensor \"%s\" did not generate any dumps." %
                parsed.tensor_name)
        elif len(matching_data) == 1:
            # There is only one dump for this tensor.
            # A non-positive -n value (including the presumable default)
            # selects that single dump; a positive one is out of range.
            if parsed.number <= 0:
                output = cli_shared.format_tensor(
                    matching_data[0].get_tensor(),
                    matching_data[0].watch_key,
                    np_printoptions,
                    print_all=parsed.print_all,
                    tensor_slicing=tensor_slicing,
                    highlight_options=highlight_options)
            else:
                output = cli_shared.error(
                    "Invalid number (%d) for tensor %s, which generated one dump."
                    % (parsed.number, parsed.tensor_name))

            _add_main_menu(output,
                           node_name=node_name,
                           enable_print_tensor=False)
        else:
            # There are more than one dumps for this tensor.
            if parsed.number < 0:
                # No dump index given: list all available dumps with their
                # relative timestamps (ms since dump start, t0).
                lines = [
                    "Tensor \"%s\" generated %d dumps:" %
                    (parsed.tensor_name, len(matching_data))
                ]

                for i, datum in enumerate(matching_data):
                    rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
                    lines.append("#%d [%.3f ms] %s" %
                                 (i, rel_time, datum.watch_key))

                lines.append("")
                lines.append(
                    "Use the -n (--number) flag to specify which dump to print."
                )
                lines.append("For example:")
                lines.append("  print_tensor %s -n 0" % parsed.tensor_name)

                output = debugger_cli_common.RichTextLines(lines)
            elif parsed.number >= len(matching_data):
                output = cli_shared.error(
                    "Specified number (%d) exceeds the number of available dumps "
                    "(%d) for tensor %s" %
                    (parsed.number, len(matching_data), parsed.tensor_name))
            else:
                # Valid dump index: print that dump, labeled with its index.
                output = cli_shared.format_tensor(
                    matching_data[parsed.number].get_tensor(),
                    matching_data[parsed.number].watch_key +
                    " (dump #%d)" % parsed.number,
                    np_printoptions,
                    print_all=parsed.print_all,
                    tensor_slicing=tensor_slicing,
                    highlight_options=highlight_options)
            _add_main_menu(output,
                           node_name=node_name,
                           enable_print_tensor=False)

        return output
Example #18
0
  def list_tensors(self, args, screen_info=None):
    """Command handler for list_tensors.

    List tensors dumped during debugged Session.run() call.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    # Screen info is not used by this handler.
    _ = screen_info

    parsed = self._arg_parsers["list_tensors"].parse_args(args)

    # Compile the optional regex filters and describe each active one.
    filter_strs = []
    op_type_regex = None
    if parsed.op_type_filter:
      op_type_regex = re.compile(parsed.op_type_filter)
      filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter)

    node_name_regex = None
    if parsed.node_name_filter:
      node_name_regex = re.compile(parsed.node_name_filter)
      filter_strs.append("Node name regex filter: \"%s\"" %
                         parsed.node_name_filter)

    if parsed.tensor_filter:
      try:
        filter_callable = self.get_tensor_filter(parsed.tensor_filter)
      except ValueError:
        return cli_shared.error(
            "There is no tensor filter named \"%s\"." % parsed.tensor_filter)
      data_to_show = self._debug_dump.find(filter_callable)
    else:
      data_to_show = self._debug_dump.dumped_tensor_data

    # TODO(cais): Implement filter by lambda on tensor value.

    # One line per dump that passes all active filters, with the dump's
    # timestamp shown relative to the start of dumping (t0), in ms.
    dump_lines = []
    dump_count = 0
    for dump in data_to_show:
      if node_name_regex and not node_name_regex.match(dump.node_name):
        continue
      if op_type_regex and not op_type_regex.match(
          self._debug_dump.node_op_type(dump.node_name)):
        continue

      rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
      dump_lines.append("[%.3f ms] %s:%d" % (rel_time, dump.node_name,
                                             dump.output_slot))
      dump_count += 1

    # Assemble: count header, filter descriptions, blank line, dump lines.
    if parsed.tensor_filter:
      header = ("%d dumped tensor(s) passing filter \"%s\":" %
                (dump_count, parsed.tensor_filter))
    else:
      header = "%d dumped tensor(s):" % dump_count

    return debugger_cli_common.RichTextLines(
        [header] + filter_strs + [""] + dump_lines)
Example #19
0
  def cont(self, args, screen_info=None):
    """Continue-to action on the graph.

    Continues stepper execution to the target node/tensor named in args,
    then renders: a windowed view of the sorted transitive closure around
    the new position, the feeds the stepper used (color-coded by feed
    type), and the value of the continued-to tensor.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list
        of str.
      screen_info: Optional dict input containing screen information such
        as cols. (Currently unused.)

    Returns:
      Output text lines as a RichTextLines object.
    """

    _ = screen_info

    parsed = self.arg_parsers["cont"].parse_args(args)

    # Determine which node is being continued to, so the _next pointer can be
    # set properly.
    node_name = parsed.target_name.split(":")[0]
    if node_name not in self._sorted_nodes:
      return cli_shared.error(self._MESSAGE_TEMPLATES["NOT_IN_CLOSURE"] %
                              parsed.target_name)
    self._next = self._sorted_nodes.index(node_name)

    # Perform the actual continue-to on the stepper; optionally restore
    # variable values per the command-line flag.
    cont_result = self._node_stepper.cont(
        parsed.target_name,
        restore_variable_values=parsed.restore_variable_values)
    self._completed_nodes.add(parsed.target_name.split(":")[0])

    feed_types = self._node_stepper.last_feed_types()

    lines = ["Continued to %s:" % parsed.target_name, ""]
    font_attr_segs = {}
    lines.append("Stepper used feeds:")
    # line_counter tracks the index of the next appended line so the color
    # segments in font_attr_segs land on the correct rows.
    line_counter = len(lines)

    if feed_types:
      for feed_name in feed_types:
        feed_info_line = "  %s : %s" % (feed_name, feed_types[feed_name])
        lines.append(feed_info_line)
        # Color the feed-type portion of the line: green for handle feeds,
        # yellow for override feeds.
        if feed_types[feed_name] == stepper.NodeStepper.FEED_TYPE_HANDLE:
          font_attr_segs[line_counter] = [
              (len(feed_name) + 2, len(feed_info_line), "green")
          ]
        elif feed_types[feed_name] == stepper.NodeStepper.FEED_TYPE_OVERRIDE:
          font_attr_segs[line_counter] = [
              (len(feed_name) + 2, len(feed_info_line), "yellow")
          ]
        line_counter += 1
    else:
      lines.append("  (No feeds)")
    lines.append("")

    screen_output = debugger_cli_common.RichTextLines(
        lines, font_attr_segs=font_attr_segs)

    # Show the value of the continued-to tensor, with metadata.
    tensor_output = tensor_format.format_tensor(
        cont_result, parsed.target_name,
        include_metadata=True)
    screen_output.extend(tensor_output)

    # Generate windowed view of the sorted transitive closure on which the
    # stepping is occurring.
    lower_bound = max(0, self._next - 2)
    upper_bound = min(len(self._sorted_nodes), self._next + 3)

    final_output = self.list_sorted_nodes(
        ["-l", str(lower_bound), "-u", str(upper_bound)])
    final_output.extend(debugger_cli_common.RichTextLines([""]))
    final_output.extend(screen_output)

    # Re-calculate the target of the next "step" action.
    self._calculate_next()

    return final_output