Example #1
  def print_tensor(self, args, screen_info=None):
    """Command handler for print_tensor.

    Print value of a given dumped tensor.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    if screen_info and "cols" in screen_info:
      np_printoptions = {"linewidth": screen_info["cols"]}
    else:
      np_printoptions = {}

    parsed = self._arg_parsers["print_tensor"].parse_args(args)

    node_name, output_slot = debug_data.parse_node_or_tensor_name(
        parsed.tensor_name)
    if output_slot is None:
      return self._error("\"%s\" is not a valid tensor name" %
                         parsed.tensor_name)

    if not self._debug_dump.node_exists(node_name):
      return self._error(
          "Node \"%s\" does not exist in partition graphs" % node_name)

    watch_keys = self._debug_dump.debug_watch_keys(node_name)

    # Find debug dump data that match the tensor name (node name + output
    # slot).
    matching_data = []
    for watch_key in watch_keys:
      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
      for datum in debug_tensor_data:
        if datum.output_slot == output_slot:
          matching_data.append(datum)

    if not matching_data:
      return self._error(
          "Tensor \"%s\" did not generate any dumps." % parsed.tensor_name)

    # TODO(cais): In the case of multiple dumps from the same tensor, require
    #   explicit specification of the DebugOp and the temporal order.
    if len(matching_data) > 1:
      return self._error(
          "print_tensor logic for multiple dumped records has not been "
          "implemented.")

    return tensor_format.format_tensor(
        matching_data[0].get_tensor(),
        matching_data[0].watch_key,
        include_metadata=True,
        np_printoptions=np_printoptions)
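
A note on the naming conventions these handlers rely on: a tensor name is a node name plus an output slot joined by a colon, and a debug watch key appends the debug op to the tensor name. This is consistent with the parsing tests further down this page and with the watch_key.split(":")[1] usage in a later example; the concrete names below are hypothetical.

# Hypothetical names illustrating the assumed convention.
tensor_name = "hidden/MatMul:0"              # <node name>:<output slot>
watch_key = "hidden/MatMul:0:DebugIdentity"  # <tensor name>:<debug op>

node_name, slot = tensor_name.rsplit(":", 1)
print(node_name, int(slot))  # -> hidden/MatMul 0
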
Example #2
    def print_tensor(self, args, screen_info=None):
        """Command handler for print_tensor.

        Print value of a given dumped tensor.

        Args:
          args: Command-line arguments, excluding the command prefix, as a list of
            str.
          screen_info: Optional dict input containing screen information such as
            cols.

        Returns:
          Output text lines as a RichTextLines object.
        """

        if screen_info and "cols" in screen_info:
            np_printoptions = {"linewidth": screen_info["cols"]}
        else:
            np_printoptions = {}

        parsed = self._arg_parsers["print_tensor"].parse_args(args)

        node_name, output_slot = debug_data.parse_node_or_tensor_name(
            parsed.tensor_name)
        if output_slot is None:
            return self._error("\"%s\" is not a valid tensor name" %
                               parsed.tensor_name)

        if not self._debug_dump.node_exists(node_name):
            return self._error(
                "Node \"%s\" does not exist in partition graphs" % node_name)

        watch_keys = self._debug_dump.debug_watch_keys(node_name)

        # Find debug dump data that match the tensor name (node name + output
        # slot).
        matching_data = []
        for watch_key in watch_keys:
            debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
            for datum in debug_tensor_data:
                if datum.output_slot == output_slot:
                    matching_data.append(datum)

        if not matching_data:
            return self._error("Tensor \"%s\" did not generate any dumps." %
                               parsed.tensor_name)

        # TODO(cais): In the case of multiple dumps from the same tensor, require
        #   explicit specification of the DebugOp and the temporal order.
        if len(matching_data) > 1:
            return self._error(
                "print_tensor logic for multiple dumped records has not been "
                "implemented.")

        return tensor_format.format_tensor(matching_data[0].get_tensor(),
                                           matching_data[0].watch_key,
                                           include_metadata=True,
                                           np_printoptions=np_printoptions)
Example #3
  def _get_node(self, element):
    """Get the node of a graph element.

    Args:
      element: A graph element (Op, Tensor or Node)

    Returns:
      The node associated with element in the graph.
    """

    node_name, _ = debug_data.parse_node_or_tensor_name(element.name)
    return self._sess.graph.as_graph_element(node_name)
Example #4
    def testParseTensorName(self):
        node_name, slot = debug_data.parse_node_or_tensor_name(
            "namespace1/node_2:3")

        self.assertEqual("namespace1/node_2", node_name)
        self.assertEqual(3, slot)
Example #5
    def testParseNodeName(self):
        node_name, slot = debug_data.parse_node_or_tensor_name(
            "namespace1/node_1")

        self.assertEqual("namespace1/node_1", node_name)
        self.assertIsNone(slot)
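
The two tests above pin down the contract of debug_data.parse_node_or_tensor_name: a trailing ":<int>" is split off as the output slot, and a bare node name yields a slot of None. A minimal standalone sketch that satisfies these tests (not the library's actual implementation):

def parse_node_or_tensor_name(name):
    """Return (node_name, output_slot); output_slot is None for node names."""
    if ":" in name:
        node_name, slot_str = name.rsplit(":", 1)
        return node_name, int(slot_str)
    return name, None

assert parse_node_or_tensor_name("namespace1/node_2:3") == ("namespace1/node_2", 3)
assert parse_node_or_tensor_name("namespace1/node_1") == ("namespace1/node_1", None)
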
Example #6
  def _dfs_from_node(self,
                     lines,
                     node_name,
                     tracker,
                     max_depth,
                     depth,
                     unfinished,
                     include_control=False,
                     show_op_type=False):
    """Perform depth-first search (DFS) traversal of a node's input tree.

    Args:
      lines: Text lines to append to, as a list of str.
      node_name: Name of the node, as a str. This arg is updated during the
        recursion.
      tracker: A callable that takes one str as the node name input and
        returns a list of str as the inputs/outputs.
        This makes the function general enough to be used with both
        node-input and node-output tracking.
      max_depth: Maximum recursion depth, as an int.
      depth: Current recursion depth. This arg is updated during the
        recursion.
      unfinished: A stack of unfinished recursion depths, as a list of int.
      include_control: Whether control dependencies are to be included as
        inputs (and marked as such).
      show_op_type: Whether the op types of the input nodes are to be
        displayed alongside the nodes' names.
    """

    # Make a shallow copy of the list because it may be extended later.
    all_inputs = copy.copy(tracker(node_name, is_control=False))
    is_ctrl = [False] * len(all_inputs)
    if include_control:
      # Sort control inputs or recipients in alphabetical order of the node
      # names.
      ctrl_inputs = sorted(tracker(node_name, is_control=True))
      all_inputs.extend(ctrl_inputs)
      is_ctrl.extend([True] * len(ctrl_inputs))

    if not all_inputs:
      if depth == 1:
        lines.append("  [None]")

      return

    unfinished.append(depth)

    # Create depth-dependent hanging indent for the line.
    hang = ""
    for k in xrange(depth):
      if k < depth - 1:
        if k + 1 in unfinished:
          hang += HANG_UNFINISHED
        else:
          hang += HANG_FINISHED
      else:
        hang += HANG_SUFFIX

    if all_inputs and depth > max_depth:
      lines.append(hang + ELLIPSIS)
      unfinished.pop()
      return

    hang += DEPTH_TEMPLATE % depth

    for i in xrange(len(all_inputs)):
      inp = all_inputs[i]
      if is_ctrl[i]:
        ctrl_str = CTRL_LABEL
      else:
        ctrl_str = ""

      op_type_str = ""
      if show_op_type:
        op_type_str = OP_TYPE_TEMPLATE % self._debug_dump.node_op_type(inp)

      if i == len(all_inputs) - 1:
        unfinished.pop()

      lines.append(hang + ctrl_str + op_type_str + inp)

      # Recursive call.
      # The input's/output's name can be a tensor name, in the case of node
      # with >1 output slots.
      inp_node_name, _ = debug_data.parse_node_or_tensor_name(inp)
      self._dfs_from_node(
          lines,
          inp_node_name,
          tracker,
          max_depth,
          depth + 1,
          unfinished,
          include_control=include_control,
          show_op_type=show_op_type)
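
The HANG_UNFINISHED, HANG_FINISHED, HANG_SUFFIX, DEPTH_TEMPLATE, OP_TYPE_TEMPLATE, CTRL_LABEL and ELLIPSIS constants are module-level and not shown in these excerpts. Plausible definitions, consistent with the legend printed by _list_inputs_or_outputs below; the exact strings are an assumption:

HANG_UNFINISHED = "|  "     # Indent segment for a depth with siblings left.
HANG_FINISHED = "   "       # Indent segment for a depth that is finished.
HANG_SUFFIX = "|- "         # Connector drawn just before each node name.
DEPTH_TEMPLATE = "(%d) "    # Recursion depth marker; the "(d)" of the legend.
OP_TYPE_TEMPLATE = "[%s] "  # Op type marker; the "[Op]" of the legend.
CTRL_LABEL = "(Ctrl) "      # Marks control inputs; the "(Ctrl)" of the legend.
ELLIPSIS = "..."            # Emitted when the depth limit is exceeded.
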
Example #7
  def _list_inputs_or_outputs(self,
                              recursive,
                              node_name,
                              depth,
                              control,
                              op_type,
                              do_outputs=False):
    """Helper function used by list_inputs and list_outputs.

    Format a list of lines to display the inputs or output recipients of a
    given node.

    Args:
      recursive: Whether the listing is to be done recursively, as a boolean.
      node_name: The name of the node in question, as a str.
      depth: Maximum recursion depth, applies only if recursive == True, as an
        int.
      control: Whether control inputs or control recipients are included, as a
        boolean.
      op_type: Whether the op types of the nodes are to be included, as a
        boolean.
      do_outputs: Whether recipients, instead of input nodes, are to be
        listed, as a boolean.

    Returns:
      Input or recipient tree formatted as a RichTextLines object.
    """

    if do_outputs:
      tracker = self._debug_dump.node_recipients
      type_str = "Recipients of"
      short_type_str = "recipients"
    else:
      tracker = self._debug_dump.node_inputs
      type_str = "Inputs to"
      short_type_str = "inputs"

    lines = []

    # Check if this is a tensor name, instead of a node name.
    node_name, _ = debug_data.parse_node_or_tensor_name(node_name)

    # Check if node exists.
    if not self._debug_dump.node_exists(node_name):
      return cli_shared.error(
          "There is no node named \"%s\" in the partition graphs" % node_name)

    if recursive:
      max_depth = depth
    else:
      max_depth = 1

    if control:
      include_ctrls_str = ", control %s included" % short_type_str
    else:
      include_ctrls_str = ""

    lines.append("%s node \"%s\" (Depth limit = %d%s):" %
                 (type_str, node_name, max_depth, include_ctrls_str))

    self._dfs_from_node(lines, node_name, tracker, max_depth, 1, [], control,
                        op_type)

    # Include legend.
    lines.append("")
    lines.append("Legend:")
    lines.append("  (d): recursion depth = d.")

    if control:
      lines.append("  (Ctrl): Control input.")
    if op_type:
      lines.append("  [Op]: Input node has op type Op.")

    # TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.

    return debugger_cli_common.RichTextLines(lines)
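
Putting the two methods together, the rendered tree looks roughly like this for a hypothetical node d with inputs c and e, where c in turn reads variables a and b (assuming the constant definitions sketched above, with op types shown):

  Inputs to node "d" (Depth limit = 3):
  |- (1) [Add] c
  |  |- (2) [VariableV2] a
  |  |- (2) [VariableV2] b
  |- (1) [Const] e

  Legend:
    (d): recursion depth = d.
    [Op]: Input node has op type Op.
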
Example #8
    def print_tensor(self, args, screen_info=None):
        """Command handler for print_tensor.

        Print value of a given dumped tensor.

        Args:
          args: Command-line arguments, excluding the command prefix, as a list of
            str.
          screen_info: Optional dict input containing screen information such as
            cols.

        Returns:
          Output text lines as a RichTextLines object.
        """

        parsed = self._arg_parsers["print_tensor"].parse_args(args)

        if screen_info and "cols" in screen_info:
            np_printoptions = {"linewidth": screen_info["cols"]}
        else:
            np_printoptions = {}

        # Determine if any range-highlighting is required.
        highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

        tensor_name, tensor_slicing = (
            command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

        node_name, output_slot = debug_data.parse_node_or_tensor_name(
            tensor_name)
        if (self._debug_dump.loaded_partition_graphs()
                and not self._debug_dump.node_exists(node_name)):
            output = cli_shared.error(
                "Node \"%s\" does not exist in partition graphs" % node_name)
            _add_main_menu(output,
                           node_name=None,
                           enable_list_tensors=True,
                           enable_print_tensor=False)
            return output

        watch_keys = self._debug_dump.debug_watch_keys(node_name)
        if output_slot is None:
            output_slots = set()
            for watch_key in watch_keys:
                output_slots.add(int(watch_key.split(":")[1]))

            if len(output_slots) == 1:
                # There is only one dumped tensor from this node, so there is no
                # ambiguity. Proceed to show the only dumped tensor.
                output_slot = list(output_slots)[0]
            else:
                # There is more than one dumped tensor from this node. Indicate as
                # such.
                # TODO(cais): Provide an output screen with command links for
                # convenience.
                lines = [
                    "Node \"%s\" generated debug dumps from %s output slots:" %
                    (node_name, len(output_slots)),
                    "Please specify the output slot: %s:x." % node_name
                ]
                output = debugger_cli_common.RichTextLines(lines)
                _add_main_menu(output,
                               node_name=node_name,
                               enable_list_tensors=True,
                               enable_print_tensor=False)
                return output

        # Find debug dump data that match the tensor name (node name + output
        # slot).
        matching_data = []
        for watch_key in watch_keys:
            debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
            for datum in debug_tensor_data:
                if datum.output_slot == output_slot:
                    matching_data.append(datum)

        if not matching_data:
            # No dump for this tensor.
            output = cli_shared.error(
                "Tensor \"%s\" did not generate any dumps." %
                parsed.tensor_name)
        elif len(matching_data) == 1:
            # There is only one dump for this tensor.
            if parsed.number <= 0:
                output = cli_shared.format_tensor(
                    matching_data[0].get_tensor(),
                    matching_data[0].watch_key,
                    np_printoptions,
                    print_all=parsed.print_all,
                    tensor_slicing=tensor_slicing,
                    highlight_options=highlight_options)
            else:
                output = cli_shared.error(
                    "Invalid number (%d) for tensor %s, which generated one dump."
                    % (parsed.number, parsed.tensor_name))

            _add_main_menu(output,
                           node_name=node_name,
                           enable_print_tensor=False)
        else:
            # There is more than one dump for this tensor.
            if parsed.number < 0:
                lines = [
                    "Tensor \"%s\" generated %d dumps:" %
                    (parsed.tensor_name, len(matching_data))
                ]

                for i, datum in enumerate(matching_data):
                    rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
                    lines.append("#%d [%.3f ms] %s" %
                                 (i, rel_time, datum.watch_key))

                lines.append("")
                lines.append(
                    "Use the -n (--number) flag to specify which dump to print."
                )
                lines.append("For example:")
                lines.append("  print_tensor %s -n 0" % parsed.tensor_name)

                output = debugger_cli_common.RichTextLines(lines)
            elif parsed.number >= len(matching_data):
                output = cli_shared.error(
                    "Specified number (%d) exceeds the number of available dumps "
                    "(%d) for tensor %s" %
                    (parsed.number, len(matching_data), parsed.tensor_name))
            else:
                output = cli_shared.format_tensor(
                    matching_data[parsed.number].get_tensor(),
                    matching_data[parsed.number].watch_key +
                    " (dump #%d)" % parsed.number,
                    np_printoptions,
                    print_all=parsed.print_all,
                    tensor_slicing=tensor_slicing,
                    highlight_options=highlight_options)
            _add_main_menu(output,
                           node_name=node_name,
                           enable_print_tensor=False)

        return output
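
For orientation, a hedged sketch of how this handler might be driven; the analyzer instance and tensor name are hypothetical, and print_tensor is assumed to be registered in self._arg_parsers as in the other examples:

# "analyzer" wraps the DebugDumpDir of a previous Session.run().
out = analyzer.print_tensor(["hidden/weights:0"], screen_info={"cols": 80})
print("\n".join(out.lines))

# If the tensor was dumped more than once, select a dump with -n:
out = analyzer.print_tensor(["hidden/weights:0", "-n", "1"])
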
Example #9
  def node_info(self, args, screen_info=None):
    """Command handler for node_info.

    Query information about a given node.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    # TODO(cais): Add annotation of substrings for node names, to facilitate
    # on-screen highlighting/selection of node names.
    _ = screen_info

    parsed = self._arg_parsers["node_info"].parse_args(args)

    # Get a node name, regardless of whether the input is a node name (without
    # output slot attached) or a tensor name (with output slot attached).
    node_name, unused_slot = debug_data.parse_node_or_tensor_name(
        parsed.node_name)

    if not self._debug_dump.node_exists(node_name):
      return cli_shared.error(
          "There is no node named \"%s\" in the partition graphs" % node_name)

    # TODO(cais): Provide UI glossary feature to explain to users what the
    # term "partition graph" means and how it is related to TF graph objects
    # in Python. The information can be along the line of:
    # "A tensorflow graph defined in Python is stripped of unused ops
    # according to the feeds and fetches and divided into a number of
    # partition graphs that may be distributed among multiple devices and
    # hosts. The partition graphs are what's actually executed by the C++
    # runtime during a run() call."

    lines = ["Node %s" % node_name]
    lines.append("")
    lines.append("  Op: %s" % self._debug_dump.node_op_type(node_name))
    lines.append("  Device: %s" % self._debug_dump.node_device(node_name))

    # List node inputs (non-control and control).
    inputs = self._debug_dump.node_inputs(node_name)
    ctrl_inputs = self._debug_dump.node_inputs(node_name, is_control=True)

    input_lines = self._format_neighbors("input", inputs, ctrl_inputs)
    lines.extend(input_lines)

    # List node output recipients (non-control and control).
    recs = self._debug_dump.node_recipients(node_name)
    ctrl_recs = self._debug_dump.node_recipients(node_name, is_control=True)

    rec_lines = self._format_neighbors("recipient", recs, ctrl_recs)
    lines.extend(rec_lines)

    # Optional: List attributes of the node.
    if parsed.attributes:
      lines.extend(self._list_node_attributes(node_name))

    # Optional: List dumps available from the node.
    if parsed.dumps:
      lines.extend(self._list_node_dumps(node_name))

    return debugger_cli_common.RichTextLines(lines)
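
_format_neighbors, _list_node_attributes and _list_node_dumps are not included in these excerpts. A plausible sketch of _format_neighbors, inferred only from how node_info calls it; the exact layout is an assumption:

def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):
    """Format non-control and control neighbors as indented text lines."""
    lines = [""]
    lines.append("  %d %s(s) + %d control %s(s):" %
                 (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))
    for name in non_ctrls:
        lines.append("    [%s] %s" % (self._debug_dump.node_op_type(name), name))
    if ctrls:
        lines.append("  %d control %s(s):" % (len(ctrls), neighbor_type))
        for name in ctrls:
            lines.append("    (Ctrl) %s" % name)
    return lines
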
Example #10
    def _dfs_from_node(self,
                       lines,
                       node_name,
                       tracker,
                       max_depth,
                       depth,
                       unfinished,
                       include_control=False,
                       show_op_type=False):
        """Perform depth-first search (DFS) traversal of a node's input tree.

        Args:
          lines: Text lines to append to, as a list of str.
          node_name: Name of the node, as a str. This arg is updated during the
            recursion.
          tracker: A callable that takes one str as the node name input and
            returns a list of str as the inputs/outputs.
            This makes the function general enough to be used with both
            node-input and node-output tracking.
          max_depth: Maximum recursion depth, as an int.
          depth: Current recursion depth. This arg is updated during the
            recursion.
          unfinished: A stack of unfinished recursion depths, as a list of int.
          include_control: Whether control dependencies are to be included as
            inputs (and marked as such).
          show_op_type: Whether the op types of the input nodes are to be
            displayed alongside the nodes' names.
        """

        # Make a shallow copy of the list because it may be extended later.
        all_inputs = copy.copy(tracker(node_name, is_control=False))
        is_ctrl = [False] * len(all_inputs)
        if include_control:
            # Sort control inputs or recipients in alphabetical order of the node
            # names.
            ctrl_inputs = sorted(tracker(node_name, is_control=True))
            all_inputs.extend(ctrl_inputs)
            is_ctrl.extend([True] * len(ctrl_inputs))

        if not all_inputs:
            if depth == 1:
                lines.append("  [None]")

            return

        unfinished.append(depth)

        # Create depth-dependent hanging indent for the line.
        hang = ""
        for k in xrange(depth):
            if k < depth - 1:
                if k + 1 in unfinished:
                    hang += HANG_UNFINISHED
                else:
                    hang += HANG_FINISHED
            else:
                hang += HANG_SUFFIX

        if all_inputs and depth > max_depth:
            lines.append(hang + ELLIPSIS)
            unfinished.pop()
            return

        hang += DEPTH_TEMPLATE % depth

        for i in xrange(len(all_inputs)):
            inp = all_inputs[i]
            if is_ctrl[i]:
                ctrl_str = CTRL_LABEL
            else:
                ctrl_str = ""

            op_type_str = ""
            if show_op_type:
                op_type_str = OP_TYPE_TEMPLATE % self._debug_dump.node_op_type(
                    inp)

            if i == len(all_inputs) - 1:
                unfinished.pop()

            lines.append(hang + ctrl_str + op_type_str + inp)

            # Recursive call.
            # The input's/output's name can be a tensor name, in the case of node
            # with >1 output slots.
            inp_node_name, _ = debug_data.parse_node_or_tensor_name(inp)
            self._dfs_from_node(lines,
                                inp_node_name,
                                tracker,
                                max_depth,
                                depth + 1,
                                unfinished,
                                include_control=include_control,
                                show_op_type=show_op_type)
Example #11
    def _list_inputs_or_outputs(self,
                                recursive,
                                node_name,
                                depth,
                                control,
                                op_type,
                                do_outputs=False):
        """Helper function used by list_inputs and list_outputs.

        Format a list of lines to display the inputs or output recipients of a
        given node.

        Args:
          recursive: Whether the listing is to be done recursively, as a boolean.
          node_name: The name of the node in question, as a str.
          depth: Maximum recursion depth, applies only if recursive == True, as an
            int.
          control: Whether control inputs or control recipients are included, as a
            boolean.
          op_type: Whether the op types of the nodes are to be included, as a
            boolean.
          do_outputs: Whether recipients, instead of input nodes, are to be
            listed, as a boolean.

        Returns:
          Input or recipient tree formatted as a RichTextLines object.
        """

        if do_outputs:
            tracker = self._debug_dump.node_recipients
            type_str = "Recipients of"
            short_type_str = "recipients"
        else:
            tracker = self._debug_dump.node_inputs
            type_str = "Inputs to"
            short_type_str = "inputs"

        lines = []

        # Check if this is a tensor name, instead of a node name.
        node_name, _ = debug_data.parse_node_or_tensor_name(node_name)

        # Check if node exists.
        if not self._debug_dump.node_exists(node_name):
            return self._error(
                "There is no node named \"%s\" in the partition graphs" %
                node_name)

        if recursive:
            max_depth = depth
        else:
            max_depth = 1

        if control:
            include_ctrls_str = ", control %s included" % short_type_str
        else:
            include_ctrls_str = ""

        lines.append("%s node \"%s\" (Depth limit = %d%s):" %
                     (type_str, node_name, max_depth, include_ctrls_str))

        self._dfs_from_node(lines, node_name, tracker, max_depth, 1, [],
                            control, op_type)

        # Include legend.
        lines.append("")
        lines.append("Legend:")
        lines.append("  (d): recursion depth = d.")

        if control:
            lines.append("  (Ctrl): Control input.")
        if op_type:
            lines.append("  [Op]: Input node has op type Op.")

        # TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.

        return debugger_cli_common.RichTextLines(lines)
Example #12
    def print_tensor(self, args, screen_info=None):
        """Command handler for print_tensor.

        Print value of a given dumped tensor.

        Args:
          args: Command-line arguments, excluding the command prefix, as a list of
            str.
          screen_info: Optional dict input containing screen information such as
            cols.

        Returns:
          Output text lines as a RichTextLines object.
        """

        parsed = self._arg_parsers["print_tensor"].parse_args(args)

        if screen_info and "cols" in screen_info:
            np_printoptions = {"linewidth": screen_info["cols"]}
        else:
            np_printoptions = {}

        # Determine if parsed.tensor_name contains any indexing (slicing).
        if parsed.tensor_name.count("[") == 1 and parsed.tensor_name.endswith(
                "]"):
            tensor_name = parsed.tensor_name[:parsed.tensor_name.index("[")]
            tensor_slicing = parsed.tensor_name[parsed.tensor_name.index("["):]
        else:
            tensor_name = parsed.tensor_name
            tensor_slicing = ""

        node_name, output_slot = debug_data.parse_node_or_tensor_name(
            tensor_name)
        if output_slot is None:
            return self._error("\"%s\" is not a valid tensor name" %
                               parsed.tensor_name)

        if not self._debug_dump.node_exists(node_name):
            return self._error(
                "Node \"%s\" does not exist in partition graphs" % node_name)

        watch_keys = self._debug_dump.debug_watch_keys(node_name)

        # Find debug dump data that match the tensor name (node name + output
        # slot).
        matching_data = []
        for watch_key in watch_keys:
            debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
            for datum in debug_tensor_data:
                if datum.output_slot == output_slot:
                    matching_data.append(datum)

        if not matching_data:
            # No dump for this tensor.
            return self._error("Tensor \"%s\" did not generate any dumps." %
                               parsed.tensor_name)
        elif len(matching_data) == 1:
            # There is only one dump for this tensor.
            if parsed.number <= 0:
                return self._format_tensor(matching_data[0].get_tensor(),
                                           matching_data[0].watch_key,
                                           np_printoptions,
                                           print_all=parsed.print_all,
                                           tensor_slicing=tensor_slicing)
            else:
                return self._error(
                    "Invalid number (%d) for tensor %s, which generated one dump."
                    % (parsed.number, parsed.tensor_name))
        else:
            # There is more than one dump for this tensor.
            if parsed.number < 0:
                lines = [
                    "Tensor \"%s\" generated %d dumps:" %
                    (parsed.tensor_name, len(matching_data))
                ]

                for i, datum in enumerate(matching_data):
                    rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
                    lines.append("#%d [%.3f ms] %s" %
                                 (i, rel_time, datum.watch_key))

                lines.append("")
                lines.append(
                    "Use the -n (--number) flag to specify which dump to print."
                )
                lines.append("For example:")
                lines.append("  print_tensor %s -n 0" % parsed.tensor_name)

                return debugger_cli_common.RichTextLines(lines)
            elif parsed.number >= len(matching_data):
                return self._error(
                    "Specified number (%d) exceeds the number of available dumps "
                    "(%d) for tensor %s" %
                    (parsed.number, len(matching_data), parsed.tensor_name))
            else:
                return self._format_tensor(
                    matching_data[parsed.number].get_tensor(),
                    matching_data[parsed.number].watch_key +
                    " (dump #%d)" % parsed.number,
                    np_printoptions,
                    print_all=parsed.print_all,
                    tensor_slicing=tensor_slicing)
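
The bracket handling at the top of this revision is the same logic that other examples on this page obtain from command_parser.parse_tensor_name_with_slicing. Restated as a standalone helper:

def split_name_and_slicing(tensor_name):
    """Split "node:slot[slicing]" into ("node:slot", "[slicing]")."""
    if tensor_name.count("[") == 1 and tensor_name.endswith("]"):
        idx = tensor_name.index("[")
        return tensor_name[:idx], tensor_name[idx:]
    return tensor_name, ""

# e.g. split_name_and_slicing("hidden/weights:0[:, 1]")
#      -> ("hidden/weights:0", "[:, 1]")
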
Example #13
    def node_info(self, args, screen_info=None):
        """Command handler for node_info.

        Query information about a given node.

        Args:
          args: Command-line arguments, excluding the command prefix, as a list of
            str.
          screen_info: Optional dict input containing screen information such as
            cols.

        Returns:
          Output text lines as a RichTextLines object.
        """

        # TODO(cais): Add annotation of substrings for node names, to facilitate
        # on-screen highlighting/selection of node names.
        _ = screen_info

        parsed = self._arg_parsers["node_info"].parse_args(args)

        # Get a node name, regardless of whether the input is a node name (without
        # output slot attached) or a tensor name (with output slot attached).
        node_name, unused_slot = debug_data.parse_node_or_tensor_name(
            parsed.node_name)

        if not self._debug_dump.node_exists(node_name):
            return self._error(
                "There is no node named \"%s\" in the partition graphs" %
                node_name)

        # TODO(cais): Provide UI glossary feature to explain to users what the
        # term "partition graph" means and how it is related to TF graph objects
        # in Python. The information can be along the line of:
        # "A tensorflow graph defined in Python is stripped of unused ops
        # according to the feeds and fetches and divided into a number of
        # partition graphs that may be distributed among multiple devices and
        # hosts. The partition graphs are what's actually executed by the C++
        # runtime during a run() call."

        lines = ["Node %s" % node_name]
        lines.append("")
        lines.append("  Op: %s" % self._debug_dump.node_op_type(node_name))
        lines.append("  Device: %s" % self._debug_dump.node_device(node_name))

        # List node inputs (non-control and control).
        inputs = self._debug_dump.node_inputs(node_name)
        ctrl_inputs = self._debug_dump.node_inputs(node_name, is_control=True)

        input_lines = self._format_neighbors("input", inputs, ctrl_inputs)
        lines.extend(input_lines)

        # List node output recipients (non-control and control).
        recs = self._debug_dump.node_recipients(node_name)
        ctrl_recs = self._debug_dump.node_recipients(node_name,
                                                     is_control=True)

        rec_lines = self._format_neighbors("recipient", recs, ctrl_recs)
        lines.extend(rec_lines)

        # Optional: List attributes of the node.
        if parsed.attributes:
            lines.extend(self._list_node_attributes(node_name))

        # Optional: List dumps available from the node.
        if parsed.dumps:
            lines.extend(self._list_node_dumps(node_name))

        return debugger_cli_common.RichTextLines(lines)
Example #14
  def print_tensor(self, args, screen_info=None):
    """Command handler for print_tensor.

    Print value of a given dumped tensor.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    parsed = self._arg_parsers["print_tensor"].parse_args(args)

    if screen_info and "cols" in screen_info:
      np_printoptions = {"linewidth": screen_info["cols"]}
    else:
      np_printoptions = {}

    # Determine if any range-highlighting is required.
    highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

    tensor_name, tensor_slicing = (
        command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

    node_name, output_slot = debug_data.parse_node_or_tensor_name(tensor_name)
    if (self._debug_dump.loaded_partition_graphs() and
        not self._debug_dump.node_exists(node_name)):
      output = cli_shared.error(
          "Node \"%s\" does not exist in partition graphs" % node_name)
      _add_main_menu(
          output,
          node_name=None,
          enable_list_tensors=True,
          enable_print_tensor=False)
      return output

    watch_keys = self._debug_dump.debug_watch_keys(node_name)
    if output_slot is None:
      output_slots = set()
      for watch_key in watch_keys:
        output_slots.add(int(watch_key.split(":")[1]))

      if len(output_slots) == 1:
        # There is only one dumped tensor from this node, so there is no
        # ambiguity. Proceed to show the only dumped tensor.
        output_slot = list(output_slots)[0]
      else:
        # There is more than one dumped tensor from this node. Indicate as
        # such.
        # TODO(cais): Provide an output screen with command links for
        # convenience.
        lines = [
            "Node \"%s\" generated debug dumps from %s output slots:" %
            (node_name, len(output_slots)),
            "Please specify the output slot: %s:x." % node_name
        ]
        output = debugger_cli_common.RichTextLines(lines)
        _add_main_menu(
            output,
            node_name=node_name,
            enable_list_tensors=True,
            enable_print_tensor=False)
        return output

    # Find debug dump data that match the tensor name (node name + output
    # slot).
    matching_data = []
    for watch_key in watch_keys:
      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
      for datum in debug_tensor_data:
        if datum.output_slot == output_slot:
          matching_data.append(datum)

    if not matching_data:
      # No dump for this tensor.
      output = cli_shared.error("Tensor \"%s\" did not generate any dumps." %
                                parsed.tensor_name)
    elif len(matching_data) == 1:
      # There is only one dump for this tensor.
      if parsed.number <= 0:
        output = cli_shared.format_tensor(
            matching_data[0].get_tensor(),
            matching_data[0].watch_key,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
      else:
        output = cli_shared.error(
            "Invalid number (%d) for tensor %s, which generated one dump." %
            (parsed.number, parsed.tensor_name))

      _add_main_menu(output, node_name=node_name, enable_print_tensor=False)
    else:
      # There is more than one dump for this tensor.
      if parsed.number < 0:
        lines = [
            "Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
                                                   len(matching_data))
        ]
        font_attr_segs = {}

        for i, datum in enumerate(matching_data):
          rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
          lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))
          command = "print_tensor %s -n %d" % (parsed.tensor_name, i)
          font_attr_segs[len(lines) - 1] = [(
              len(lines[-1]) - len(datum.watch_key), len(lines[-1]),
              debugger_cli_common.MenuItem(None, command))]

        lines.append("")
        lines.append(
            "You can use the -n (--number) flag to specify which dump to "
            "print.")
        lines.append("For example:")
        lines.append("  print_tensor %s -n 0" % parsed.tensor_name)

        output = debugger_cli_common.RichTextLines(
            lines, font_attr_segs=font_attr_segs)
      elif parsed.number >= len(matching_data):
        output = cli_shared.error(
            "Specified number (%d) exceeds the number of available dumps "
            "(%d) for tensor %s" %
            (parsed.number, len(matching_data), parsed.tensor_name))
      else:
        output = cli_shared.format_tensor(
            matching_data[parsed.number].get_tensor(),
            matching_data[parsed.number].watch_key + " (dump #%d)" %
            parsed.number,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
      _add_main_menu(output, node_name=node_name, enable_print_tensor=False)

    return output
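
This revision adds clickable links to the dump list: font_attr_segs maps a zero-based line index to a list of (begin_column, end_column, attribute) segments, and a MenuItem attribute turns the watch key at the end of the line into a shortcut for the corresponding print_tensor command. A minimal sketch of the structure being built, with a hypothetical dump line:

# Assumes the debugger_cli_common module imported by the code above.
line = "#0 [1.234 ms] hidden/weights:0:DebugIdentity"
watch_key = "hidden/weights:0:DebugIdentity"
command = "print_tensor hidden/weights:0 -n 0"

font_attr_segs = {
    0: [(len(line) - len(watch_key), len(line),
         debugger_cli_common.MenuItem(None, command))],
}
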
Example #15
  def testParseNodeName(self):
    node_name, slot = debug_data.parse_node_or_tensor_name("namespace1/node_1")

    self.assertEqual("namespace1/node_1", node_name)
    self.assertIsNone(slot)
Example #16
  def testParseTensorName(self):
    node_name, slot = debug_data.parse_node_or_tensor_name(
        "namespace1/node_2:3")

    self.assertEqual("namespace1/node_2", node_name)
    self.assertEqual(3, slot)
Example #17
  def print_tensor(self, args, screen_info=None):
    """Command handler for print_tensor.

    Print value of a given dumped tensor.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    parsed = self._arg_parsers["print_tensor"].parse_args(args)

    if screen_info and "cols" in screen_info:
      np_printoptions = {"linewidth": screen_info["cols"]}
    else:
      np_printoptions = {}

    # Determine if any range-highlighting is required.
    highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

    tensor_name, tensor_slicing = (
        command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

    node_name, output_slot = debug_data.parse_node_or_tensor_name(tensor_name)
    if output_slot is None:
      return cli_shared.error("\"%s\" is not a valid tensor name" %
                              parsed.tensor_name)

    if (self._debug_dump.loaded_partition_graphs() and
        not self._debug_dump.node_exists(node_name)):
      return cli_shared.error(
          "Node \"%s\" does not exist in partition graphs" % node_name)

    watch_keys = self._debug_dump.debug_watch_keys(node_name)

    # Find debug dump data that match the tensor name (node name + output
    # slot).
    matching_data = []
    for watch_key in watch_keys:
      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
      for datum in debug_tensor_data:
        if datum.output_slot == output_slot:
          matching_data.append(datum)

    if not matching_data:
      # No dump for this tensor.
      return cli_shared.error(
          "Tensor \"%s\" did not generate any dumps." % parsed.tensor_name)
    elif len(matching_data) == 1:
      # There is only one dump for this tensor.
      if parsed.number <= 0:
        return cli_shared.format_tensor(
            matching_data[0].get_tensor(),
            matching_data[0].watch_key,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
      else:
        return cli_shared.error(
            "Invalid number (%d) for tensor %s, which generated one dump." %
            (parsed.number, parsed.tensor_name))
    else:
      # There is more than one dump for this tensor.
      if parsed.number < 0:
        lines = [
            "Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
                                                   len(matching_data))
        ]

        for i, datum in enumerate(matching_data):
          rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
          lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))

        lines.append("")
        lines.append(
            "Use the -n (--number) flag to specify which dump to print.")
        lines.append("For example:")
        lines.append("  print_tensor %s -n 0" % parsed.tensor_name)

        return debugger_cli_common.RichTextLines(lines)
      elif parsed.number >= len(matching_data):
        return cli_shared.error(
            "Specified number (%d) exceeds the number of available dumps "
            "(%d) for tensor %s" %
            (parsed.number, len(matching_data), parsed.tensor_name))
      else:
        return cli_shared.format_tensor(
            matching_data[parsed.number].get_tensor(),
            matching_data[parsed.number].watch_key + " (dump #%d)" %
            parsed.number,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options)
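
A closing note on the timestamp arithmetic that recurs in these handlers: dividing (datum.timestamp - self._debug_dump.t0) by 1000.0 and labeling the result in ms implies that dump timestamps are recorded in microseconds, with t0 the timestamp of the earliest dump. A hedged restatement:

# Assumption: timestamps are integers in microseconds since some epoch;
# t0 is the timestamp of the first dump in the DebugDumpDir.
def relative_time_ms(timestamp_us, t0_us):
    return (timestamp_us - t0_us) / 1000.0

# relative_time_ms(1480000001500, 1480000000000) -> 1.5 (ms)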