Example #1
0
 def testReadableStrIncludesBAtTheEndOnRequest(self):
     """The "B" suffix is appended when include_b=True is passed."""
     cases = (
         ("0B", 0),
         ("1.00kB", 1024),
         ("1.00MB", 1024**2),
         ("1.00GB", 1024**3),
     )
     for want, num_bytes in cases:
         self.assertEqual(
             want, cli_shared.bytes_to_readable_str(num_bytes, include_b=True))
 def testReadableStrIncludesBAtTheEndOnRequest(self):
   """Verify the optional "B" suffix emitted by bytes_to_readable_str."""
   expectations = {
       0: "0B",
       1024: "1.00kB",
       1024**2: "1.00MB",
       1024**3: "1.00GB",
   }
   for num_bytes, want in sorted(expectations.items()):
     got = cli_shared.bytes_to_readable_str(num_bytes, include_b=True)
     self.assertEqual(want, got)
Example #3
0
  def _measure_tensor_list_column_widths(self, data):
    """Determine the maximum widths of the timestamp and op-type column.

    This method assumes that data is sorted in the default order, i.e.,
    by ascending timestamps.

    Args:
      data: (list of DebugTensorDaum) the data based on which the maximum
        column widths will be determined.

    Returns:
      (int) maximum width of the timestamp column. 0 if data is empty.
      (int) maximum width of the dump size column. 0 if data is empty.
      (int) maximum width of the op type column. 0 if data is empty.
    """

    max_timestamp_width = 0
    if data:
      max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0
      max_timestamp_width = len("[%.3f] " % max_rel_time_ms)

    max_dump_size_width = 0
    for dump in data:
      dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
      if len(dump_size_str) + 1 > max_dump_size_width:
        max_dump_size_width = len(dump_size_str) + 1

    max_op_type_width = 0
    for dump in data:
      op_type = self._debug_dump.node_op_type(dump.node_name)
      if len(op_type) > max_op_type_width:
        max_op_type_width = len(op_type)

    return max_timestamp_width, max_dump_size_width, max_op_type_width
Example #4
0
 def testSizeAboveOneGigaByteWorks(self):
     """Sizes of one gibibyte or more are rendered with a "G" suffix."""
     for want, num_bytes in (("1.00G", 1024 ** 3),
                             ("2000.00G", 2000 * 1024 ** 3)):
         self.assertEqual(want, cli_shared.bytes_to_readable_str(num_bytes))
Example #5
0
 def testSizesBetweenOneMegaByteandOneGigaByteWorks(self):
     """Sizes in the mebibyte range are rendered with an "M" suffix."""
     for want, num_bytes in (("1.00M", 1024 ** 2),
                             ("2.40M", int(1024 ** 2 * 2.4)),
                             ("1023.00M", 1024 ** 2 * 1023)):
         self.assertEqual(want, cli_shared.bytes_to_readable_str(num_bytes))
Example #6
0
 def testSizesBelowOneKiloByteWorks(self):
     """Sub-kibibyte sizes are printed as plain integers with no suffix."""
     for num_bytes in (0, 500, 1023):
         self.assertEqual(str(num_bytes),
                          cli_shared.bytes_to_readable_str(num_bytes))
Example #7
0
 def testNoneSizeWorks(self):
     """A None size is rendered as the string "None"."""
     self.assertEqual("None", cli_shared.bytes_to_readable_str(None))
Example #8
0
 def testSizeAboveOneGigaByteWorks(self):
     """Gigabyte-range inputs get the "G" suffix."""
     one_gib = 1024 ** 3
     self.assertEqual("1.00G", cli_shared.bytes_to_readable_str(one_gib))
     self.assertEqual("2000.00G",
                      cli_shared.bytes_to_readable_str(one_gib * 2000))
Example #9
0
 def testSizesBetweenOneMegaByteandOneGigaByteWorks(self):
     """Megabyte-range inputs get the "M" suffix."""
     one_mib = 1024 ** 2
     self.assertEqual("1.00M", cli_shared.bytes_to_readable_str(one_mib))
     self.assertEqual("2.40M",
                      cli_shared.bytes_to_readable_str(int(one_mib * 2.4)))
     self.assertEqual("1023.00M",
                      cli_shared.bytes_to_readable_str(one_mib * 1023))
Example #10
0
 def testSizesBelowOneKiloByteWorks(self):
     """Sizes under one kibibyte are shown without any unit suffix."""
     cases = {0: "0", 500: "500", 1023: "1023"}
     for num_bytes, want in sorted(cases.items()):
         self.assertEqual(want, cli_shared.bytes_to_readable_str(num_bytes))
Example #11
0
 def testNoneSizeWorks(self):
     """Passing None yields str(None)."""
     none_repr = str(None)
     self.assertEqual(none_repr, cli_shared.bytes_to_readable_str(None))
Example #12
0
  def list_tensors(self, args, screen_info=None):
    """Command handler for list_tensors.

    List tensors dumped during debugged Session.run() call.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    # TODO(cais): Add annotations of substrings for dumped tensor names, to
    # facilitate on-screen highlighting/selection of node names.
    _ = screen_info

    parsed = self._arg_parsers["list_tensors"].parse_args(args)

    # Compile the optional regex filters and record a human-readable
    # description of each active filter for the output header.
    filter_strs = []
    if parsed.op_type_filter:
      op_type_regex = re.compile(parsed.op_type_filter)
      filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter)
    else:
      op_type_regex = None

    if parsed.node_name_filter:
      node_name_regex = re.compile(parsed.node_name_filter)
      filter_strs.append("Node name regex filter: \"%s\"" %
                         parsed.node_name_filter)
    else:
      node_name_regex = None

    output = debugger_cli_common.RichTextLines(filter_strs)
    output.append("")

    if parsed.tensor_filter:
      try:
        filter_callable = self.get_tensor_filter(parsed.tensor_filter)
      except ValueError:
        # Unknown filter name: report the error and bail out early.
        output = cli_shared.error("There is no tensor filter named \"%s\"." %
                                  parsed.tensor_filter)
        _add_main_menu(output, node_name=None, enable_list_tensors=False)
        return output

      data_to_show = self._debug_dump.find(filter_callable)
    else:
      data_to_show = self._debug_dump.dumped_tensor_data

    # TODO(cais): Implement filter by lambda on tensor value.

    # Column widths are measured before sorting: the measurement assumes the
    # default ascending-timestamp order.
    max_timestamp_width, max_dump_size_width, max_op_type_width = (
        self._measure_tensor_list_column_widths(data_to_show))

    # Sort the data.
    data_to_show = self._sort_dump_data_by(
        data_to_show, parsed.sort_by, parsed.reverse)

    output.extend(
        self._tensor_list_column_heads(parsed, max_timestamp_width,
                                       max_dump_size_width, max_op_type_width))

    dump_count = 0
    for dump in data_to_show:
      if node_name_regex and not node_name_regex.match(dump.node_name):
        continue

      # Look up the op type once; it is reused both for regex filtering and
      # for rendering the output line below.
      op_type = self._debug_dump.node_op_type(dump.node_name)
      if op_type_regex and not op_type_regex.match(op_type):
        continue

      rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
      dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
      dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot)

      # Assemble the row column by column, padding each column out to its
      # precomputed maximum width.
      line = "[%.3f]" % rel_time
      line += " " * (max_timestamp_width - len(line))
      line += dump_size_str
      line += " " * (max_timestamp_width + max_dump_size_width - len(line))
      line += op_type
      line += " " * (max_timestamp_width + max_dump_size_width +
                     max_op_type_width - len(line))
      line += " %s" % dumped_tensor_name

      # Attach a menu item so that selecting the tensor name on screen
      # issues "pt <tensor_name>" (print_tensor).
      output.append(
          line,
          font_attr_segs=[(
              len(line) - len(dumped_tensor_name), len(line),
              debugger_cli_common.MenuItem("", "pt %s" % dumped_tensor_name))])
      dump_count += 1

    if parsed.tensor_filter:
      output.prepend([
          "%d dumped tensor(s) passing filter \"%s\":" %
          (dump_count, parsed.tensor_filter)
      ])
    else:
      output.prepend(["%d dumped tensor(s):" % dump_count])

    _add_main_menu(output, node_name=None, enable_list_tensors=False)
    return output