Code Example #1
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import gfile


def process_benchmarks(log_files):
    """Merges serialized BenchmarkEntries protos read from each log file."""
    benchmarks = test_log_pb2.BenchmarkEntries()
    for f in log_files:
        content = gfile.GFile(f, "rb").read()
        # MergeFromString returns the number of bytes parsed; anything short
        # of the full content means the file was not a complete proto.
        if benchmarks.MergeFromString(content) != len(content):
            raise Exception("Failed parsing benchmark entry from %s" % f)
    return benchmarks
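
A minimal usage sketch (the log paths are hypothetical): the merged message
exposes each parsed record through its repeated 'entry' field.

benchmarks = process_benchmarks(["/tmp/bench_log_1", "/tmp/bench_log_2"])
for entry in benchmarks.entry:
    print(entry.name, entry.iters, entry.wall_time)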
Code Example #2
    # From a TF test class that mixes in tf.test.Benchmark; report_benchmark
    # serializes a BenchmarkEntries proto under TEST_REPORT_FILE_PREFIX.
    def testReportBenchmark(self):
        output_dir = self.get_temp_dir() + os.path.sep
        os.environ['TEST_REPORT_FILE_PREFIX'] = output_dir
        proto_file_path = os.path.join(output_dir,
                                       'BenchmarkTest.testReportBenchmark')
        if os.path.exists(proto_file_path):
            os.remove(proto_file_path)

        self.report_benchmark(iters=2000,
                              wall_time=1000,
                              name='testReportBenchmark',
                              metrics=[{
                                  'name': 'metric_name_1',
                                  'value': 0,
                                  'min_value': 1
                              }, {
                                  'name': 'metric_name_2',
                                  'value': 90,
                                  'min_value': 0,
                                  'max_value': 95
                              }])

        with open(proto_file_path, 'rb') as f:
            benchmark_entries = test_log_pb2.BenchmarkEntries()
            benchmark_entries.ParseFromString(f.read())

            actual_result = json_format.MessageToDict(
                benchmark_entries,
                preserving_proto_field_name=True,
                including_default_value_fields=True)['entry'][0]
        os.remove(proto_file_path)

        expected_result = {
            'name': 'BenchmarkTest.testReportBenchmark',
            # google.protobuf.json_format.MessageToDict() will convert
            # int64 fields to strings.
            'iters': '2000',
            'wall_time': 1000,
            'cpu_time': 0,
            'throughput': 0,
            'extras': {},
            'metrics': [{
                'name': 'metric_name_1',
                'value': 0,
                'min_value': 1
            }, {
                'name': 'metric_name_2',
                'value': 90,
                'min_value': 0,
                'max_value': 95
            }]
        }

        self.assertEqual(2000, benchmark_entries.entry[0].iters)
        self.assertDictEqual(expected_result, actual_result)
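
The int64-to-string conversion noted in the comment above follows the proto3
JSON mapping; a minimal standalone sketch (field values hypothetical):

from google.protobuf import json_format
from tensorflow.core.util import test_log_pb2

entries = test_log_pb2.BenchmarkEntries()
entry = entries.entry.add()
entry.name = "demo"
entry.iters = 2000      # int64 field: rendered as a JSON string
entry.wall_time = 1000  # double field: stays numeric

d = json_format.MessageToDict(entries, preserving_proto_field_name=True)
print(d["entry"][0]["iters"])      # '2000' (str)
print(d["entry"][0]["wall_time"])  # 1000.0 (float)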
Code Example #3
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2


def process_benchmarks(log_files):
    benchmarks = test_log_pb2.BenchmarkEntries()
    for f in log_files:
        content = tf.gfile.GFile(f).read()  # tf.gfile is the TF1-era API
        # Each file holds a single text-format BenchmarkEntry.
        entry = benchmarks.entry.add()
        text_format.Merge(content, entry)
    return benchmarks
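
For reference, a text-format log file consumed by this variant could look
like the following (contents hypothetical; the fields are those of a
BenchmarkEntry):

name: "MyBenchmark.test_case"
iters: 1000
wall_time: 2.5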
Code Example #4
File: benchmark.py Project: Utsal20/poGANmon
import numbers
import os

from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging

# Environment variable naming the output-file prefix for benchmark reports.
TEST_REPORTER_TEST_ENV = "TEST_REPORT_FILE_PREFIX"


def _global_report_benchmark(
    name, iters=None, cpu_time=None, wall_time=None,
    throughput=None, extras=None):
  """Method for recording a benchmark directly.

  Args:
    name: The BenchmarkEntry name.
    iters: (optional) How many iterations were run
    cpu_time: (optional) Total cpu time in seconds
    wall_time: (optional) Total wall time in seconds
    throughput: (optional) Throughput (in MB/s)
    extras: (optional) Dict mapping string keys to additional benchmark info.

  Raises:
    TypeError: if extras is not a dict.
    IOError: if the benchmark output file already exists.
  """
  if extras is not None:
    if not isinstance(extras, dict):
      raise TypeError("extras must be a dict")

    logging.info("Benchmark [%s] iters: %d, wall_time: %g, cpu_time: %g,"
                 "throughput: %g %s", name, iters if iters is not None else -1,
                 wall_time if wall_time is not None else -1, cpu_time if
                 cpu_time is not None else -1, throughput if
                 throughput is not None else -1, str(extras) if extras else "")

  entries = test_log_pb2.BenchmarkEntries()
  entry = entries.entry.add()
  entry.name = name
  if iters is not None:
    entry.iters = iters
  if cpu_time is not None:
    entry.cpu_time = cpu_time
  if wall_time is not None:
    entry.wall_time = wall_time
  if throughput is not None:
    entry.throughput = throughput
  if extras is not None:
    for (k, v) in extras.items():
      if isinstance(v, numbers.Number):
        entry.extras[k].double_value = v
      else:
        entry.extras[k].string_value = str(v)

  test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
  if test_env is None:
    # Reporting was not requested, just print the proto
    print(str(entries))
    return

  serialized_entry = entries.SerializeToString()

  mangled_name = name.replace("/", "__")
  output_path = "%s%s" % (test_env, mangled_name)
  if gfile.Exists(output_path):
    raise IOError("File already exists: %s" % output_path)
  with gfile.GFile(output_path, "wb") as out:
    out.write(serialized_entry)
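
A minimal sketch of the two reporting paths (the prefix is hypothetical):
without TEST_REPORT_FILE_PREFIX set, the proto is printed; with it set, a
serialized file is written, with "/" in the name mangled to "__".

import os

os.environ.pop("TEST_REPORT_FILE_PREFIX", None)
_global_report_benchmark("demo/case", iters=100, wall_time=0.5)  # prints proto

os.environ["TEST_REPORT_FILE_PREFIX"] = "/tmp/benchmarks/"
_global_report_benchmark("demo/case", iters=100, wall_time=0.5)
# writes /tmp/benchmarks/demo__case; raises IOError if it already exists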
Code Example #5
File: results.py Project: jackd/tfbm
import os
from typing import List

import tensorflow as tf
from tensorflow.core.util import test_log_pb2

# BenchmarkResult is a project-local class defined elsewhere in tfbm.


def load_benchmarks(benchmarks_dir: str,
                    run_id: str = "") -> List[BenchmarkResult]:
    """Load benchmarks saved under `benchmarks_dir`."""
    filenames = tf.io.gfile.listdir(benchmarks_dir)
    entries = test_log_pb2.BenchmarkEntries()
    for filename in filenames:
        path = os.path.join(benchmarks_dir, filename)
        with tf.io.gfile.GFile(path, "rb") as fp:
            entries.MergeFromString(fp.read())
    return [
        BenchmarkResult.from_proto(e, run_id=run_id) for e in entries.entry  # pylint: disable=no-member
    ]
Code Example #6
File: utils.py Project: huazai1992/zhu_test
import logging
import os


def read_benchmark_result(benchmark_result_file_path):
    from google.protobuf import json_format
    from tensorflow.core.util import test_log_pb2

    if not os.path.isfile(benchmark_result_file_path):
        logging.error(
            'Failed to read benchmark result because file {} does not exist'
            .format(benchmark_result_file_path))
        return {}

    with open(benchmark_result_file_path, 'rb') as f:
        benchmark_entries = test_log_pb2.BenchmarkEntries()
        benchmark_entries.ParseFromString(f.read())

        return json_format.MessageToDict(
            benchmark_entries, preserving_proto_field_name=True)['entry'][0]
Code Example #7
import logging  # stand-in; the original module may use TF's own logger
import os


def read_benchmark_result(benchmark_result_file_path):
  """Read benchmark result from the protobuf file."""
  from google.protobuf import json_format  # pylint: disable=g-import-not-at-top
  from tensorflow.core.util import test_log_pb2  # pylint: disable=g-import-not-at-top

  if not os.path.isfile(benchmark_result_file_path):
    logging.error('Failed to read benchmark result because '
                  'file %s does not exist', benchmark_result_file_path)
    return {}

  with open(benchmark_result_file_path, 'rb') as f:
    benchmark_entries = test_log_pb2.BenchmarkEntries()
    benchmark_entries.ParseFromString(f.read())

    return json_format.MessageToDict(
        benchmark_entries, preserving_proto_field_name=True)['entry'][0]
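
A minimal usage sketch (the path is hypothetical, e.g. one written by
_global_report_benchmark above); int64 fields such as 'iters' come back as
strings:

result = read_benchmark_result("/tmp/benchmarks/demo__case")
print(result.get("name"), result.get("iters"))  # 'iters' is a str, e.g. '100'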
Code Example #8
# Imports and the TEST_REPORTER_TEST_ENV constant are as in Code Example #4.
def _global_report_benchmark(name,
                             iters=None,
                             cpu_time=None,
                             wall_time=None,
                             throughput=None,
                             extras=None,
                             metrics=None):
    """Method for recording a benchmark directly.

  Args:
    name: The BenchmarkEntry name.
    iters: (optional) How many iterations were run
    cpu_time: (optional) Total cpu time in seconds
    wall_time: (optional) Total wall time in seconds
    throughput: (optional) Throughput (in MB/s)
    extras: (optional) Dict mapping string keys to additional benchmark info.
    metrics: (optional) A list of dict representing metrics generated by the
      benchmark. Each dict should contain keys 'name' and'value'. A dict
      can optionally contain keys 'min_value' and 'max_value'.

  Raises:
    TypeError: if extras is not a dict.
    IOError: if the benchmark output file already exists.
  """
    logging.info(
        "Benchmark [%s] iters: %d, wall_time: %g, cpu_time: %g,"
        "throughput: %g, extras: %s, metrics: %s", name,
        iters if iters is not None else -1,
        wall_time if wall_time is not None else -1,
        cpu_time if cpu_time is not None else -1,
        throughput if throughput is not None else -1,
        str(extras) if extras else "None",
        str(metrics) if metrics else "None")

    entries = test_log_pb2.BenchmarkEntries()
    entry = entries.entry.add()
    entry.name = name
    if iters is not None:
        entry.iters = iters
    if cpu_time is not None:
        entry.cpu_time = cpu_time
    if wall_time is not None:
        entry.wall_time = wall_time
    if throughput is not None:
        entry.throughput = throughput
    if extras is not None:
        if not isinstance(extras, dict):
            raise TypeError("extras must be a dict")
        for (k, v) in extras.items():
            if isinstance(v, numbers.Number):
                entry.extras[k].double_value = v
            else:
                entry.extras[k].string_value = str(v)
    if metrics is not None:
        if not isinstance(metrics, list):
            raise TypeError("metrics must be a list")
        for metric in metrics:
            if "name" not in metric:
                raise TypeError("metric must has a 'name' field")
            if "value" not in metric:
                raise TypeError("metric must has a 'value' field")

            metric_entry = entry.metrics.add()
            metric_entry.name = metric["name"]
            metric_entry.value = metric["value"]
            if "min_value" in metric:
                metric_entry.min_value.value = metric["min_value"]
            if "max_value" in metric:
                metric_entry.max_value.value = metric["max_value"]

    test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
    if test_env is None:
        # Reporting was not requested, just print the proto
        print(str(entries))
        return

    serialized_entry = entries.SerializeToString()

    mangled_name = name.replace("/", "__")
    output_path = "%s%s" % (test_env, mangled_name)
    if gfile.Exists(output_path):
        raise IOError("File already exists: %s" % output_path)
    with gfile.GFile(output_path, "wb") as out:
        out.write(serialized_entry)
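
A minimal calling sketch (names and values hypothetical) for the metrics
argument validated above: each dict needs 'name' and 'value', and may add
'min_value' and/or 'max_value' bounds.

_global_report_benchmark(
    "demo/metrics_case",
    iters=2000,
    wall_time=1000,
    metrics=[
        {"name": "accuracy", "value": 0.97, "min_value": 0.9},
        {"name": "latency_ms", "value": 12.5, "max_value": 20},
    ])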