def testReportingBenchmark(self):
        tempdir = tf.test.get_temp_dir()
        try:
            tf.gfile.MakeDirs(tempdir)
        except OSError as e:
            # It's OK if the directory already exists.
            if " exists:" not in str(e):
                raise

        prefix = os.path.join(
            tempdir, "reporting_bench_%016x_" % random.getrandbits(64))
        expected_output_file = "%s%s" % (
            prefix, "TestReportingBenchmark.benchmarkReport1")
        expected_output_file_2 = "%s%s" % (
            prefix, "TestReportingBenchmark.custom_benchmark_name")
        try:
            self.assertFalse(tf.gfile.Exists(expected_output_file))
            # Run benchmark without the env var set; nothing should be written
            if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
                del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
            reporting = TestReportingBenchmark()
            reporting.benchmarkReport1()  # Runs without writing anything
            self.assertFalse(tf.gfile.Exists(expected_output_file))

            # Run benchmark with the env var set; it should write
            os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix

            reporting = TestReportingBenchmark()
            reporting.benchmarkReport1()  # This should write
            reporting.benchmarkReport2()  # This should write

            # Check the files were written
            self.assertTrue(tf.gfile.Exists(expected_output_file))
            self.assertTrue(tf.gfile.Exists(expected_output_file_2))

            # Check the contents are correct
            expected_1 = test_log_pb2.BenchmarkEntry()
            expected_1.name = "TestReportingBenchmark.benchmarkReport1"
            expected_1.iters = 1

            expected_2 = test_log_pb2.BenchmarkEntry()
            expected_2.name = "TestReportingBenchmark.custom_benchmark_name"
            expected_2.iters = 2
            expected_2.extras["number_key"].double_value = 3
            expected_2.extras["other_key"].string_value = "string"

            with tf.gfile.GFile(expected_output_file, "r") as f:
                read_benchmark_1 = text_format.Merge(
                    f.read(), test_log_pb2.BenchmarkEntry())
            self.assertProtoEquals(expected_1, read_benchmark_1)

            with tf.gfile.GFile(expected_output_file_2, "r") as f:
                read_benchmark_2 = text_format.Merge(
                    f.read(), test_log_pb2.BenchmarkEntry())
            self.assertProtoEquals(expected_2, read_benchmark_2)

        finally:
            tf.gfile.DeleteRecursively(tempdir)
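The TestReportingBenchmark class driven by this test is not shown on this page. Below is a minimal sketch consistent with the assertions above; the method names, iters values, custom name, and extras come from the expected protos, while the exact class body is an assumption:

class TestReportingBenchmark(tf.test.Benchmark):
    """Benchmark methods exercised by the reporting test above."""

    def benchmarkReport1(self):
        # Reported under the default name,
        # "TestReportingBenchmark.benchmarkReport1".
        self.report_benchmark(iters=1)

    def benchmarkReport2(self):
        # `name` overrides the default; `extras` carries arbitrary
        # numeric and string metadata.
        self.report_benchmark(
            iters=2,
            name="custom_benchmark_name",
            extras={"number_key": 3, "other_key": "string"})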
Example #2
def _global_report_benchmark(name,
                             iters=None,
                             cpu_time=None,
                             wall_time=None,
                             throughput=None,
                             extras=None):
    """Method for recording a benchmark directly.

  Args:
    name: The BenchmarkEntry name.
    iters: (optional) How many iterations were run
    cpu_time: (optional) Total cpu time in seconds
    wall_time: (optional) Total wall time in seconds
    throughput: (optional) Throughput (in MB/s)
    extras: (optional) Dict mapping string keys to additional benchmark info.

  Raises:
    TypeError: if extras is not a dict.
    IOError: if the benchmark output file already exists.
  """
    if extras is not None and not isinstance(extras, dict):
        raise TypeError("extras must be a dict")

    test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
    if test_env is None:
        # Reporting was not requested
        return

    entry = test_log_pb2.BenchmarkEntry()
    entry.name = name
    if iters is not None:
        entry.iters = iters
    if cpu_time is not None:
        entry.cpu_time = cpu_time
    if wall_time is not None:
        entry.wall_time = wall_time
    if throughput is not None:
        entry.throughput = throughput
    if extras is not None:
        for (k, v) in extras.items():
            if isinstance(v, numbers.Number):
                entry.extras[k].double_value = v
            else:
                entry.extras[k].string_value = str(v)

    serialized_entry = text_format.MessageToString(entry)

    # Benchmark names may contain "/", which is not valid in file names.
    mangled_name = name.replace("/", "__")
    output_path = "%s%s" % (test_env, mangled_name)
    if gfile.Exists(output_path):
        raise IOError("File already exists: %s" % output_path)
    with gfile.GFile(output_path, "w") as out:
        out.write(serialized_entry)
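A minimal usage sketch of _global_report_benchmark, assuming the reporting env var is set to a writable path prefix; the prefix, benchmark name, and values below are invented for illustration:

os.environ[TEST_REPORTER_TEST_ENV] = "/tmp/bench_report_"
_global_report_benchmark(
    name="MyBenchmarks.matmul",
    iters=100,
    wall_time=0.5,
    extras={"batch_size": 32, "device": "cpu"})
# Writes a text-format BenchmarkEntry to
# /tmp/bench_report_MyBenchmarks.matmul; a second call with the same
# name raises IOError because the output file already exists.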
Example #3
  def testReportingBenchmark(self):
    tempdir = test.get_temp_dir()
    try:
      gfile.MakeDirs(tempdir)
    except OSError as e:
      # It's OK if the directory already exists.
      if " exists:" not in str(e):
        raise

    prefix = os.path.join(tempdir,
                          "reporting_bench_%016x_" % random.getrandbits(64))
    expected_output_file = "%s%s" % (prefix,
                                     "TestReportingBenchmark.benchmarkReport1")
    expected_output_file_2 = "%s%s" % (
        prefix, "TestReportingBenchmark.custom_benchmark_name")
    expected_output_file_3 = "%s%s" % (prefix,
                                       "TestReportingBenchmark.op_benchmark")
    try:
      self.assertFalse(gfile.Exists(expected_output_file))
      # Run benchmark without the env var set; nothing should be written
      if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
        del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
      reporting = TestReportingBenchmark()
      reporting.benchmarkReport1()  # This should run without writing anything
      self.assertFalse(gfile.Exists(expected_output_file))

      # Run benchmark with the env var set; it should write
      os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix

      reporting = TestReportingBenchmark()
      reporting.benchmarkReport1()  # This should write
      reporting.benchmarkReport2()  # This should write
      reporting.benchmark_times_an_op()  # This should write

      # Check the files were written
      self.assertTrue(gfile.Exists(expected_output_file))
      self.assertTrue(gfile.Exists(expected_output_file_2))
      self.assertTrue(gfile.Exists(expected_output_file_3))

      # Check the contents are correct
      expected_1 = test_log_pb2.BenchmarkEntry()
      expected_1.name = "TestReportingBenchmark.benchmarkReport1"
      expected_1.iters = 1

      expected_2 = test_log_pb2.BenchmarkEntry()
      expected_2.name = "TestReportingBenchmark.custom_benchmark_name"
      expected_2.iters = 2
      expected_2.extras["number_key"].double_value = 3
      expected_2.extras["other_key"].string_value = "string"

      expected_3 = test_log_pb2.BenchmarkEntry()
      expected_3.name = "TestReportingBenchmark.op_benchmark"
      expected_3.iters = 1000

      def read_benchmark_entry(f):
        with gfile.GFile(f, "rb") as infile:
          s = infile.read()
        entries = test_log_pb2.BenchmarkEntries.FromString(s)
        self.assertEqual(1, len(entries.entry))
        return entries.entry[0]

      read_benchmark_1 = read_benchmark_entry(expected_output_file)
      self.assertProtoEquals(expected_1, read_benchmark_1)

      read_benchmark_2 = read_benchmark_entry(expected_output_file_2)
      self.assertProtoEquals(expected_2, read_benchmark_2)

      read_benchmark_3 = read_benchmark_entry(expected_output_file_3)
      self.assertEqual(expected_3.name, read_benchmark_3.name)
      self.assertEqual(expected_3.iters, read_benchmark_3.iters)
      self.assertGreater(read_benchmark_3.wall_time, 0)
      full_trace = read_benchmark_3.extras["full_trace_chrome_format"]
      json_trace = json.loads(full_trace.string_value)
      self.assertIsInstance(json_trace, dict)
      self.assertIn("traceEvents", json_trace)

    finally:
      gfile.DeleteRecursively(tempdir)
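The benchmark_times_an_op method called above is likewise not shown. A plausible shape, inferred from the assertions (iters of 1000 suggests min_iters=1000, and the full_trace_chrome_format extra suggests store_trace=True); the session and constant_op modules and the op being timed are assumptions:

  def benchmark_times_an_op(self):
    with session.Session() as sess:
      a = constant_op.constant(0.0)
      a_plus_a = a + a
      # run_op_benchmark runs the op at least min_iters times, records
      # wall time, and (with store_trace=True) attaches a Chrome-format
      # trace under extras["full_trace_chrome_format"].
      self.run_op_benchmark(
          sess, a_plus_a, min_iters=1000, store_trace=True,
          name="op_benchmark")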