Example 1
    def BenchmarkSmokeTest(self):
        # Some benchmarks are running multiple iterations
        # which is not needed for a smoke test
        if hasattr(benchmark_class, 'enable_smoke_test_mode'):
            benchmark_class.enable_smoke_test_mode = True

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            options = testing.GetRunOptions(
                output_dir=temp_dir,
                benchmark_cls=benchmark_class,
                overrides={'story_shard_end_index': num_pages},
                environment=chromium_config.GetDefaultChromiumConfig())
            options.pageset_repeat = 1  # For smoke testing only run the page once.
            options.output_formats = ['histograms']
            options.max_values_per_test_case = MAX_VALUES_PER_TEST_CASE
            results_processor.ProcessOptions(options)

            return_code = benchmark_class().Run(options)
            # TODO(crbug.com/1019139): Make 111 the exit code that means
            # "no stories were run".
            if return_code in (-1, 111):
                self.skipTest('The benchmark was not run.')
            self.assertEqual(return_code,
                             0,
                             msg='Benchmark run failed: %s' %
                             benchmark_class.Name())
            return_code = results_processor.ProcessResults(options)
            self.assertEqual(return_code,
                             0,
                             msg='Result processing failed: %s' %
                             benchmark_class.Name())
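
The method above is not a self-contained test: names such as benchmark_class, num_pages and MAX_VALUES_PER_TEST_CASE are supplied by the enclosing scope that generates one smoke test per benchmark. A minimal sketch of that generation pattern, using illustrative names rather than the exact Chromium helpers, could look like this:

import unittest


class BenchmarkSmokeTests(unittest.TestCase):
    pass


def _AddSmokeTest(benchmark_class, num_pages=1):
    # The body of BenchmarkSmokeTest would be the method shown above; it
    # closes over benchmark_class and num_pages.
    def BenchmarkSmokeTest(self):
        ...

    BenchmarkSmokeTest.__name__ = 'testSmoke_' + benchmark_class.Name()
    setattr(BenchmarkSmokeTests, BenchmarkSmokeTest.__name__, BenchmarkSmokeTest)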
Example 2
def main(config, args=None):
  options = command_line.ParseArgs(
      environment=config, args=args,
      results_arg_parser=results_processor.ArgumentParser())
  results_processor.ProcessOptions(options)
  run_return_code = command_line.RunCommand(options)
  process_return_code = results_processor.ProcessResults(options)
  return max(run_return_code, process_return_code)
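
The main() function above expects the caller to pass a project configuration. A minimal sketch of a runner script built on it, assuming telemetry's ProjectConfig class and illustrative directory names, might look like this:

import os
import sys

from telemetry import project_config


def _CreateConfig():
    # Illustrative paths; a real runner points these at its own benchmark sources.
    top_level_dir = os.path.dirname(os.path.abspath(__file__))
    return project_config.ProjectConfig(
        top_level_dir=top_level_dir,
        benchmark_dirs=[os.path.join(top_level_dir, 'benchmarks')])


if __name__ == '__main__':
    sys.exit(main(_CreateConfig()))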
Example 3
def ProcessResults(options):
    """Collect generated results and call results_processor to compute results."""
    _MergeResultsJson(_GetTraceDir(options),
                      os.path.join(options.intermediate_dir, MERGED_RESULTS))
    process_return_code = results_processor.ProcessResults(options)
    if process_return_code != 0:
        return process_return_code
    expected_perf_filename = os.path.join(options.output_dir,
                                          'histograms.json')
    output_perf_results = os.path.join(options.output_dir, 'perf_results.json')
    shutil.move(expected_perf_filename, output_perf_results)
    return process_return_code
Example 4
def main(config, args=None):
    results_arg_parser = results_processor.ArgumentParser()
    options = command_line.ParseArgs(environment=config,
                                     args=args,
                                     results_arg_parser=results_arg_parser)
    results_processor.ProcessOptions(options)
    run_return_code = command_line.RunCommand(options)
    process_return_code = results_processor.ProcessResults(options)
    if process_return_code != 0:
        return process_return_code
    else:
        return run_return_code
Example 5
    def RunTest(self):
        class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)
                stories_to_remove = [
                    s for s in story_set.stories if s != story_to_smoke_test
                ]
                for s in stories_to_remove:
                    story_set.RemoveStory(s)
                assert story_set.stories
                return story_set

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            # Set the benchmark's default arguments.
            options = GenerateBenchmarkOptions(
                output_dir=temp_dir, benchmark_cls=SinglePageBenchmark)
            simplified_test_name = self.id().replace(
                'benchmarks.system_health_smoke_test.SystemHealthBenchmarkSmokeTest.',
                '')
            # Sanity check to ensure that the substring removal was effective.
            assert len(simplified_test_name) < len(self.id())

            if (simplified_test_name in _DISABLED_TESTS
                    and not options.run_disabled_tests):
                self.skipTest('Test is explicitly disabled')
            single_page_benchmark = SinglePageBenchmark()
            return_code = single_page_benchmark.Run(options)
            # TODO(crbug.com/1019139): Make 111 the exit code that means
            # "no stories were run".
            if return_code in (-1, 111):
                self.skipTest('The benchmark was not run.')
            self.assertEqual(return_code,
                             0,
                             msg='Benchmark run failed: %s' %
                             benchmark_class.Name())
            return_code = results_processor.ProcessResults(options)
            self.assertEqual(return_code,
                             0,
                             msg='Result processing failed: %s' %
                             benchmark_class.Name())
Example 6
  def RunBenchmark(self, benchmark_class):
    """Run a benchmark, process results, and return generated histograms."""
    # TODO(crbug.com/985712): Ideally we should be able to just call
    # telemetry.command_line.RunCommand(self.options) with the right set
    # of options chosen. However, argument parsing and command running are
    # currently tangled in Telemetry. In particular the class property
    # Run._benchmark is not set when we skip argument parsing, and the Run.Run
    # method call fails. Simplify this when argument parsing and command
    # running are no longer intertwined like this.
    run_return_code = benchmark_class().Run(self.options)
    self.assertEqual(run_return_code, 0)

    process_return_code = results_processor.ProcessResults(self.options)
    self.assertEqual(process_return_code, 0)

    histograms_file = os.path.join(self.options.output_dir, 'histograms.json')
    self.assertTrue(os.path.exists(histograms_file))

    with open(histograms_file) as f:
      dicts = json.load(f)
    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(dicts)
    return histograms
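
A test built on the RunBenchmark helper above can assert directly on the returned HistogramSet. A short sketch, in which SampleBenchmark and the metric name are hypothetical and which assumes HistogramSet's GetHistogramsNamed lookup:

  def testSampleBenchmarkReportsMetric(self):
    # SampleBenchmark and 'timeToFirstContentfulPaint' stand in for a real
    # benchmark class and one of the metric names it emits.
    histograms = self.RunBenchmark(SampleBenchmark)
    paint_histograms = histograms.GetHistogramsNamed(
        'timeToFirstContentfulPaint')
    self.assertTrue(paint_histograms)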