def setUp(self):
        super(GTestProgressReporterTest, self).setUp()
        self._fake_timer = fakes.FakeTimer(gtest_progress_reporter)

        self._output_stream = StringIO.StringIO()
        self._reporter = gtest_progress_reporter.GTestProgressReporter(
            self._output_stream)
    def setUp(self):
        super(GTestProgressReporterTest, self).setUp()
        self._mock_timer = simple_mock.MockTimer(gtest_progress_reporter)

        self._output_stream = stream.TestOutputStream()
        self._reporter = gtest_progress_reporter.GTestProgressReporter(
            self._output_stream)
Example 3
  def __init__(self, progress_stream=None, output_dir=None,
               intermediate_dir=None, benchmark_name=None,
               benchmark_description=None, upload_bucket=None,
               results_label=None):
    """Object to hold story run results while a benchmark is executed.

    Args:
      progress_stream: A file-like object where to write progress reports as
          stories are being run. Can be None to suppress progress reporting.
      output_dir: A string specifying the directory where to store the test
          artifacts, e.g. traces, videos, etc.
      intermediate_dir: A string specifying the directory where to write
          intermediate results (e.g. the test results stream). Defaults to the
          'artifacts' subdirectory of output_dir when not given.
      benchmark_name: A string with the name of the currently running benchmark.
      benchmark_description: A string with a description of the currently
          running benchmark.
      upload_bucket: A string identifying a cloud storage bucket where to
          upload artifacts.
      results_label: A string that serves as an identifier for the current
          benchmark run.
    """
    super(PageTestResults, self).__init__()
    self._progress_reporter = gtest_progress_reporter.GTestProgressReporter(
        progress_stream)
    self._output_dir = output_dir
    self._intermediate_dir = intermediate_dir
    if intermediate_dir is None and output_dir is not None:
      self._intermediate_dir = os.path.join(output_dir, 'artifacts')
    self._upload_bucket = upload_bucket

    self._current_story_run = None
    self._all_story_runs = []

    # This is used to validate that measurements across story runs use units
    # consistently.
    self._measurement_units = {}

    self._benchmark_name = benchmark_name or '(unknown benchmark)'
    self._benchmark_description = benchmark_description or ''

    # |_interruption| is None if the benchmark has not been interrupted.
    # Otherwise it is a string explaining the reason for the interruption.
    # Interruptions occur for unrecoverable exceptions.
    self._interruption = None
    self._results_label = results_label

    self._diagnostics = {
        reserved_infos.BENCHMARKS.name: [self.benchmark_name],
        reserved_infos.BENCHMARK_DESCRIPTIONS.name:
            [self.benchmark_description],
    }

    # If the object has been finalized, no more results can be added to it.
    self._finalized = False
    self._start_time = time.time()
    self._results_stream = None
    if self._intermediate_dir is not None:
      if not os.path.exists(self._intermediate_dir):
        os.makedirs(self._intermediate_dir)
      self._results_stream = open(
          os.path.join(self._intermediate_dir, TEST_RESULTS), 'w')
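A minimal construction sketch for the __init__ above, assuming PageTestResults is importable from this results module; the benchmark name, description, and label are placeholder values.

import sys
import tempfile

# Placeholder arguments; with a temporary output_dir, intermediate_dir
# defaults to '<output_dir>/artifacts' as in the constructor above.
results = PageTestResults(
    progress_stream=sys.stdout,
    output_dir=tempfile.mkdtemp(),
    benchmark_name='example.benchmark',
    benchmark_description='Illustrative benchmark run.',
    results_label='local-run')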
Example 4
  def __init__(self, output_formatters=None, progress_stream=None,
               output_dir=None, should_add_value=None, benchmark_name=None,
               benchmark_description=None,
               upload_bucket=None, results_label=None):
    """
    Args:
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvOutputFormatter, which outputs the test results as CSV.
      progress_stream: A file-like object where to write progress reports as
          stories are being run. Can be None to suppress progress reporting.
      output_dir: A string specifying the directory where to store the test
          artifacts, e.g. traces, videos, etc.
      should_add_value: A function that takes two arguments: a value name and
          a boolean (True when the value belongs to the first run of the
          corresponding story). It returns True if the value should be added
          to the test results and False otherwise.
      benchmark_name: A string with the name of the currently running benchmark.
      benchmark_description: A string with a description of the currently
          running benchmark.
      upload_bucket: A string identifying a cloud storage bucket where to
          upload artifacts.
      results_label: A string that serves as an identifier for the current
          benchmark run.
    """
    super(PageTestResults, self).__init__()
    self._progress_reporter = gtest_progress_reporter.GTestProgressReporter(
        progress_stream)
    self._output_formatters = (
        output_formatters if output_formatters is not None else [])
    self._output_dir = output_dir
    self._upload_bucket = upload_bucket
    if should_add_value is not None:
      self._should_add_value = should_add_value
    else:
      self._should_add_value = lambda v, is_first: True

    self._current_story_run = None
    self._all_story_runs = []
    self._all_stories = set()
    self._representative_value_for_each_value_name = {}
    self._all_summary_values = []

    self._histograms = histogram_set.HistogramSet()

    self._benchmark_name = benchmark_name or '(unknown benchmark)'
    self._benchmark_description = benchmark_description or ''
    self._benchmark_start_us = time.time() * 1e6
    # |_interruption| is None if the benchmark has not been interrupted.
    # Otherwise it is a string explaining the reason for the interruption.
    # Interruptions occur for unrecoverable exceptions.
    self._interruption = None
    self._results_label = results_label
    self._histogram_dicts_to_add = []
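The should_add_value argument above is a plain two-argument predicate. A minimal sketch of such a callback (the function name is hypothetical) that keeps only values recorded during the first run of each story:

def _KeepOnlyFirstRunValues(value_name, is_first_run):
  # Matches the contract documented above: the value name plus a flag that is
  # True when the value belongs to the first run of the corresponding story.
  del value_name  # The decision here depends only on the run index.
  return is_first_run

results = PageTestResults(should_add_value=_KeepOnlyFirstRunValues)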
  def testOutputSkipInformation(self):
    test_story_set = _MakeStorySet()
    self._reporter = gtest_progress_reporter.GTestProgressReporter(
        self._output_stream, output_skipped_tests_summary=True)
    results = _MakePageTestResults(self._reporter)
    results.WillRunPage(test_story_set.stories[0])
    self._fake_timer.SetTime(0.007)
    results.Skip('Page skipped for testing reason')
    results.DidRunPage(test_story_set.stories[0])

    results.PrintSummary()
    expected = ('[ RUN      ] bench/http://www.foo.com/\n'
                '===== SKIPPING TEST http://www.foo.com/:'
                ' Page skipped for testing reason =====\n'
                '[  SKIPPED ] bench/http://www.foo.com/ (7 ms)\n'
                '[  PASSED  ] 0 tests.\n'
                '[  SKIPPED ] 1 test.\n'
                '\n'
                'Skipped pages:\n'
                'bench/http://www.foo.com/\n'
               )
    self.assertEquals(expected, ''.join(self._output_stream.getvalue()))
    def testOutputSkipInformation(self):
        test_story_set = _MakeStorySet()
        self._reporter = gtest_progress_reporter.GTestProgressReporter(
            self._output_stream, output_skipped_tests_summary=True)
        results = page_test_results.PageTestResults(
            progress_reporter=self._reporter)
        results.WillRunPage(test_story_set.stories[0])
        self._mock_timer.SetTime(0.007)
        results.AddValue(
            skip.SkipValue(test_story_set.stories[0],
                           'Page skipped for testing reason'))
        results.DidRunPage(test_story_set.stories[0])

        results.PrintSummary()
        expected = ('[ RUN      ] http://www.foo.com/\n'
                    '===== SKIPPING TEST http://www.foo.com/:'
                    ' Page skipped for testing reason =====\n'
                    '[       OK ] http://www.foo.com/ (7 ms)\n'
                    '[  PASSED  ] 1 test.\n'
                    '\n'
                    'Skipped pages:\n'
                    'http://www.foo.com/\n'
                    '\n')
        self.assertEquals(expected, ''.join(self._output_stream.output_data))
Example 7
    def __init__(self,
                 output_formatters=None,
                 progress_stream=None,
                 output_dir=None,
                 intermediate_dir=None,
                 benchmark_name=None,
                 benchmark_description=None,
                 upload_bucket=None,
                 results_label=None):
        """
    Args:
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvOutputFormatter, which outputs the test results as CSV.
      progress_stream: A file-like object where to write progress reports as
          stories are being run. Can be None to suppress progress reporting.
      output_dir: A string specifying the directory where to store the test
          artifacts, e.g. traces, videos, etc.
      intermediate_dir: A string specifying the directory where to write
          intermediate results (e.g. the test results stream). Defaults to the
          'artifacts' subdirectory of output_dir when not given.
      benchmark_name: A string with the name of the currently running benchmark.
      benchmark_description: A string with a description of the currently
          running benchmark.
      upload_bucket: A string identifying a cloud storage bucket where to
          upload artifacts.
      results_label: A string that serves as an identifier for the current
          benchmark run.
    """
        super(PageTestResults, self).__init__()
        self._progress_reporter = gtest_progress_reporter.GTestProgressReporter(
            progress_stream)
        self._output_formatters = (output_formatters
                                   if output_formatters is not None else [])
        self._output_dir = output_dir
        self._intermediate_dir = intermediate_dir
        if intermediate_dir is None and output_dir is not None:
            self._intermediate_dir = os.path.join(output_dir, 'artifacts')
        self._upload_bucket = upload_bucket

        self._current_story_run = None
        self._all_story_runs = []
        self._all_stories = set()
        self._representative_value_for_each_value_name = {}
        self._all_summary_values = []

        self._histograms = histogram_set.HistogramSet()

        self._benchmark_name = benchmark_name or '(unknown benchmark)'
        self._benchmark_description = benchmark_description or ''

        # |_interruption| is None if the benchmark has not been interrupted.
        # Otherwise it is a string explaining the reason for the interruption.
        # Interruptions occur for unrecoverable exceptions.
        self._interruption = None
        self._results_label = results_label

        self._diagnostics = {
            reserved_infos.BENCHMARKS.name: [self.benchmark_name],
            reserved_infos.BENCHMARK_DESCRIPTIONS.name:
            [self.benchmark_description],
        }

        # If the object has been finalized, no more results can be added to it.
        self._finalized = False
        self._start_time = time.time()
        self._results_stream = None
        if self._intermediate_dir is not None:
            if not os.path.exists(self._intermediate_dir):
                os.makedirs(self._intermediate_dir)
            self._results_stream = open(
                os.path.join(self._intermediate_dir, TELEMETRY_RESULTS), 'w')
            self._RecordBenchmarkStart()
Example 8
def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
    if suppress_gtest_report:
        return progress_reporter.ProgressReporter()

    return gtest_progress_reporter.GTestProgressReporter(
        sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
Example 9
def _GetProgressReporter(suppress_gtest_report):
  if suppress_gtest_report:
    return progress_reporter.ProgressReporter()
  else:
    return gtest_progress_reporter.GTestProgressReporter(sys.stdout)
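A brief usage sketch for the helpers above; the command-line flag name is hypothetical and only illustrates how the suppress_gtest_report boolean is typically supplied.

import argparse

parser = argparse.ArgumentParser()
# Hypothetical flag; the real option wiring is not part of the snippets above.
parser.add_argument('--suppress-gtest-report', action='store_true')
args = parser.parse_args()

reporter = _GetProgressReporter(args.suppress_gtest_report)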