Code Example #1
def _MakePageTestResults(description='benchmark_description', output_dir=None):
    return page_test_results.PageTestResults(benchmark_name='benchmark_name',
                                             benchmark_description=description,
                                             output_dir=output_dir)
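A minimal sketch of how a factory like this might be used in a test; the temporary-directory handling below is illustrative and not part of the original snippet.

import shutil
import tempfile

# Hypothetical usage: point the results at a throwaway output directory.
output_dir = tempfile.mkdtemp()
try:
    results = _MakePageTestResults(description='sample run',
                                   output_dir=output_dir)
    # ... record story runs against `results` here ...
finally:
    shutil.rmtree(output_dir)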
Code Example #2
    def testAsDictWithSkippedAndFailedTests_AlsoShardIndex(self):
        # Set up the shard index: use the real value if already running on a
        # shard, or fake one if not.
        delete_env_var_after = False
        expected_shard_index = 0
        if 'GTEST_SHARD_INDEX' in os.environ:
            expected_shard_index = int(os.environ['GTEST_SHARD_INDEX'])
        else:
            os.environ['GTEST_SHARD_INDEX'] = str(expected_shard_index)
            delete_env_var_after = True
        try:
            results = page_test_results.PageTestResults()
            results.telemetry_info.benchmark_start_epoch = 1501773200
            results.telemetry_info.benchmark_name = 'benchmark_name'

            results.WillRunPage(self._story_set[0])
            v0 = scalar.ScalarValue(
                results.current_page,
                'foo',
                'seconds',
                3,
                improvement_direction=improvement_direction.DOWN)
            results.AddValue(v0)
            results.DidRunPage(self._story_set[0])

            results.WillRunPage(self._story_set[1])
            v1 = scalar.ScalarValue(
                results.current_page,
                'bar',
                'seconds',
                4,
                improvement_direction=improvement_direction.DOWN)
            results.AddValue(v1)
            results.DidRunPage(self._story_set[1])

            results.WillRunPage(self._story_set[0])
            results.Skip('fake_skip')
            results.DidRunPage(self._story_set[0])

            results.WillRunPage(self._story_set[0])
            results.Skip('unexpected_skip', False)
            results.DidRunPage(self._story_set[0])

            results.WillRunPage(self._story_set[1])
            results.Fail('fake_failure')
            results.DidRunPage(self._story_set[1])

            d = json_3_output_formatter.ResultsAsDict(results)

            foo_story_result = d['tests']['benchmark_name']['Foo']
            self.assertEquals(foo_story_result['actual'], 'PASS SKIP SKIP')
            self.assertEquals(foo_story_result['expected'], 'PASS SKIP')
            self.assertTrue(foo_story_result['is_unexpected'])

            bar_story_result = d['tests']['benchmark_name']['Bar']
            self.assertEquals(bar_story_result['actual'], 'PASS FAIL')
            self.assertEquals(bar_story_result['expected'], 'PASS')
            self.assertEquals(bar_story_result['shard'], expected_shard_index)
            self.assertTrue(bar_story_result['is_unexpected'])

            self.assertEquals(d['num_failures_by_type'], {
                'PASS': 2,
                'FAIL': 1,
                'SKIP': 2
            })
        finally:
            if delete_env_var_after:
                del os.environ['GTEST_SHARD_INDEX']
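The manual try/finally bookkeeping above can also be expressed with mock.patch.dict, which restores os.environ automatically when the block exits; a hedged sketch, not part of the original test:

import os
import mock  # or: from unittest import mock

# Fake a shard index only for the duration of the with-block; the previous
# state of os.environ is restored on exit.
with mock.patch.dict(os.environ, {'GTEST_SHARD_INDEX': '0'}):
    assert int(os.environ['GTEST_SHARD_INDEX']) == 0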
Code Example #3
def _MakePageTestResults(reporter):
    results = page_test_results.PageTestResults(progress_reporter=reporter)
    results.telemetry_info.benchmark_name = 'bench'
    results.telemetry_info.benchmark_start_epoch = 123
    results.telemetry_info.benchmark_descriptions = 'foo'
    return results
Code Example #4
    def testPassAndFailedPages(self):
        test_story_set = _MakeStorySet()
        results = page_test_results.PageTestResults(
            progress_reporter=self._reporter)
        exc_info = self.CreateException()

        results.WillRunPage(test_story_set.stories[0])
        self._fake_timer.SetTime(0.007)
        results.DidRunPage(test_story_set.stories[0])

        results.WillRunPage(test_story_set.stories[1])
        self._fake_timer.SetTime(0.009)
        results.AddValue(
            failure.FailureValue(test_story_set.stories[1], exc_info))
        results.DidRunPage(test_story_set.stories[1])

        results.WillRunPage(test_story_set.stories[2])
        self._fake_timer.SetTime(0.015)
        results.AddValue(
            failure.FailureValue(test_story_set.stories[2], exc_info))
        results.DidRunPage(test_story_set.stories[2])

        results.WillRunPage(test_story_set.stories[3])
        self._fake_timer.SetTime(0.020)
        results.DidRunPage(test_story_set.stories[3])

        results.WillRunPage(test_story_set.stories[4])
        self._fake_timer.SetTime(0.025)
        results.DidRunPage(test_story_set.stories[4])

        results.WillRunPage(test_story_set.stories[5])
        self._fake_timer.SetTime(0.030)
        results.AddValue(
            failure.FailureValue(test_story_set.stories[5], exc_info))
        results.DidRunPage(test_story_set.stories[5])

        results.PrintSummary()
        exception_trace = ''.join(traceback.format_exception(*exc_info))
        expected = ("[ RUN      ] http://www.foo.com/\n"
                    "[       OK ] http://www.foo.com/ (7 ms)\n"
                    "[ RUN      ] http://www.bar.com/\n"
                    "%s\n"
                    "[  FAILED  ] http://www.bar.com/ (2 ms)\n"
                    "[ RUN      ] http://www.baz.com/\n"
                    "%s\n"
                    "[  FAILED  ] http://www.baz.com/ (6 ms)\n"
                    "[ RUN      ] http://www.roz.com/\n"
                    "[       OK ] http://www.roz.com/ (5 ms)\n"
                    "[ RUN      ] http://www.fus.com/@{'1': '2'}\n"
                    "[       OK ] http://www.fus.com/@{'1': '2'} (5 ms)\n"
                    "[ RUN      ] http://www.ro.com/@{'1': '2'}\n"
                    "%s\n"
                    "[  FAILED  ] http://www.ro.com/@{'1': '2'} (5 ms)\n"
                    "[  PASSED  ] 3 tests.\n"
                    "[  FAILED  ] 3 tests, listed below:\n"
                    "[  FAILED  ]  http://www.bar.com/\n"
                    "[  FAILED  ]  http://www.baz.com/\n"
                    "[  FAILED  ]  http://www.ro.com/@{'1': '2'}\n\n"
                    "3 FAILED TESTS\n\n" %
                    (exception_trace, exception_trace, exception_trace))
        self.assertEquals(expected, ''.join(self._output_stream.output_data))
Code Example #5
    def testFilterIsFirstResult(self):
        def AcceptSecondValues(_, is_first_result):
            return not is_first_result

        results = page_test_results.PageTestResults(
            should_add_value=AcceptSecondValues)

        # First results (filtered out)
        results.WillRunPage(self.pages[0])
        results.AddValue(
            scalar.ScalarValue(self.pages[0],
                               'a',
                               'seconds',
                               7,
                               improvement_direction=improvement_direction.UP))
        results.AddValue(
            scalar.ScalarValue(self.pages[0],
                               'b',
                               'seconds',
                               8,
                               improvement_direction=improvement_direction.UP))
        results.DidRunPage(self.pages[0])
        results.WillRunPage(self.pages[1])
        results.AddValue(
            scalar.ScalarValue(self.pages[1],
                               'a',
                               'seconds',
                               5,
                               improvement_direction=improvement_direction.UP))
        results.AddValue(
            scalar.ScalarValue(self.pages[1],
                               'd',
                               'seconds',
                               6,
                               improvement_direction=improvement_direction.UP))
        results.DidRunPage(self.pages[1])

        # Second results
        results.WillRunPage(self.pages[0])
        results.AddValue(
            scalar.ScalarValue(self.pages[0],
                               'a',
                               'seconds',
                               3,
                               improvement_direction=improvement_direction.UP))
        results.AddValue(
            scalar.ScalarValue(self.pages[0],
                               'b',
                               'seconds',
                               4,
                               improvement_direction=improvement_direction.UP))
        results.DidRunPage(self.pages[0])
        results.WillRunPage(self.pages[1])
        results.AddValue(
            scalar.ScalarValue(self.pages[1],
                               'a',
                               'seconds',
                               1,
                               improvement_direction=improvement_direction.UP))
        results.AddValue(
            scalar.ScalarValue(self.pages[1],
                               'd',
                               'seconds',
                               2,
                               improvement_direction=improvement_direction.UP))
        results.DidRunPage(self.pages[1])
        results.PrintSummary()
        expected_values = [('a', 'http://www.foo.com/', 3),
                           ('b', 'http://www.foo.com/', 4),
                           ('a', 'http://www.bar.com/', 1),
                           ('d', 'http://www.bar.com/', 2)]
        actual_values = [(v.name, v.page.url, v.value)
                         for v in results.all_page_specific_values]
        self.assertEquals(expected_values, actual_values)
Code Example #6
File: results_options.py  Project: sanjana87/catapult
def CreateResults(options,
                  benchmark_name=None,
                  benchmark_description=None,
                  report_progress=False):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
    benchmark_name: A string with the name of the currently running benchmark.
    benchmark_description: A string with a description of the currently
        running benchmark.
    report_progress: A boolean indicating whether to emit a gtest-style
        progress report as story runs are recorded.

  Returns:
    A PageTestResults object.
  """
    assert options.output_dir, 'An output_dir must be provided to create results'

    # Make sure the directory exists.
    if not os.path.exists(options.output_dir):
        os.makedirs(options.output_dir)

    if options.external_results_processor:
        output_formats = options.legacy_output_formats
    else:
        output_formats = options.output_formats

    output_formatters = []
    for output_format in output_formats:
        if output_format == 'none':
            continue
        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, options.reset_results,
                    options.upload_bucket))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise NotImplementedError(output_format)

    return page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_stream=sys.stdout if report_progress else None,
        output_dir=options.output_dir,
        intermediate_dir=options.intermediate_dir,
        benchmark_name=benchmark_name,
        benchmark_description=benchmark_description,
        upload_bucket=options.upload_bucket,
        results_label=options.results_label)
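A hedged sketch of calling this CreateResults; the argparse.Namespace below only stands in for the options object normally produced by AddResultsOptions, and the attribute values (paths, formats) are illustrative assumptions:

import argparse

# Stand-in for the parsed options; real callers get this from AddResultsOptions.
options = argparse.Namespace(
    output_dir='/tmp/telemetry_results',        # required by the assert above
    intermediate_dir='/tmp/telemetry_results/artifacts',
    output_formats=['html', 'histograms'],
    legacy_output_formats=[],
    external_results_processor=False,
    reset_results=False,
    upload_bucket=None,
    results_label=None)

results = CreateResults(options,
                        benchmark_name='my_benchmark',
                        benchmark_description='Example run',
                        report_progress=True)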
Code Example #7
File: summary_unittest.py  Project: stevenjb/catapult
  def testBasicSummaryNonuniformResults(self):
    page0 = self.pages[0]
    page1 = self.pages[1]
    page2 = self.pages[2]

    results = page_test_results.PageTestResults()
    results.WillRunPage(page0)
    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v0)
    v1 = scalar.ScalarValue(page0, 'b', 'seconds', 10,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v1)
    results.DidRunPage(page0)

    results.WillRunPage(page1)
    v2 = scalar.ScalarValue(page1, 'a', 'seconds', 3,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v2)
    v3 = scalar.ScalarValue(page1, 'b', 'seconds', 10,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v3)
    results.DidRunPage(page1)

    results.WillRunPage(page2)
    v4 = scalar.ScalarValue(page2, 'a', 'seconds', 7,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v4)
    # Note, page[2] does not report a 'b' metric.
    results.DidRunPage(page2)

    summary = summary_module.Summary(results.all_page_specific_values)
    values = summary.interleaved_computed_per_page_values_and_summaries

    v0_list = list_of_scalar_values.ListOfScalarValues(
        page0, 'a', 'seconds', [3],
        improvement_direction=improvement_direction.UP)
    v1_list = list_of_scalar_values.ListOfScalarValues(
        page0, 'b', 'seconds', [10],
        improvement_direction=improvement_direction.UP)
    v2_list = list_of_scalar_values.ListOfScalarValues(
        page1, 'a', 'seconds', [3],
        improvement_direction=improvement_direction.UP)
    v3_list = list_of_scalar_values.ListOfScalarValues(
        page1, 'b', 'seconds', [10],
        improvement_direction=improvement_direction.UP)
    v4_list = list_of_scalar_values.ListOfScalarValues(
        page2, 'a', 'seconds', [7],
        improvement_direction=improvement_direction.UP)

    a_summary = list_of_scalar_values.ListOfScalarValues(
        None, 'a', 'seconds', [3, 3, 7],
        improvement_direction=improvement_direction.UP)
    b_summary = list_of_scalar_values.ListOfScalarValues(
        None, 'b', 'seconds', [10, 10],
        improvement_direction=improvement_direction.UP)

    self.assertEquals(7, len(values))
    self.assertIn(v0_list, values)
    self.assertIn(v1_list, values)
    self.assertIn(v2_list, values)
    self.assertIn(v3_list, values)
    self.assertIn(v4_list, values)
    self.assertIn(a_summary, values)
    self.assertIn(b_summary, values)
Code Example #8
    def testRepeatedPages(self):
        page0 = self.pages[0]
        page1 = self.pages[1]

        results = page_test_results.PageTestResults()
        results.WillRunPage(page0)
        v0 = scalar.ScalarValue(page0,
                                'a',
                                'seconds',
                                3,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v0)
        results.DidRunPage(page0)

        results.WillRunPage(page0)
        v2 = scalar.ScalarValue(page0,
                                'a',
                                'seconds',
                                4,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v2)
        results.DidRunPage(page0)

        results.WillRunPage(page1)
        v1 = scalar.ScalarValue(page1,
                                'a',
                                'seconds',
                                7,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v1)
        results.DidRunPage(page1)

        results.WillRunPage(page1)
        v3 = scalar.ScalarValue(page1,
                                'a',
                                'seconds',
                                8,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v3)
        results.DidRunPage(page1)

        summary = summary_module.Summary(results.all_page_specific_values)
        values = summary.interleaved_computed_per_page_values_and_summaries

        page0_aggregated = list_of_scalar_values.ListOfScalarValues(
            page0,
            'a',
            'seconds', [3, 4],
            improvement_direction=improvement_direction.UP)
        page1_aggregated = list_of_scalar_values.ListOfScalarValues(
            page1,
            'a',
            'seconds', [7, 8],
            improvement_direction=improvement_direction.UP)
        # Std is computed using pooled standard deviation.
        a_summary = list_of_scalar_values.ListOfScalarValues(
            None,
            'a',
            'seconds', [3, 4, 7, 8],
            std=math.sqrt(0.5),
            improvement_direction=improvement_direction.UP)

        self.assertEquals(3, len(values))
        self.assertIn(page0_aggregated, values)
        self.assertIn(page1_aggregated, values)
        self.assertIn(a_summary, values)
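The std=math.sqrt(0.5) passed to a_summary follows from pooling the per-page sample variances; a small illustrative check (the helper below is not part of Telemetry):

import math

def PooledStd(groups):
    # Weight each group's sample variance by its degrees of freedom (n - 1).
    def SampleVar(xs):
        mean = sum(xs) / float(len(xs))
        return sum((x - mean) ** 2 for x in xs) / (len(xs) - 1)
    numerator = sum((len(g) - 1) * SampleVar(g) for g in groups)
    denominator = sum(len(g) - 1 for g in groups)
    return math.sqrt(numerator / denominator)

# [3, 4] and [7, 8] each have sample variance 0.5, so the pooled std is
# sqrt((0.5 + 0.5) / 2) == sqrt(0.5), matching a_summary above.
assert abs(PooledStd([[3, 4], [7, 8]]) - math.sqrt(0.5)) < 1e-9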
Code Example #9
    def testSummaryUsesKeyFunc(self):
        page0 = self.pages[0]
        page1 = self.pages[1]

        results = page_test_results.PageTestResults()

        results.WillRunPage(page0)
        v0 = scalar.ScalarValue(page0,
                                'a',
                                'seconds',
                                20,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v0)

        v1 = scalar.ScalarValue(page0,
                                'b',
                                'seconds',
                                42,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v1)
        results.DidRunPage(page0)

        results.WillRunPage(page1)
        v2 = scalar.ScalarValue(page1,
                                'a',
                                'seconds',
                                20,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v2)

        v3 = scalar.ScalarValue(page1,
                                'b',
                                'seconds',
                                42,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v3)
        results.DidRunPage(page1)

        summary = summary_module.Summary(results.all_page_specific_values,
                                         key_func=lambda v: True)
        values = summary.interleaved_computed_per_page_values_and_summaries

        v0_list = list_of_scalar_values.ListOfScalarValues(
            page0,
            'a',
            'seconds', [20, 42],
            improvement_direction=improvement_direction.UP)
        v2_list = list_of_scalar_values.ListOfScalarValues(
            page1,
            'a',
            'seconds', [20, 42],
            improvement_direction=improvement_direction.UP)
        # Std is computed using pooled standard deviation.
        merged_value = list_of_scalar_values.ListOfScalarValues(
            None,
            'a',
            'seconds', [20, 42, 20, 42],
            std=math.sqrt(242.0),
            improvement_direction=improvement_direction.UP)

        self.assertEquals(3, len(values))
        self.assertIn(v0_list, values)
        self.assertIn(v2_list, values)
        self.assertIn(merged_value, values)
Code Example #10
def CreateResults(options,
                  benchmark_name=None,
                  benchmark_description=None,
                  benchmark_enabled=True,
                  report_progress=False,
                  should_add_value=None):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
    benchmark_name: A string with the name of the currently running benchmark.
    benchmark_description: A string with a description of the currently
        running benchmark.
    benchmark_enabled: A boolean indicating whether the benchmark to run
        is enabled. (Some output formats need to produce special output for
        disabled benchmarks).
    report_progress: A boolean indicating whether to emit a gtest-style
        progress report as story runs are recorded.
    should_add_value: A function that takes two arguments: a value name and
        a boolean (True when the value belongs to the first run of the
        corresponding story). It returns True if the value should be added
        to the test results and False otherwise.

  Returns:
    A PageTestResults object.
  """
    if not options.output_formats:
        options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format == 'none':
            continue
        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, options.reset_results, upload_bucket))
        elif output_format == 'json-test-results':
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(output_stream))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    return page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_stream=sys.stdout if report_progress else None,
        output_dir=options.output_dir,
        should_add_value=should_add_value,
        benchmark_name=benchmark_name,
        benchmark_description=benchmark_description,
        benchmark_enabled=benchmark_enabled,
        upload_bucket=upload_bucket,
        results_label=options.results_label)
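The should_add_value hook described in the docstring is just a predicate over (value name, is-first-run), in the same spirit as AcceptSecondValues in Code Example #5; a minimal sketch with an illustrative name:

def DiscardWarmupValues(value_name, is_first_result):
    # Keep only values recorded after a story's first (warm-up) run.
    del value_name  # this simple policy ignores the value name
    return not is_first_result

# Passed straight through to PageTestResults, e.g.:
#   CreateResults(options, should_add_value=DiscardWarmupValues)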
Code Example #11
    def testRepeatedPagesetOneIterationOnePageFails(self):
        """Page fails on one iteration, no averaged results should print."""
        page0 = self.pages[0]
        page1 = self.pages[1]

        results = page_test_results.PageTestResults()
        results.WillRunPage(page0)
        v0 = scalar.ScalarValue(page0,
                                'a',
                                'seconds',
                                3,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v0)
        results.DidRunPage(page0)

        results.WillRunPage(page1)
        v1 = scalar.ScalarValue(page1,
                                'a',
                                'seconds',
                                7,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v1)
        v2 = failure.FailureValue.FromMessage(page1, 'message')
        results.AddValue(v2)
        results.DidRunPage(page1)

        results.WillRunPage(page0)
        v3 = scalar.ScalarValue(page0,
                                'a',
                                'seconds',
                                4,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v3)
        results.DidRunPage(page0)

        results.WillRunPage(page1)
        v4 = scalar.ScalarValue(page1,
                                'a',
                                'seconds',
                                8,
                                improvement_direction=improvement_direction.UP)
        results.AddValue(v4)
        results.DidRunPage(page1)

        summary = summary_module.Summary(results.all_page_specific_values)
        values = summary.interleaved_computed_per_page_values_and_summaries

        page0_aggregated = list_of_scalar_values.ListOfScalarValues(
            page0,
            'a',
            'seconds', [3, 4],
            improvement_direction=improvement_direction.UP)
        page1_aggregated = list_of_scalar_values.ListOfScalarValues(
            page1,
            'a',
            'seconds', [7, 8],
            improvement_direction=improvement_direction.UP)

        self.assertEquals(2, len(values))
        self.assertIn(page0_aggregated, values)
        self.assertIn(page1_aggregated, values)
Code Example #12
    def test_basic_summary(self):
        test_story_set = _MakeStorySet()
        output_file = StringIOFile()

        # Run the first time and verify the results are written to the HTML file.
        results = page_test_results.PageTestResults()
        results.WillRunPage(test_story_set.stories[0])
        results.AddValue(
            scalar.ScalarValue(test_story_set.stories[0], 'a', 'seconds', 3))
        results.DidRunPage(test_story_set.stories[0])

        results.WillRunPage(test_story_set.stories[1])
        results.AddValue(
            scalar.ScalarValue(test_story_set.stories[1], 'a', 'seconds', 7))
        results.DidRunPage(test_story_set.stories[1])

        formatter = DeterministicHtmlOutputFormatter(output_file,
                                                     FakeMetadataForTest(),
                                                     False, False,
                                                     'browser_type')
        formatter.Format(results)
        expected = {
            "platform": "browser_type",
            "buildTime": "1998-09-04T13:00:00.007777",
            "label": 'test_name (1998-09-04 13:00:00)',
            "tests": {
                "test_name": {
                    "metrics": {
                        "a": {
                            "current": [3, 7],
                            "units": "seconds",
                            "important": True
                        },
                        "telemetry_page_measurement_results.num_failed": {
                            "current": [0],
                            "units": "count",
                            "important": False
                        },
                        "a.http://www.bar.com/": {
                            "current": [7],
                            "units": "seconds",
                            "important": False
                        },
                        "a.http://www.foo.com/": {
                            "current": [3],
                            "units": "seconds",
                            "important": False
                        }
                    }
                }
            },
        }
        self.assertEquals(expected, formatter.GetResults())

        # Run the second time and verify the results are appended to the HTML file.
        output_file.seek(0)
        results = page_test_results.PageTestResults()
        results.WillRunPage(test_story_set.stories[0])
        results.AddValue(
            scalar.ScalarValue(test_story_set.stories[0], 'a', 'seconds', 4))
        results.DidRunPage(test_story_set.stories[0])

        results.WillRunPage(test_story_set.stories[1])
        results.AddValue(
            scalar.ScalarValue(test_story_set.stories[1], 'a', 'seconds', 8))
        results.DidRunPage(test_story_set.stories[1])

        formatter = DeterministicHtmlOutputFormatter(output_file,
                                                     FakeMetadataForTest(),
                                                     False, False,
                                                     'browser_type')
        formatter.Format(results)
        expected = [{
            "platform": "browser_type",
            "buildTime": "1998-09-04T13:00:00.007777",
            "label": 'test_name (1998-09-04 13:00:00)',
            "tests": {
                "test_name": {
                    "metrics": {
                        "a": {
                            "current": [3, 7],
                            "units": "seconds",
                            "important": True
                        },
                        "telemetry_page_measurement_results.num_failed": {
                            "current": [0],
                            "units": "count",
                            "important": False
                        },
                        "a.http://www.bar.com/": {
                            "current": [7],
                            "units": "seconds",
                            "important": False
                        },
                        "a.http://www.foo.com/": {
                            "current": [3],
                            "units": "seconds",
                            "important": False
                        }
                    }
                }
            },
        }, {
            "platform": "browser_type",
            "buildTime": "1998-09-04T13:00:00.007777",
            "label": 'test_name (1998-09-04 13:00:00)',
            "tests": {
                "test_name": {
                    "metrics": {
                        "a": {
                            "current": [4, 8],
                            "units": "seconds",
                            "important": True
                        },
                        "telemetry_page_measurement_results.num_failed": {
                            "current": [0],
                            "units": "count",
                            "important": False,
                        },
                        "a.http://www.bar.com/": {
                            "current": [8],
                            "units": "seconds",
                            "important": False
                        },
                        "a.http://www.foo.com/": {
                            "current": [4],
                            "units": "seconds",
                            "important": False
                        }
                    }
                }
            },
        }]
        self.assertEquals(expected, formatter.GetCombinedResults())
        last_output_len = len(output_file.getvalue())

        # Now reset the results and verify the old ones are gone.
        output_file.seek(0)
        results = page_test_results.PageTestResults()
        results.WillRunPage(test_story_set.stories[0])
        results.AddValue(
            scalar.ScalarValue(test_story_set.stories[0], 'a', 'seconds', 5))
        results.DidRunPage(test_story_set.stories[0])

        results.WillRunPage(test_story_set.stories[1])
        results.AddValue(
            scalar.ScalarValue(test_story_set.stories[1], 'a', 'seconds', 9))
        results.DidRunPage(test_story_set.stories[1])

        formatter = DeterministicHtmlOutputFormatter(output_file,
                                                     FakeMetadataForTest(),
                                                     True, False,
                                                     'browser_type')
        formatter.Format(results)
        expected = [{
            "platform": "browser_type",
            "buildTime": "1998-09-04T13:00:00.007777",
            "label": 'test_name (1998-09-04 13:00:00)',
            "tests": {
                "test_name": {
                    "metrics": {
                        "a": {
                            "current": [5, 9],
                            "units": "seconds",
                            "important": True
                        },
                        "telemetry_page_measurement_results.num_failed": {
                            "current": [0],
                            "units": "count",
                            "important": False
                        },
                        "a.http://www.bar.com/": {
                            "current": [9],
                            "units": "seconds",
                            "important": False
                        },
                        "a.http://www.foo.com/": {
                            "current": [5],
                            "units": "seconds",
                            "important": False
                        }
                    }
                }
            },
        }]
        self.assertEquals(expected, formatter.GetCombinedResults())
        self.assertTrue(len(output_file.getvalue()) < last_output_len)
Code Example #13
def CreateResults(benchmark_metadata,
                  options,
                  value_can_be_added_predicate=lambda v, is_first: True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format == 'none' or output_format == "gtest":
            continue

        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'csv-pivot-table':
            output_formatters.append(
                csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
                    output_stream, trace_tag=options.output_trace_tag))
        elif output_format == 'buildbot':
            output_formatters.append(
                buildbot_output_formatter.BuildbotOutputFormatter(
                    output_stream, trace_tag=options.output_trace_tag))
        elif output_format == 'html':
            # TODO(chrishenry): We show buildbot output so that users can grep
            # through the results easily without needing to open the html
            # file.  Another option for this is to output the results directly
            # in gtest-style results (via some sort of progress reporter),
            # as we plan to enable gtest-style output for all output formatters.
            output_formatters.append(
                buildbot_output_formatter.BuildbotOutputFormatter(
                    sys.stdout, trace_tag=options.output_trace_tag))
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    options.upload_results, options.browser_type,
                    options.results_label))
        elif output_format == 'html2':
            output_formatters.append(
                html2_output_formatter.Html2OutputFormatter(
                    output_stream, options.reset_results,
                    options.upload_results))
        elif output_format == 'json':
            output_formatters.append(
                json_output_formatter.JsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'valueset':
            output_formatters.append(
                valueset_output_formatter.ValueSetOutputFormatter(
                    output_stream))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = 'gtest' in options.output_formats

    reporter = _GetProgressReporter(output_skipped_tests_summary,
                                    options.suppress_gtest_report)

    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        value_can_be_added_predicate=value_can_be_added_predicate)

    results.iteration_info.benchmark_name = benchmark_metadata.name
    results.iteration_info.benchmark_start_ms = time.time() * 1000.0
    if options.results_label:
        results.iteration_info.label = options.results_label

    return results
Code Example #14
def GetSingleEventMetrics(events, interactions):
    results = page_test_results.PageTestResults()
    results.WillRunPage(page.Page('file://blank.html'))
    SingleEventTestMetric()._AddResultsInternal(events, interactions, results)
    return dict((value.name, value.values)
                for value in results.current_page_run.values)
Code Example #15
    def __init__(self, thread_name):
        self._model = model.TimelineModel()
        self._renderer_process = self._model.GetOrCreateProcess(1)
        self._renderer_thread = self._renderer_process.GetOrCreateThread(2)
        self._renderer_thread.name = thread_name
        self._results = page_test_results.PageTestResults()
Code Example #16
def CreateResults(benchmark_metadata,
                  options,
                  should_add_value=lambda name, is_first: True,
                  benchmark_enabled=True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

    artifacts = artifact_results.NoopArtifactResults(options.output_dir)

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format == 'none' or output_format == "gtest":
            continue
        # pylint: disable=redefined-variable-type
        if isinstance(artifacts, artifact_results.NoopArtifactResults):
            artifacts = artifact_results.ArtifactResults(options.output_dir)
        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    upload_bucket))
        elif output_format == 'json-test-results':
            # Only create artifact results if we're going to actually output them
            # through an output format.
            artifacts = artifact_results.ArtifactResults(options.output_dir)
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(
                    output_stream, artifacts))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                benchmark_metadata,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = 'gtest' in options.output_formats

    reporter = _GetProgressReporter(output_skipped_tests_summary,
                                    options.suppress_gtest_report)

    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        should_add_value=should_add_value,
        benchmark_enabled=benchmark_enabled,
        upload_bucket=upload_bucket,
        artifact_results=artifacts,
        benchmark_metadata=benchmark_metadata)

    results.telemetry_info.benchmark_name = benchmark_metadata.name
    results.telemetry_info.benchmark_descriptions = benchmark_metadata.description
    results.telemetry_info.benchmark_start_epoch = time.time()
    if options.results_label:
        results.telemetry_info.label = options.results_label

    return results
Code Example #17
    def ClearResults(self):
        self._results = page_test_results.PageTestResults()
Code Example #18
    def testAsChartDictNoDescription(self):
        d = chart_json_output_formatter.ResultsAsChartDict(
            benchmark.BenchmarkMetadata('benchmark_name', ''),
            page_test_results.PageTestResults())

        self.assertEquals('', d['benchmark_metadata']['description'])
Code Example #19
    def setUp(self):
        self._output = StringIO.StringIO()
        self._story_set = _MakeStorySet()
        self._results = page_test_results.PageTestResults()
        self._formatter = None
        self.MakeFormatter()
Code Example #20
def CreateResults(benchmark_metadata,
                  options,
                  value_can_be_added_predicate=lambda v, is_first: True,
                  benchmark_enabled=True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format == 'none' or output_format == "gtest":
            continue

        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'csv-pivot-table':
            output_formatters.append(
                csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
                    output_stream, trace_tag=options.output_trace_tag))
        elif output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    upload_bucket))
        elif output_format == 'json':
            output_formatters.append(
                json_output_formatter.JsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'json-test-results':
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(output_stream))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                benchmark_metadata,
                                                options.reset_results))
        elif output_format == 'legacy-html':
            output_formatters.append(
                legacy_html_output_formatter.LegacyHtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    options.browser_type, options.results_label))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = 'gtest' in options.output_formats

    reporter = _GetProgressReporter(output_skipped_tests_summary,
                                    options.suppress_gtest_report)

    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        value_can_be_added_predicate=value_can_be_added_predicate,
        benchmark_enabled=benchmark_enabled)

    results.telemetry_info.benchmark_name = benchmark_metadata.name
    results.telemetry_info.benchmark_start_epoch = time.time()
    if options.results_label:
        results.telemetry_info.label = options.results_label

    return results
Code Example #21
    def CreateResults(self, **kwargs):
        kwargs.setdefault('output_dir', self._output_dir)
        kwargs.setdefault('intermediate_dir', self.intermediate_dir)
        return page_test_results.PageTestResults(**kwargs)
Code Example #22
    def _MakeResults(self, **kwargs):
        kwargs.setdefault('benchmark_name', 'benchmark_name')
        kwargs.setdefault('output_dir', self._output_dir)
        with mock.patch('time.time', return_value=1501773200):
            return page_test_results.PageTestResults(**kwargs)