Example #1
    def test_basic_summary(self):
        sample = {
            'name': 'a',
            'guid': '42',
            'description': 'desc',
            'important': False,
            'diagnostics': [],
            'type': 'numeric',
            'numeric': {
                'unit': 'n%',
                'type': 'scalar',
                'value': 42
            }
        }

        results = page_test_results.PageTestResults()
        results.value_set.extend([sample])

        with open(self.output_file_path, 'w') as output_file:
            formatter = valueset_output_formatter.ValueSetOutputFormatter(
                output_file)
            formatter.Format(results)

        with open(self.output_file_path) as f:
            written_data = json.load(f)
        self.assertEqual([sample], written_data)
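For the round-trip above to hold, the formatter only needs to write results.value_set to its stream as a JSON list. A minimal sketch of such a formatter, assuming nothing beyond what this test exercises (the class name and the absence of a base class are placeholders, not the real telemetry implementation):

import json

class MinimalValueSetFormatter(object):
    """Hypothetical stand-in: dumps results.value_set as a JSON list."""

    def __init__(self, output_stream):
        self._output_stream = output_stream

    def Format(self, page_test_results):
        # json.load() on the written file must return the original list of
        # value dicts, which is exactly what the assertion above checks.
        json.dump(list(page_test_results.value_set), self._output_stream)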
Example #2
def CreateResults(benchmark_metadata,
                  options,
                  value_can_be_added_predicate=lambda v, is_first: True,
                  benchmark_enabled=True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format in ('none', 'gtest'):
            continue

        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'csv-pivot-table':
            output_formatters.append(
                csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
                    output_stream, trace_tag=options.output_trace_tag))
        elif output_format == 'html':
            output_formatters.append(
                html2_output_formatter.Html2OutputFormatter(
                    output_stream,
                    benchmark_metadata,
                    options.reset_results,
                    options.upload_results,
                    upload_bucket=upload_bucket))
        elif output_format == 'json':
            output_formatters.append(
                json_output_formatter.JsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'valueset':
            output_formatters.append(
                valueset_output_formatter.ValueSetOutputFormatter(
                    output_stream))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = 'gtest' in options.output_formats

    reporter = _GetProgressReporter(output_skipped_tests_summary,
                                    options.suppress_gtest_report)

    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        value_can_be_added_predicate=value_can_be_added_predicate,
        benchmark_enabled=benchmark_enabled)

    results.iteration_info.benchmark_name = benchmark_metadata.name
    results.iteration_info.benchmark_start_ms = time.time() * 1000.0
    if options.results_label:
        results.iteration_info.label = options.results_label

    return results
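CreateResults reads only a fixed set of attributes from options, so a quick way to exercise it outside the Telemetry command-line flow is to fake the AddResultsOptions namespace. A hedged sketch (the attribute names come from the function body above; argparse.Namespace is just a convenient attribute bag, not the real options object):

import argparse

# Stub carrying exactly the attributes CreateResults reads above.
options = argparse.Namespace(
    output_formats=['json'],   # an empty list falls back to _OUTPUT_FORMAT_CHOICES[0]
    output_dir='/tmp/results',
    output_trace_tag='',
    reset_results=False,
    upload_results=False,      # False skips the cloud_storage bucket alias lookup
    upload_bucket=None,
    results_label=None,
    suppress_gtest_report=False)

# benchmark_metadata must expose a .name attribute for iteration_info.
results = CreateResults(benchmark_metadata, options)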
Example #3

def CreateResults(benchmark_metadata,
                  options,
                  value_can_be_added_predicate=lambda v, is_first: True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format in ('none', 'gtest'):
            continue

        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'csv-pivot-table':
            output_formatters.append(
                csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
                    output_stream, trace_tag=options.output_trace_tag))
        elif output_format == 'buildbot':
            output_formatters.append(
                buildbot_output_formatter.BuildbotOutputFormatter(
                    output_stream, trace_tag=options.output_trace_tag))
        elif output_format == 'html':
            # TODO(chrishenry): We show buildbot output so that users can grep
            # through the results easily without needing to open the html
            # file.  Another option for this is to output the results directly
            # in gtest-style results (via some sort of progress reporter),
            # as we plan to enable gtest-style output for all output formatters.
            output_formatters.append(
                buildbot_output_formatter.BuildbotOutputFormatter(
                    sys.stdout, trace_tag=options.output_trace_tag))
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    options.upload_results, options.browser_type,
                    options.results_label))
        elif output_format == 'html2':
            output_formatters.append(
                html2_output_formatter.Html2OutputFormatter(
                    output_stream, options.reset_results,
                    options.upload_results))
        elif output_format == 'json':
            output_formatters.append(
                json_output_formatter.JsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'valueset':
            output_formatters.append(
                valueset_output_formatter.ValueSetOutputFormatter(
                    output_stream))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = 'gtest' in options.output_formats

    reporter = _GetProgressReporter(output_skipped_tests_summary,
                                    options.suppress_gtest_report)

    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        value_can_be_added_predicate=value_can_be_added_predicate)

    results.iteration_info.benchmark_name = benchmark_metadata.name
    results.iteration_info.benchmark_start_ms = time.time() * 1000.0
    if options.results_label:
        results.iteration_info.label = options.results_label

    return results
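Neither example shows _GetOutputStream. A plausible minimal stand-in, assuming it simply opens one file per format under output_dir (the real helper's file naming, and whether it ever returns sys.stdout, are unknown; this is an illustration only):

import os

def _GetOutputStreamSketch(output_format, output_dir):
    # Hypothetical replacement for the private _GetOutputStream helper.
    # 'none' and 'gtest' never reach this point; both callers skip them.
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    return open(os.path.join(output_dir, 'results.%s' % output_format), 'w')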