def Format(self, page_test_results):
  # Delegate to the newer HTML2 formatter when value-set results are present.
  if page_test_results.value_set:
    html2_formatter = html2_output_formatter.Html2OutputFormatter(
        self._output_stream, self._reset_results, self._upload_results)
    html2_formatter.Format(page_test_results)
    return

  chart_json_dict = chart_json_output_formatter.ResultsAsChartDict(
      self._metadata,
      page_test_results.all_page_specific_values,
      page_test_results.all_summary_values)
  self._TranslateChartJson(chart_json_dict)
  self._PrintPerfResult('telemetry_page_measurement_results', 'num_failed',
                        [len(page_test_results.failures)], 'count',
                        'unimportant')

  self._combined_results = self._ReadExistingResults(self._output_stream)
  self._combined_results.append(self._result)

  html = self._GetHtmlTemplate()
  html = html.replace('%json_results%', json.dumps(self.GetCombinedResults()))
  html = html.replace('%json_units%', self._GetUnitJson())
  html = html.replace('%plugins%', self._GetPlugins())
  self._SaveResults(html)

  if self._upload_results:
    file_path = os.path.abspath(self._output_stream.name)
    file_name = 'html-results/results-%s' % datetime.datetime.now().strftime(
        '%Y-%m-%d_%H-%M-%S')
    try:
      cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET, file_name, file_path)
      print
      print ('View online at '
             'http://storage.googleapis.com/chromium-telemetry/%s' % file_name)
    except cloud_storage.PermissionError as e:
      logging.error('Cannot upload profiling files to cloud storage due to '
                    'permission error: %s' % e.message)
  print
  print 'View result at file://%s' % os.path.abspath(
      self._output_stream.name)
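# --- A hedged usage sketch, not part of the original source. It shows how
# this legacy formatter could be wired up by hand, assuming the positional
# constructor signature visible in the legacy CreateResults below
# (stream, benchmark metadata, reset_results, upload_results, browser_type,
# results_label) and the telemetry.internal.results import path; both may
# differ by Telemetry revision. The stream must be a real file object,
# because the upload branch derives its path from stream.name.
from telemetry.internal.results import html_output_formatter  # assumed path

with open('results.html', 'a+') as output_stream:
  formatter = html_output_formatter.HtmlOutputFormatter(
      output_stream,  # destination; .name also feeds the upload file path
      metadata,       # benchmark metadata object, assumed from the harness
      False,          # reset_results: keep previously recorded runs
      False,          # upload_results: skip the cloud_storage.Insert branch
      'release',      # browser_type
      None)           # results_label
  formatter.Format(results)  # `results` is a PageTestResults, assumed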
def CreateResults(benchmark_metadata, options,
                  value_can_be_added_predicate=lambda v, is_first: True,
                  benchmark_enabled=True):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  upload_bucket = None
  if options.upload_results:
    upload_bucket = options.upload_bucket
    if upload_bucket in cloud_storage.BUCKET_ALIASES:
      upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue

    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'csv-pivot-table':
      output_formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'html':
      output_formatters.append(
          html2_output_formatter.Html2OutputFormatter(
              output_stream, benchmark_metadata, options.reset_results,
              options.upload_results, upload_bucket=upload_bucket))
    elif output_format == 'json':
      output_formatters.append(
          json_output_formatter.JsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'valueset':
      output_formatters.append(
          valueset_output_formatter.ValueSetOutputFormatter(output_stream))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats
  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)

  results = page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      value_can_be_added_predicate=value_can_be_added_predicate,
      benchmark_enabled=benchmark_enabled)
  results.iteration_info.benchmark_name = benchmark_metadata.name
  results.iteration_info.benchmark_start_ms = time.time() * 1000.0
  if options.results_label:
    results.iteration_info.label = options.results_label

  return results
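# --- A hedged sketch, not from the original source, of calling CreateResults
# without the real option parser. The attribute names mirror exactly what the
# function body above reads; in production they are populated by
# AddResultsOptions. `benchmark_metadata` is assumed to come from the harness.
import argparse

options = argparse.Namespace(
    output_formats=['html', 'json'],
    output_dir='/tmp/benchmark-results',
    output_trace_tag='',
    reset_results=False,
    upload_results=True,     # triggers the bucket-alias resolution above
    upload_bucket='public',  # passed through cloud_storage.BUCKET_ALIASES
    results_label='local-run',
    suppress_gtest_report=True)

results = CreateResults(benchmark_metadata, options)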
def CreateFormatter(output_file, reset_results):
  # Third positional argument is upload_results; this formatter never uploads.
  return html2_output_formatter.Html2OutputFormatter(
      output_file, reset_results, False)
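# --- A hedged usage sketch assuming the two-argument signature above: the
# caller owns the output file, and the hard-coded False keeps everything
# local rather than uploading to cloud storage.
with open('results2.html', 'a+') as output_file:
  formatter = CreateFormatter(output_file, reset_results=False)
  formatter.Format(page_test_results)  # PageTestResults assumed from harness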
def CreateResults(benchmark_metadata, options,
                  value_can_be_added_predicate=lambda v, is_first: True):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue

    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'csv-pivot-table':
      output_formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'buildbot':
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'html':
      # TODO(chrishenry): We show buildbot output so that users can grep
      # through the results easily without needing to open the html
      # file. Another option for this is to output the results directly
      # in gtest-style results (via some sort of progress reporter),
      # as we plan to enable gtest-style output for all output formatters.
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              sys.stdout, trace_tag=options.output_trace_tag))
      output_formatters.append(
          html_output_formatter.HtmlOutputFormatter(
              output_stream, benchmark_metadata, options.reset_results,
              options.upload_results, options.browser_type,
              options.results_label))
    elif output_format == 'html2':
      output_formatters.append(
          html2_output_formatter.Html2OutputFormatter(
              output_stream, options.reset_results, options.upload_results))
    elif output_format == 'json':
      output_formatters.append(
          json_output_formatter.JsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'valueset':
      output_formatters.append(
          valueset_output_formatter.ValueSetOutputFormatter(output_stream))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats
  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)

  results = page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      value_can_be_added_predicate=value_can_be_added_predicate)
  results.iteration_info.benchmark_name = benchmark_metadata.name
  results.iteration_info.benchmark_start_ms = time.time() * 1000.0
  if options.results_label:
    results.iteration_info.label = options.results_label

  return results
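# --- A hedged sketch, not from the original source, highlighting how this
# legacy CreateResults differs from the newer version above: the 'html'
# branch installs two formatters (BuildbotOutputFormatter on sys.stdout for
# grep-able console output, plus HtmlOutputFormatter on the file) and reads
# options.browser_type, which the newer signature no longer uses.
import argparse

legacy_options = argparse.Namespace(
    output_formats=['html'],
    output_dir='/tmp/benchmark-results',
    output_trace_tag='',     # forwarded to both buildbot formatters
    reset_results=False,
    upload_results=False,
    browser_type='release',  # required here, dropped in the newer version
    results_label=None,
    suppress_gtest_report=True)

results = CreateResults(benchmark_metadata, legacy_options)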