def Main(args): if len(args) is not 1: print 'Invalid arguments. Usage: measure_trace.py <trace file>' return 1 with open(args[0]) as trace_file: trace_data = tracing_timeline_data.TracingTimelineData( json.load(trace_file)) timeline_model = model.TimelineModel(trace_data) smoothness_metric = smoothness.SmoothnessMetric() formatters = [ buildbot_output_formatter.BuildbotOutputFormatter(sys.stdout) ] results = page_test_results.PageTestResults(output_formatters=formatters) for thread in timeline_model.GetAllThreads(): interaction_records = _ExtractInteractionsRecordFromThread( thread, timeline_model) if not any(interaction_records): continue records_label_to_records_map = collections.defaultdict(list) for r in interaction_records: records_label_to_records_map[r.label].append(r) for label, records in records_label_to_records_map.iteritems(): if records[0].is_smooth: page = page_module.Page('interaction-record://%s' % label) results.WillRunPage(page) smoothness_metric.AddResults(timeline_model, thread, records, results) results.DidRunPage(page) results.PrintSummary() return 0
def test_overall_results_page_runs_twice(self):
  """Summary output when the same page is run in two iterations."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()
  results.AddSummaryValue(scalar.ScalarValue(None, 'a', 'seconds', 1))

  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'b', 'seconds', 2))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'b', 'seconds', 3))
  results.DidRunPage(pages[0])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals([
      'RESULT b: http___www.foo.com_= [2,3] seconds\n'
      'Avg b: 2.500000seconds\nSd b: 0.707107seconds\n',
      '*RESULT b: b= [2,3] seconds\n'
      'Avg b: 2.500000seconds\nSd b: 0.707107seconds\n',
      '*RESULT a: a= 1 seconds\n',
      'RESULT telemetry_page_measurement_results: num_failed= 0 count\n',
      'RESULT telemetry_page_measurement_results: num_errored= 0 count\n'
  ], self._test_output_stream.output_data)
def test_basic_summary(self):
  """Per-page and aggregate RESULT lines for two passing pages."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()

  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'a', 'seconds', 3))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[1])
  results.AddValue(scalar.ScalarValue(pages[1], 'a', 'seconds', 7))
  results.DidRunPage(pages[1])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals([
      'RESULT a: http___www.bar.com_= 7 seconds\n',
      'RESULT a: http___www.foo.com_= 3 seconds\n',
      '*RESULT a: a= [3,7] seconds\nAvg a: 5.000000seconds\n'
      'Sd a: 2.828427seconds\n',
      'RESULT telemetry_page_measurement_results: '
      'num_failed= 0 count\n',
      'RESULT telemetry_page_measurement_results: '
      'num_errored= 0 count\n'
  ], self._test_output_stream.output_data)
def test_basic_summary_pass_and_fail_page(self):
  """If a page failed, only print summary for individual pages."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()

  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'a', 'seconds', 3))
  # pages[0] both reports a value and fails; the failure should suppress
  # the aggregate '*RESULT' line.
  results.AddValue(failure.FailureValue.FromMessage(pages[0], 'message'))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[1])
  results.AddValue(scalar.ScalarValue(pages[1], 'a', 'seconds', 7))
  results.DidRunPage(pages[1])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals([
      'RESULT a: http___www.bar.com_= 7 seconds\n',
      'RESULT a: http___www.foo.com_= 3 seconds\n',
      'RESULT telemetry_page_measurement_results: '
      'num_failed= 1 count\n',
      'RESULT telemetry_page_measurement_results: '
      'num_errored= 0 count\n'
  ], self._test_output_stream.output_data)
def test_basic_summary_nonuniform_results(self):
  """Pages that do not all report the same metrics still summarize.

  pages[2] reports metric 'a' but not 'b'; the aggregate 'b' line must
  only cover the pages that actually reported 'b'.
  """
  test_page_set = _MakePageSet()
  measurement_results = page_test_results.PageTestResults()
  measurement_results.WillRunPage(test_page_set.pages[0])
  measurement_results.AddValue(
      scalar.ScalarValue(test_page_set.pages[0], 'a', 'seconds', 3))
  measurement_results.AddValue(
      scalar.ScalarValue(test_page_set.pages[0], 'b', 'seconds', 10))
  measurement_results.DidRunPage(test_page_set.pages[0])

  measurement_results.WillRunPage(test_page_set.pages[1])
  measurement_results.AddValue(
      scalar.ScalarValue(test_page_set.pages[1], 'a', 'seconds', 3))
  measurement_results.AddValue(
      scalar.ScalarValue(test_page_set.pages[1], 'b', 'seconds', 10))
  measurement_results.DidRunPage(test_page_set.pages[1])

  measurement_results.WillRunPage(test_page_set.pages[2])
  measurement_results.AddValue(
      scalar.ScalarValue(test_page_set.pages[2], 'a', 'seconds', 7))
  # Note, page[2] does not report a 'b' metric.
  # Bug fix: this previously closed pages[1], mismatching the
  # WillRunPage(pages[2]) call above.
  measurement_results.DidRunPage(test_page_set.pages[2])

  formatter = buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream)
  formatter.Format(measurement_results)
  expected = [
      'RESULT a: http___www.bar.com_= 3 seconds\n',
      'RESULT a: http___www.baz.com_= 7 seconds\n',
      'RESULT a: http___www.foo.com_= 3 seconds\n',
      '*RESULT a: a= [3,3,7] seconds\nAvg a: 4.333333seconds\n' +
      'Sd a: 2.309401seconds\n',
      'RESULT b: http___www.bar.com_= 10 seconds\n',
      'RESULT b: http___www.foo.com_= 10 seconds\n',
      '*RESULT b: b= [10,10] seconds\nAvg b: 10.000000seconds\n',
      'RESULT telemetry_page_measurement_results: '
      'num_failed= 0 count\n',
      'RESULT telemetry_page_measurement_results: '
      'num_errored= 0 count\n'
  ]
  self.assertEquals(expected, self._test_output_stream.output_data)
def test_repeated_pageset_one_iteration_one_page_fails(self):
  """Page fails on one iteration, no averaged results should print."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()

  # First iteration: pages[1] fails.
  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'a', 'seconds', 3))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[1])
  results.AddValue(scalar.ScalarValue(pages[1], 'a', 'seconds', 7))
  results.AddValue(failure.FailureValue.FromMessage(pages[1], 'message'))
  results.DidRunPage(pages[1])

  # Second iteration: both pages pass.
  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'a', 'seconds', 4))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[1])
  results.AddValue(scalar.ScalarValue(pages[1], 'a', 'seconds', 8))
  results.DidRunPage(pages[1])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals([
      'RESULT a: http___www.bar.com_= [7,8] seconds\n'
      'Avg a: 7.500000seconds\n'
      'Sd a: 0.707107seconds\n',
      'RESULT a: http___www.foo.com_= [3,4] seconds\n'
      'Avg a: 3.500000seconds\n'
      'Sd a: 0.707107seconds\n',
      'RESULT telemetry_page_measurement_results: '
      'num_failed= 1 count\n',
      'RESULT telemetry_page_measurement_results: '
      'num_errored= 0 count\n'
  ], self._test_output_stream.output_data)
def test_repeated_pages(self):
  """Each page run twice: per-page and overall averages are printed."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()

  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'a', 'seconds', 3))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'a', 'seconds', 4))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[1])
  results.AddValue(scalar.ScalarValue(pages[1], 'a', 'seconds', 7))
  results.DidRunPage(pages[1])

  results.WillRunPage(pages[1])
  results.AddValue(scalar.ScalarValue(pages[1], 'a', 'seconds', 8))
  results.DidRunPage(pages[1])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals([
      'RESULT a: http___www.bar.com_= [7,8] seconds\n'
      'Avg a: 7.500000seconds\n'
      'Sd a: 0.707107seconds\n',
      'RESULT a: http___www.foo.com_= [3,4] seconds\n'
      'Avg a: 3.500000seconds\n'
      'Sd a: 0.707107seconds\n',
      '*RESULT a: a= [3,4,7,8] seconds\n'
      'Avg a: 5.500000seconds\n'
      'Sd a: 2.380476seconds\n',
      'RESULT telemetry_page_measurement_results: '
      'num_failed= 0 count\n',
      'RESULT telemetry_page_measurement_results: '
      'num_errored= 0 count\n'
  ], self._test_output_stream.output_data)
def test_basic_summary_with_only_one_page(self):
  """A single page yields only the aggregate '*RESULT' line."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()

  results.WillRunPage(pages[0])
  results.AddValue(scalar.ScalarValue(pages[0], 'a', 'seconds', 3))
  results.DidRunPage(pages[0])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals([
      '*RESULT a: a= 3 seconds\n',
      'RESULT telemetry_page_measurement_results: '
      'num_failed= 0 count\n',
      'RESULT telemetry_page_measurement_results: '
      'num_errored= 0 count\n'
  ], self._test_output_stream.output_data)
def test_histogram(self):
  """Histogram values print HISTOGRAM lines with per-page averages."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()

  results.WillRunPage(pages[0])
  results.AddValue(
      histogram.HistogramValue(
          pages[0], 'a', 'units',
          raw_value_json=
          '{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
          important=False))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[1])
  results.AddValue(
      histogram.HistogramValue(
          pages[1], 'a', 'units',
          raw_value_json=
          '{"buckets": [{"low": 2, "high": 3, "count": 1}]}',
          important=False))
  results.DidRunPage(pages[1])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals([
      'HISTOGRAM a: http___www.bar.com_= '
      '{"buckets": [{"low": 2, "high": 3, "count": 1}]} units\n'
      'Avg a: 2.500000units\n',
      'HISTOGRAM a: http___www.foo.com_= '
      '{"buckets": [{"low": 1, "high": 2, "count": 1}]} units\n'
      'Avg a: 1.500000units\n',
      'RESULT telemetry_page_measurement_results: num_failed= 0 count\n',
      'RESULT telemetry_page_measurement_results: num_errored= 0 count\n'
  ], self._test_output_stream.output_data)
def test_unimportant_results(self):
  """Unimportant values print RESULT lines without the '*' marker."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()
  results.AddSummaryValue(
      scalar.ScalarValue(None, 'a', 'seconds', 1, important=False))

  results.WillRunPage(pages[0])
  results.AddValue(
      scalar.ScalarValue(pages[0], 'b', 'seconds', 2, important=False))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[1])
  results.AddValue(
      scalar.ScalarValue(pages[1], 'b', 'seconds', 3, important=False))
  results.DidRunPage(pages[1])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals(self._test_output_stream.output_data, [
      'RESULT b: http___www.bar.com_= 3 seconds\n',
      'RESULT b: http___www.foo.com_= 2 seconds\n',
      'RESULT b: b= [2,3] seconds\n'
      'Avg b: 2.500000seconds\nSd b: 0.707107seconds\n',
      'RESULT a: a= 1 seconds\n',
      'RESULT telemetry_page_measurement_results: num_failed= 0 count\n',
      'RESULT telemetry_page_measurement_results: num_errored= 0 count\n'
  ])
def test_list_value(self):
  """ListOfScalarValues entries merge into per-page and overall lists."""
  pages = _MakePageSet().pages
  results = page_test_results.PageTestResults()
  results.AddSummaryValue(
      list_of_scalar_values.ListOfScalarValues(None, 'a', 'seconds', [1, 1]))

  results.WillRunPage(pages[0])
  results.AddValue(
      list_of_scalar_values.ListOfScalarValues(
          pages[0], 'b', 'seconds', [2, 2]))
  results.DidRunPage(pages[0])

  results.WillRunPage(pages[1])
  results.AddValue(
      list_of_scalar_values.ListOfScalarValues(
          pages[1], 'b', 'seconds', [3, 3]))
  results.DidRunPage(pages[1])

  buildbot_output_formatter.BuildbotOutputFormatter(
      self._test_output_stream).Format(results)

  self.assertEquals([
      'RESULT b: http___www.bar.com_= [3,3] seconds\n'
      'Avg b: 3.000000seconds\n',
      'RESULT b: http___www.foo.com_= [2,2] seconds\n'
      'Avg b: 2.000000seconds\n',
      '*RESULT b: b= [2,2,3,3] seconds\nAvg b: 2.500000seconds\n'
      'Sd b: 0.577350seconds\n',
      '*RESULT a: a= [1,1] seconds\nAvg a: 1.000000seconds\n',
      'RESULT telemetry_page_measurement_results: num_failed= 0 count\n',
      'RESULT telemetry_page_measurement_results: num_errored= 0 count\n'
  ], self._test_output_stream.output_data)
def CreateResults(benchmark_metadata, options):
  """Builds a PageTestResults wired with the requested output formatters.

  Args:
    benchmark_metadata: Metadata describing the benchmark; forwarded to
        formatters that embed it in their output (html, json, chartjson).
    options: Contains the options specified in AddResultsOptions.

  Returns:
    A page_test_results.PageTestResults instance.

  Raises:
    Exception: If options.output_formats contains an unknown format
        (should be unreachable; the parser enforces the choices).
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  output_formatters = []
  for output_format in options.output_formats:
    # 'none' and 'gtest' produce no output formatter; --chartjson
    # suppresses every other format as well.
    if output_format in ('none', 'gtest') or options.chartjson:
      continue

    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'csv':
      output_formatters.append(
          csv_output_formatter.CsvOutputFormatter(output_stream))
    elif output_format == 'csv-pivot-table':
      output_formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'buildbot':
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'html':
      # TODO(chrishenry): We show buildbot output so that users can grep
      # through the results easily without needing to open the html
      # file. Another option for this is to output the results directly
      # in gtest-style results (via some sort of progress reporter),
      # as we plan to enable gtest-style output for all output formatters.
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              sys.stdout, trace_tag=options.output_trace_tag))
      output_formatters.append(
          html_output_formatter.HtmlOutputFormatter(
              output_stream, benchmark_metadata, options.reset_results,
              options.upload_results, options.browser_type,
              options.results_label, trace_tag=options.output_trace_tag))
    elif output_format == 'json':
      output_formatters.append(
          json_output_formatter.JsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats
  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)
  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir)
def CreateResults(metadata, options):
  """Builds a PageTestResults for the single requested output format.

  Args:
    metadata: Benchmark metadata forwarded to the html/json formatters.
    options: Contains the options specified in AddResultsOptions.

  Returns:
    A page_test_results.PageTestResults instance.

  Raises:
    Exception: If options.output_format is not one of
        _OUTPUT_FORMAT_CHOICES (should be unreachable; the parser
        enforces the choices).
  """
  # TODO(chrishenry): This logic prevents us from having multiple
  # OutputFormatters. We should have an output_file per OutputFormatter.
  # Maybe we should have --output-dir instead of --output-file?
  if options.output_format == 'html' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.html')
  elif options.output_format == 'json' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.json')

  # Write to the requested file when one is set; otherwise stdout.
  # The stream stays open for the formatters' lifetime.
  if hasattr(options, 'output_file') and options.output_file:
    output_file = os.path.expanduser(options.output_file)
    open(output_file, 'a').close()  # Create file if it doesn't exist.
    output_stream = open(output_file, 'r+')
  else:
    output_stream = sys.stdout
  # Default missing attributes so callers that built `options` by hand
  # (e.g. tests) still work.
  if not hasattr(options, 'output_format'):
    options.output_format = _OUTPUT_FORMAT_CHOICES[0]
  if not hasattr(options, 'output_trace_tag'):
    options.output_trace_tag = ''

  output_formatters = []
  output_skipped_tests_summary = True
  reporter = None
  if options.output_format == 'none':
    pass
  elif options.output_format == 'csv':
    output_formatters.append(csv_output_formatter.CsvOutputFormatter(
        output_stream))
  elif options.output_format == 'buildbot':
    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
        output_stream, trace_tag=options.output_trace_tag))
  elif options.output_format == 'gtest':
    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = False
  elif options.output_format == 'html':
    # TODO(chrishenry): We show buildbot output so that users can grep
    # through the results easily without needing to open the html
    # file. Another option for this is to output the results directly
    # in gtest-style results (via some sort of progress reporter),
    # as we plan to enable gtest-style output for all output formatters.
    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
        sys.stdout, trace_tag=options.output_trace_tag))
    output_formatters.append(html_output_formatter.HtmlOutputFormatter(
        output_stream, metadata, options.reset_results,
        options.upload_results, options.browser_type,
        options.results_label, trace_tag=options.output_trace_tag))
  elif options.output_format == 'json':
    output_formatters.append(
        json_output_formatter.JsonOutputFormatter(output_stream, metadata))
  else:
    # Should never be reached. The parser enforces the choices.
    raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                    % (options.output_format,
                       ', '.join(_OUTPUT_FORMAT_CHOICES)))

  if options.suppress_gtest_report:
    reporter = progress_reporter.ProgressReporter()
  else:
    reporter = gtest_progress_reporter.GTestProgressReporter(
        sys.stdout,
        output_skipped_tests_summary=output_skipped_tests_summary)

  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter)