def _ComputeTimelineBasedMetrics(self, results, trace_value):
  """Runs the configured TBMv2 metrics on a serialized trace.

  Translates metric-runner failures and scalar outputs into Telemetry
  values, and imports histogram dicts into results.histograms.

  Args:
    results: Page test results object collecting values and histograms.
    trace_value: TraceValue whose .filename points at the trace on disk.
  """
  metrics = self._tbm_options.GetTimelineBasedMetrics()
  extra_import_options = {'trackDetailedModelStats': True}

  start = time.time()
  mre_result = metric_runner.RunMetric(
      trace_value.filename, metrics, extra_import_options,
      report_progress=False,
      canonical_url=results.telemetry_info.trace_url)
  # Lazy %-args defer formatting until the record is actually emitted.
  logging.warning('Processing resulting traces took %.3f seconds',
                  time.time() - start)

  page = results.current_page
  for d in mre_result.failures:
    results.AddValue(common_value_helpers.TranslateMreFailure(d, page))

  results.histograms.ImportDicts(mre_result.pairs.get('histograms', []))
  results.histograms.ResolveRelatedHistograms()

  for d in mre_result.pairs.get('scalars', []):
    results.AddValue(common_value_helpers.TranslateScalarValue(d, page))
def _ComputeTimelineBasedMetrics(self, results, trace_value):
  """Runs TBMv2 metrics on a trace, guarding against oversized traces.

  Raises when UMA histograms were enabled in the Chrome trace config but
  the umaMetric TBM metric is not selected; fails the story (rather than
  raising) when the trace exceeds 400 MiB.

  Args:
    results: Page test results object collecting values and histograms.
    trace_value: TraceValue whose .filename points at the trace on disk.
  """
  metrics = self._tbm_options.GetTimelineBasedMetrics()
  if (self._tbm_options.config.chrome_trace_config.HasUMAHistograms() and
      'umaMetric' not in metrics):
    raise Exception('UMA histograms are enabled but umaMetric is not used')
  extra_import_options = {'trackDetailedModelStats': True}

  trace_size_in_mib = os.path.getsize(trace_value.filename) / (2 ** 20)
  # Bails out on traces that are too big. See crbug.com/812631 for more
  # details.
  if trace_size_in_mib > 400:
    results.Fail('Trace size is too big: %s MiB' % trace_size_in_mib)
    return

  logging.info('Starting to compute metrics on trace')
  start = time.time()
  mre_result = metric_runner.RunMetric(
      trace_value.filename, metrics, extra_import_options,
      report_progress=False,
      canonical_url=results.telemetry_info.trace_url)
  # Lazy %-args defer formatting until the record is actually emitted.
  logging.info('Processing resulting traces took %.3f seconds',
               time.time() - start)

  page = results.current_page
  for f in mre_result.failures:
    results.Fail(f.stack)

  results.ImportHistogramDicts(mre_result.pairs.get('histograms', []))
  for d in mre_result.pairs.get('scalars', []):
    results.AddValue(common_value_helpers.TranslateScalarValue(d, page))
def CheckValidTrace(self, stop_tracing_result):
  """Asserts tracing stopped cleanly and produced a processable trace."""
  data, collection_errors = stop_tracing_result
  self.assertEqual(collection_errors, [])
  with tempfile_ext.NamedTemporaryDirectory() as tmp_dir:
    serialized_path = os.path.join(tmp_dir, 'trace.html')
    data.Serialize(serialized_path)
    run_result = metric_runner.RunMetric(serialized_path, ['tracingMetric'])
    self.assertFalse(run_result.failures)
def _ComputeMetricsInPool((run, trace_value)):
  """Computes TBM metrics for one story run inside a multiprocessing pool.

  Args:
    (run, trace_value): Python-2 tuple parameter holding the story run and
        its (not yet serialized) TraceValue.

  Returns:
    A dict with keys 'run', 'fail' (list of failure strings),
    'histogram_dicts' and 'scalars'.
  """
  story_name = run.story.name
  try:
    assert not trace_value.is_serialized, (
        "%s: TraceValue should not be serialized." % story_name)
    retvalue = {
        'run': run,
        'fail': [],
        'histogram_dicts': None,
        'scalars': []
    }
    extra_import_options = {'trackDetailedModelStats': True}

    logging.info('%s: Serializing trace.', story_name)
    # Serialize first so the file exists for the size check below.
    trace_value.SerializeTraceData()
    trace_size_in_mib = os.path.getsize(trace_value.filename) / (2**20)
    # Bails out on trace that are too big. See crbug.com/812631 for more
    # details.
    if trace_size_in_mib > 400:
      retvalue['fail'].append('%s: Trace size is too big: %s MiB' %
                              (story_name, trace_size_in_mib))
      return retvalue

    logging.info('%s: Starting to compute metrics on trace.', story_name)
    start = time.time()
    mre_result = metric_runner.RunMetric(
        trace_value.filename, trace_value.timeline_based_metric,
        extra_import_options, report_progress=False,
        canonical_url=trace_value.trace_url)
    logging.info('%s: Computing metrics took %.3f seconds.' %
                 (story_name, time.time() - start))

    if mre_result.failures:
      for f in mre_result.failures:
        retvalue['fail'].append('%s: %s' % (story_name, str(f)))

    histogram_dicts = mre_result.pairs.get('histograms', [])
    retvalue['histogram_dicts'] = histogram_dicts

    scalars = []
    for d in mre_result.pairs.get('scalars', []):
      scalars.append(
          common_value_helpers.TranslateScalarValue(d, trace_value.page))
    retvalue['scalars'] = scalars
    return retvalue
  except Exception as e:  # pylint: disable=broad-except
    # logging exception here is the only way to get a stack trace since
    # multiprocessing's pool implementation does not save that data. See
    # crbug.com/953365.
    logging.error('%s: Exception while calculating metric', story_name)
    logging.exception(e)
    raise
def main():
  """Runs one registered TBMv2 metric on a local trace; emits a CSV.

  Returns:
    0 on success, 1 on oversized trace or metric failure, otherwise the
    return code of histograms_to_csv.HistogramsToCsv.
  """
  all_metrics = discover.DiscoverMetrics(
      ['/tracing/metrics/all_metrics.html'])

  parser = argparse.ArgumentParser(
      description='Runs metrics on a local trace')
  parser.add_argument('--local-trace-path', type=str,
                      help='The local path to the trace file')
  parser.add_argument('--cloud-trace-link', type=str,
                      help=('Cloud link from where the local trace file was '
                            'downloaded from'))
  parser.add_argument('--metric-name', type=str,
                      help=('Function name of registered metric '
                            '(not filename.) Available metrics are: %s' %
                            ', '.join(all_metrics)))
  parser.add_argument('--output-csv', default='results', type=str,
                      help='Output CSV file path')
  args = parser.parse_args()

  trace_size_in_mib = os.path.getsize(args.local_trace_path) / (2 ** 20)
  # Bails out on trace that are too big. See crbug.com/812631 for more details.
  if trace_size_in_mib > 400:
    print('Trace size is too big: %s MiB' % trace_size_in_mib)
    return 1

  logging.warning('Starting to compute metrics on trace')
  start = time.time()
  mre_result = metric_runner.RunMetric(
      args.local_trace_path, [args.metric_name], {},
      report_progress=False, canonical_url=args.cloud_trace_link)
  # Lazy %-args defer formatting until the record is actually emitted.
  logging.warning('Processing resulting traces took %.3f seconds',
                  time.time() - start)

  for f in mre_result.failures:
    print('Running metric failed:')
    print(f.stack)
    return 1

  with tempfile.NamedTemporaryFile() as temp:
    json.dump(mre_result.pairs.get('histograms', []), temp, indent=2,
              sort_keys=True, separators=(',', ': '))
    temp.flush()
    result = histograms_to_csv.HistogramsToCsv(temp.name)
    if result.returncode != 0:
      print('histograms_to_csv.HistogramsToCsv returned %d' %
            result.returncode)
      return result.returncode
    with open(args.output_csv, 'w') as f:
      f.write(result.stdout.rstrip())
    print('Output CSV created in file://' + args.output_csv)
  # Explicit success code (callers typically sys.exit(main())).
  return 0
def ValidateAndMeasurePage(self, _, tab, results):
  """Stops tracing and computes legacy (TBMv1) and TBMv2 rendering metrics.

  Args:
    _: Unused page argument.
    tab: Tab whose browser's tracing session is stopped and measured.
    results: Page test results object receiving values and histograms.
  """
  self._results = results
  tab.browser.platform.tracing_controller.telemetry_info = (
      results.telemetry_info)
  trace_result = tab.browser.platform.tracing_controller.StopTracing()

  # TODO(charliea): This is part of a three-sided Chromium/Telemetry patch
  # where we're changing the return type of StopTracing from a TraceValue to a
  # (TraceValue, nonfatal_exception_list) tuple. Once the tuple return value
  # lands in Chromium, the non-tuple logic should be deleted.
  if isinstance(trace_result, tuple):
    trace_result = trace_result[0]

  trace_value = trace.TraceValue(
      results.current_page, trace_result,
      file_path=results.telemetry_info.trace_local_path,
      remote_path=results.telemetry_info.trace_remote_path,
      upload_bucket=results.telemetry_info.upload_bucket,
      cloud_url=results.telemetry_info.trace_remote_url)
  results.AddValue(trace_value)

  # Legacy TBMv1 metrics computed from the in-process timeline model.
  model = model_module.TimelineModel(trace_result)
  renderer_thread = model.GetFirstRendererThread(tab.id)
  records = _CollectRecordsFromRendererThreads(model, renderer_thread)

  smoothness_metric = smoothness.SmoothnessMetric()
  smoothness_metric.AddResults(model, renderer_thread, records, results)
  thread_times_metric = timeline.ThreadTimesTimelineMetric()
  thread_times_metric.AddResults(model, renderer_thread, records, results)

  # TBMv2 metrics.
  mre_result = metric_runner.RunMetric(
      trace_value.filename, metrics=['renderingMetric'],
      extra_import_options={'trackDetailedModelStats': True},
      report_progress=False, canonical_url=results.telemetry_info.trace_url)
  for f in mre_result.failures:
    results.Fail(f.stack)
  results.ImportHistogramDicts(mre_result.pairs.get('histograms', []),
                               import_immediately=False)
def _ComputeTimelineBasedMetrics(self, results, trace_value):
  """Runs the configured TBM metrics over the trace and records the output."""
  import_options = {'trackDetailedModelStats': True}
  run_result = metric_runner.RunMetric(
      trace_value.filename, self._tbm_options.GetTimelineBasedMetrics(),
      import_options)
  current_page = results.current_page

  for failure in run_result.failures:
    results.AddValue(
        common_value_helpers.TranslateMreFailure(failure, current_page))

  results.value_set.extend(run_result.pairs.get('histograms', []))

  for scalar in run_result.pairs.get('scalars', []):
    results.AddValue(
        common_value_helpers.TranslateScalarValue(scalar, current_page))
def _ComputeTimelineBasedMetric(self, results, trace_value):
  """Runs the single configured TBM metric and records its scalar values."""
  import_options = {'trackDetailedModelStats': True}
  run_result = metric_runner.RunMetric(
      trace_value.filename, self._tbm_options.GetTimelineBasedMetric(),
      import_options)
  current_page = results.current_page

  for failure in run_result.failures:
    results.AddValue(
        common_value_helpers.TranslateMreFailure(failure, current_page))

  for value in run_result.pairs.get('values', []):
    # Only scalar numeric values can be translated into Telemetry values.
    if common_value_helpers.IsScalarNumericValue(value):
      results.AddValue(
          common_value_helpers.TranslateScalarValue(value, current_page))
def AddTBMv2RenderingMetrics(trace_value, results, import_experimental_metrics):
  """Runs the TBMv2 renderingMetric on a trace and imports its histograms.

  Args:
    trace_value: TraceValue with a serialized trace on disk.
    results: Page test results object that failures/histograms are added to.
    import_experimental_metrics: When False, histograms whose name contains
        '_tbmv2' are filtered out before import.
  """
  mre_result = metric_runner.RunMetric(
      trace_value.filename, metrics=['renderingMetric'],
      extra_import_options={'trackDetailedModelStats': True},
      report_progress=False, canonical_url=results.telemetry_info.trace_url)

  for f in mre_result.failures:
    results.Fail(f.stack)

  # Idiomatic membership test replaces the original str.find(...) < 0.
  histograms = [
      h for h in mre_result.pairs.get('histograms', [])
      if import_experimental_metrics or '_tbmv2' not in h.get('name', '')
  ]
  results.ImportHistogramDicts(histograms, import_immediately=False)
def _ComputeMetricsInPool((run, trace_value)):
  """Computes TBM metrics for one story run inside a multiprocessing pool.

  Args:
    (run, trace_value): Python-2 tuple parameter holding the story run and
        its (not yet serialized) TraceValue.

  Returns:
    A dict with keys 'run', 'fail' (list of failure strings),
    'histogram_dicts' and 'scalars'.
  """
  assert not trace_value.is_serialized, "TraceValue should not be serialized."
  retvalue = {'run': run, 'fail': [], 'histogram_dicts': None, 'scalars': []}
  extra_import_options = {'trackDetailedModelStats': True}

  # Serialize first so the file exists for the size check below.
  trace_value.SerializeTraceData()
  trace_size_in_mib = os.path.getsize(trace_value.filename) / (2**20)
  # Bails out on trace that are too big. See crbug.com/812631 for more
  # details.
  if trace_size_in_mib > 400:
    retvalue['fail'].append('Trace size is too big: %s MiB' %
                            trace_size_in_mib)
    return retvalue

  logging.info('Starting to compute metrics on trace')
  start = time.time()
  mre_result = metric_runner.RunMetric(
      trace_value.filename, trace_value.timeline_based_metric,
      extra_import_options, report_progress=False,
      canonical_url=trace_value.trace_url)
  logging.info('Processing resulting traces took %.3f seconds' %
               (time.time() - start))

  if mre_result.failures:
    for f in mre_result.failures:
      retvalue['fail'].append(f.stack)

  histogram_dicts = mre_result.pairs.get('histograms', [])
  retvalue['histogram_dicts'] = histogram_dicts

  scalars = []
  for d in mre_result.pairs.get('scalars', []):
    scalars.append(
        common_value_helpers.TranslateScalarValue(d, trace_value.page))
  retvalue['scalars'] = scalars
  return retvalue
def Measure(self, platform, results):
  """Collect all possible metrics and add them to results.

  The current page's name encodes the path to an already-recorded trace
  (prefixed with "file:/"); metrics are computed directly from that file.
  """
  # Extract the file name without the "file:/" prefix.
  assert results.current_page.name.startswith("file:/"), \
      "current page path should start with file:/"
  filename = results.current_page.name[len("file:/"):]

  metrics = self._tbm_options.GetTimelineBasedMetrics()
  extra_import_options = {'trackDetailedModelStats': True}

  trace_size_in_mib = os.path.getsize(filename) / (2**20)
  # Bails out on traces that are too big. See crbug.com/812631 for more
  # details.
  if trace_size_in_mib > 400:
    results.Fail('Trace size is too big: %s MiB' % trace_size_in_mib)
    return

  logging.warning('Starting to compute metrics on trace')
  start = time.time()
  mre_result = metric_runner.RunMetric(
      filename, metrics, extra_import_options, report_progress=False,
      canonical_url=results.current_page.cloud_trace_link)
  # Lazy %-args defer formatting until the record is actually emitted.
  logging.warning('Processing resulting traces took %.3f seconds',
                  time.time() - start)

  page = results.current_page
  for f in mre_result.failures:
    results.Fail(f.stack)

  results.ImportHistogramDicts(mre_result.pairs.get('histograms', []))
  for d in mre_result.pairs.get('scalars', []):
    results.AddValue(common_value_helpers.TranslateScalarValue(d, page))