def main():
  """Runs a single registered metric on a local trace and emits a CSV.

  Command-line driven: reads the trace path, an optional cloud link used as
  the canonical URL, the metric function name, and the output CSV path.

  Returns:
    None on success; a non-zero exit code when the trace is too large, the
    metric run fails, or CSV conversion fails.
  """
  all_metrics = discover.DiscoverMetrics(
      ['/tracing/metrics/all_metrics.html'])
  parser = argparse.ArgumentParser(
      description='Runs metrics on a local trace')
  parser.add_argument('--local-trace-path', type=str,
                      help='The local path to the trace file')
  parser.add_argument('--cloud-trace-link', type=str,
                      help=('Cloud link from where the local trace file was '
                            'downloaded from'))
  parser.add_argument('--metric-name', type=str,
                      help=('Function name of registered metric '
                            '(not filename.) Available metrics are: %s' %
                            ', '.join(all_metrics)))
  parser.add_argument('--output-csv', default='results', type=str,
                      help='Output CSV file path')
  args = parser.parse_args()

  trace_size_in_mib = os.path.getsize(args.local_trace_path) / (2 ** 20)
  # Bails out on trace that are too big. See crbug.com/812631 for more details.
  if trace_size_in_mib > 400:
    print('Trace size is too big: %s MiB' % trace_size_in_mib)
    return 1

  logging.warning('Starting to compute metrics on trace')
  start = time.time()
  mre_result = metric_runner.RunMetric(
      args.local_trace_path, [args.metric_name], {},
      report_progress=False, canonical_url=args.cloud_trace_link)
  # Lazy %-args so formatting only happens if the record is emitted.
  logging.warning('Processing resulting traces took %.3f seconds',
                  time.time() - start)

  # Report only the first failure; a single failed metric aborts the run.
  for f in mre_result.failures:
    print('Running metric failed:')
    print(f.stack)
    return 1

  # mode='w' is required: json.dump writes str, but NamedTemporaryFile
  # defaults to binary mode ('w+b') which only accepts bytes and would
  # raise TypeError on Python 3.
  with tempfile.NamedTemporaryFile(mode='w') as temp:
    json.dump(mre_result.pairs.get('histograms', []), temp, indent=2,
              sort_keys=True, separators=(',', ': '))
    temp.flush()
    result = histograms_to_csv.HistogramsToCsv(temp.name)
    if result.returncode != 0:
      print('histograms_to_csv.HistogramsToCsv returned %d' %
            result.returncode)
      return result.returncode
    else:
      with open(args.output_csv, 'w') as f:
        f.write(result.stdout.rstrip())
      print('Output CSV created in file://' + args.output_csv)
def Main():
  """Command-line entry point: runs a metric OR a mapper over trace files.

  Exactly one of --metric_name / --mapper_handle must be supplied; the
  positional argument is a trace file or a directory of trace files.

  Returns:
    The return value of the underlying trace-processing helper.
  """
  all_metrics = discover.DiscoverMetrics(
      ['/tracing/metrics/all_metrics.html'])
  parser = argparse.ArgumentParser()
  parser.add_argument('--metric_name',
                      help='Metric name, valid choices are: %s' %
                      ', '.join(all_metrics))
  parser.add_argument(
      '--mapper_handle',
      help='Mapper handle, in the format path/to/handle.html:handleFunction')
  parser.add_argument('trace_file_or_dir',
                      help='Path to trace file or directory of trace files.')
  parser.add_argument('--output_file',
                      help='Path to output file to store results.')
  args = parser.parse_args()

  # The two modes are mutually exclusive and one is required.
  if args.metric_name and args.mapper_handle:
    parser.error('Specify either metric or mapper handle, not both.')
  if not args.metric_name and not args.mapper_handle:
    parser.error('Specify either metric or mapper handle.')

  traces = _GetListFromFileOrDir(os.path.abspath(args.trace_file_or_dir))
  if args.output_file:
    args.output_file = os.path.abspath(args.output_file)

  if args.metric_name:
    # Didn't put in choices because the commandline help is super ugly and
    # repetitive.
    if args.metric_name not in all_metrics:
      parser.error('Invalid metric specified.')
    return _ProcessTracesWithMetric(args.metric_name, traces,
                                    args.output_file)
  elif args.mapper_handle:
    return _ProcessTracesWithMapper(args.mapper_handle, traces,
                                    args.output_file)
def testMetricsDiscoverMultipleMetrics(self):
  """The all-metrics registry exposes more than one metric."""
  metrics = discover.DiscoverMetrics(
      ['/tracing/metrics/all_metrics.html'])
  self.assertGreater(len(metrics), 1)
def testMetricsDiscoverEmpty(self):
  """Discovering metrics from no modules yields nothing."""
  discovered = discover.DiscoverMetrics([])
  self.assertFalse(discovered)
def testMetricsDiscoverNonEmpty(self):
  """The sample metric module registers exactly 'sampleMetric'."""
  # assertEqual, not the deprecated assertEquals alias (removed in
  # Python 3.12).
  self.assertEqual(['sampleMetric'],
                   discover.DiscoverMetrics(
                       ['/tracing/metrics/sample_metric.html']))