Code example #1
def testApplyInParallelOnFailure(self):
  work_list = [[1], [2], [3]]
  def fun(x):
    if x == [3]:
      raise RuntimeError()
  util.ApplyInParallel(fun, work_list, on_failure=lambda x: x.pop())
  self.assertEqual(work_list, [[1], [2], []])
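The on_failure callback receives the failing work item itself, so it can mutate or record it. A small hypothetical variant (not from the test suite) that collects failed items instead of emptying them:

def ApplyAndCollectFailures(fun, work_list):
  # Hypothetical helper: gather the items for which fun raised, instead of
  # emptying them as the test above does.
  failed = []
  util.ApplyInParallel(fun, work_list, on_failure=failed.append)
  return failed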
Code example #2
File: processor.py Project: tt20050510/chromium
def AggregateTraces(intermediate_results):
  """Replace individual traces with an aggregate one for each test result.

  For each test run with traces, generates an aggregate HTML trace. Removes
  all entries for individual traces and adds one entry for aggregate one.
  """
  work_list = []
  for result in intermediate_results['testResults']:
    artifacts = result.get('outputArtifacts', {})
    # TODO(crbug.com/981349): Stop checking for HTML_TRACE_NAME after
    # Telemetry does not aggregate traces anymore.
    if (any(name.startswith('trace/') for name in artifacts) and
        compute_metrics.HTML_TRACE_NAME not in artifacts):
      work_list.append(artifacts)

  if work_list:
    for _ in util.ApplyInParallel(_AggregateTraceWorker, work_list):
      pass

  # TODO(crbug.com/981349): This is to clean up traces that have been
  # aggregated by Telemetry. Remove this after Telemetry no longer does this.
  for result in intermediate_results['testResults']:
    artifacts = result.get('outputArtifacts', {})
    # Take a snapshot of the keys so entries can be deleted while iterating
    # (deleting from a dict during iteration raises in Python 3).
    for name in list(artifacts):
      if name.startswith('trace/'):
        del artifacts[name]
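_AggregateTraceWorker is not shown on this page. Judging from the loop above, each work item is an outputArtifacts dict with at least one 'trace/...' entry. A heavily hedged sketch of a compatible worker, with the actual HTML trace rendering (which the real Chromium code performs) reduced to a hypothetical stand-in, and compute_metrics assumed to be the module processor.py already imports:

import os

def _RenderHtmlTrace(trace_files, output_path):
  # Hypothetical stand-in: the real step renders a trace viewer page,
  # not a plain concatenation.
  with open(output_path, 'w') as out:
    for path in trace_files:
      with open(path) as f:
        out.write(f.read())

def _AggregateTraceWorker(artifacts):
  # Sketch only: collect the individual trace files for this test run.
  # AggregateTraces guarantees at least one 'trace/...' entry is present.
  trace_files = [artifacts[name]['filePath']
                 for name in sorted(artifacts)
                 if name.startswith('trace/')]
  aggregate_path = os.path.join(os.path.dirname(trace_files[0]), 'trace.html')
  _RenderHtmlTrace(trace_files, aggregate_path)
  # Register the aggregate under the name AggregateTraces checks for.
  artifacts[compute_metrics.HTML_TRACE_NAME] = {'filePath': aggregate_path}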
Code example #3
File: processor.py Project: zhangjiayun/chromium.bb
def ProcessResults(options):
  """Process intermediate results and produce the requested outputs.

  This function takes the intermediate results generated by Telemetry after
  running benchmarks (including artifacts such as traces, etc.), and processes
  them as requested by the result processing options.

  Args:
    options: An options object with values parsed from the command line and
      after any adjustments from ProcessOptions were applied.
  """
  if not getattr(options, 'output_formats', None):
    return 0

  test_results = _LoadTestResults(options.intermediate_dir)
  if not test_results:
    # TODO(crbug.com/981349): Make sure that no one is expecting Results
    # Processor to output results in the case of empty input
    # and make this an error.
    logging.warning('No test results to process.')

  upload_bucket = options.upload_bucket
  results_label = options.results_label
  max_num_values = options.max_values_per_test_case
  test_path_format = options.test_path_format
  trace_processor_path = options.trace_processor_path
  test_suite_start = (test_results[0]['startTime']
                      if test_results and 'startTime' in test_results[0] else
                      datetime.datetime.utcnow().isoformat() + 'Z')
  run_identifier = RunIdentifier(results_label, test_suite_start)
  should_compute_metrics = any(fmt in FORMATS_WITH_METRICS
                               for fmt in options.output_formats)

  util.ApplyInParallel(
      lambda result: ProcessTestResult(
          result, upload_bucket, results_label, run_identifier,
          test_suite_start, should_compute_metrics, max_num_values,
          test_path_format, trace_processor_path),
      test_results,
      on_failure=util.SetUnexpectedFailure,
  )

  if should_compute_metrics:
    histogram_dicts = ExtractHistograms(test_results)

  for output_format in options.output_formats:
    logging.info('Processing format: %s', output_format)
    formatter = formatters.FORMATTERS[output_format]
    if output_format in FORMATS_WITH_METRICS:
      output_file = formatter.ProcessHistogramDicts(histogram_dicts, options)
    else:
      output_file = formatter.ProcessIntermediateResults(test_results, options)
    print('View results at file://', output_file, sep='')

  return GenerateExitCode(test_results)
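formatters.FORMATTERS maps each output format name to a module, and ProcessResults calls one of two functions on it depending on whether the format needs metrics. A minimal sketch of that two-function protocol, assuming a hypothetical options.output_dir attribute (the real formatters in Chromium are more elaborate):

import json
import os

def ProcessHistogramDicts(histogram_dicts, options):
  # For formats in FORMATS_WITH_METRICS: consumes the computed histograms.
  output_file = os.path.join(options.output_dir, 'histograms.json')
  with open(output_file, 'w') as f:
    json.dump(histogram_dicts, f)
  return output_file

def ProcessIntermediateResults(test_results, options):
  # For all other formats: consumes the raw test results.
  output_file = os.path.join(options.output_dir, 'test_results.json')
  with open(output_file, 'w') as f:
    json.dump(test_results, f)
  return output_file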
Code example #4
def testApplyInParallelExceptionRaised(self):
  work_list = [1, 2, 3]

  def fun(x):
    if x == 3:
      raise RuntimeError()

  with self.assertRaises(RuntimeError):
    list(util.ApplyInParallel(fun, work_list))
Code example #5
def ComputeTBMv2Metrics(intermediate_results):
  """Compute metrics on aggregated traces in parallel.

  For each test run that has an aggregate trace and some TBMv2 metrics listed
  in its tags, compute the metrics and return the list of all resulting
  histograms. Note: the order of histograms in the results may be different
  from the order of tests in intermediate_results.
  """
  histogram_dicts = []
  work_list = []
  for test_result in intermediate_results['testResults']:
    artifacts = test_result.get('outputArtifacts', {})
    # TODO(crbug.com/981349): If metrics have already been computed in
    # Telemetry, read them from the file. Remove this branch after Telemetry
    # no longer computes metrics.
    if HISTOGRAM_DICTS_FILE in artifacts:
      with open(artifacts[HISTOGRAM_DICTS_FILE]['filePath']) as f:
        histogram_dicts += json.load(f)
      del artifacts[HISTOGRAM_DICTS_FILE]
      continue

    if test_result['status'] == 'SKIP':
      continue

    if (HTML_TRACE_NAME not in artifacts
        or not any(tag['key'] == 'tbmv2'
                   for tag in test_result.get('tags', []))):
      continue

    trace_size_in_mib = (
        os.path.getsize(artifacts[HTML_TRACE_NAME]['filePath']) / (2**20))
    # Bail out on traces that are too big. See crbug.com/812631 for more
    # details.
    # TODO(crbug.com/1010041): Return a non-zero exit code in this case.
    if trace_size_in_mib > 400:
      test_result['status'] = 'FAIL'
      logging.error('%s: Trace size is too big: %s MiB',
                    test_result['testPath'], trace_size_in_mib)
      continue

    work_list.append(test_result)

  for dicts in util.ApplyInParallel(_PoolWorker, work_list):
    histogram_dicts += dicts

  return histogram_dicts
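_PoolWorker is also not shown. From the filtering above, it receives one test result that is guaranteed to have an aggregate trace and at least one tbmv2 tag, and it must return a list of histogram dicts. A hedged sketch of that shape, with the metric run itself stubbed out as a hypothetical _RunMetrics:

def _RunMetrics(trace_file, metrics):
  # Hypothetical stand-in for the TBMv2 metric runner; a real implementation
  # would execute the named metrics against the trace.
  del trace_file, metrics
  return []

def _PoolWorker(test_result):
  # Sketch matching the contract implied by ComputeTBMv2Metrics above.
  metrics = [tag['value'] for tag in test_result.get('tags', [])
             if tag['key'] == 'tbmv2']
  trace_file = test_result['outputArtifacts'][HTML_TRACE_NAME]['filePath']
  return _RunMetrics(trace_file, metrics)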
Code example #6
File: processor.py Project: tt20050510/chromium
def UploadArtifacts(intermediate_results, upload_bucket, results_label):
  """Upload all artifacts to cloud.

  For each test run, uploads all its artifacts to cloud and sets remoteUrl
  fields in intermediate_results.
  """
  if upload_bucket is None:
    return

  run_identifier = _RunIdentifier(
      results_label, intermediate_results['benchmarkRun']['startTime'])
  work_list = []

  for result in intermediate_results['testResults']:
    artifacts = result.get('outputArtifacts', {})
    for name, artifact in artifacts.items():
      if 'remoteUrl' in artifact:
        continue
      # TODO(crbug.com/981349): Remove this check after Telemetry does not
      # save histograms as an artifact anymore.
      if name == compute_metrics.HISTOGRAM_DICTS_FILE:
        continue
      remote_name = '/'.join([run_identifier, result['testPath'], name])
      work_list.append((artifact, remote_name))

  def PoolUploader(work_item):
    artifact, remote_name = work_item
    artifact['remoteUrl'] = cloud_storage.Insert(
        upload_bucket, remote_name, artifact['filePath'])

  for _ in util.ApplyInParallel(PoolUploader, work_list):
    pass

  for result in intermediate_results['testResults']:
    artifacts = result.get('outputArtifacts', {})
    for name, artifact in artifacts.items():
      # Artifacts skipped above (e.g. the histogram dicts file) never get a
      # remoteUrl, so guard before logging.
      if 'remoteUrl' in artifact:
        logging.info('Uploaded %s of %s to %s', name, result['testPath'],
                     artifact['remoteUrl'])
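Both RunIdentifier in code example #3 and _RunIdentifier here are referenced without being shown. A plausible sketch, assuming the identifier only needs to be a cloud-storage-safe join of the results label and the run's start timestamp:

import re

def _RunIdentifier(results_label, test_suite_start):
  # Sketch: sanitize free-form text into something safe for object names.
  parts = []
  if results_label:
    parts.append(re.sub(r'\W+', '_', results_label))
  parts.append(re.sub(r'\W+', '_', test_suite_start))
  return '_'.join(parts)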
Code example #7
def testApplyInParallel(self):
  work_list = [[1], [2], [3]]
  def fun(x):
    x.extend(x)
  util.ApplyInParallel(fun, work_list)
  self.assertEqual(work_list, [[1, 1], [2, 2], [3, 3]])
Code example #8
def testApplyInParallel(self):
  work_list = [1, 2, 3]
  fun = lambda x: x * x
  result = set(util.ApplyInParallel(fun, work_list))
  self.assertEqual(result, set([1, 4, 9]))
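None of the snippets show util.ApplyInParallel itself, but together they pin down its contract: side effects on work items are visible once the call returns (code examples #1 and #7), per-item results are iterable (code examples #5 and #8), on_failure(item) is invoked when the function raises (code example #1), and the exception propagates when no on_failure is given (code example #4). A minimal sketch that satisfies all of that, assuming a small thread pool; the real Chromium implementation may differ in pool sizing, laziness, and error handling:

from multiprocessing.dummy import Pool  # thread-based pool

def ApplyInParallel(function, work_list, on_failure=None):
  # Apply function to every item of work_list on a thread pool and return
  # the list of per-item results, in input order.
  if not work_list:
    return []

  def Work(item):
    try:
      return function(item)
    except Exception:
      if on_failure is None:
        raise  # matches testApplyInParallelExceptionRaised
      on_failure(item)  # matches testApplyInParallelOnFailure

  pool = Pool(processes=min(len(work_list), 4))  # pool size is a guess
  try:
    # map is eager, so mutations of the work items are visible to the
    # caller as soon as this returns, as code example #7 relies on.
    return pool.map(Work, work_list)
  finally:
    pool.close()
    pool.join()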