Example 1
def AddDiagnosticsToHistograms(test_result, test_suite_start, results_label,
                               test_path_format):
    """Add diagnostics to all histograms of a test result.

    Reads diagnostics from the test artifact and adds them to all histograms.
    Also sets additional diagnostics based on test result metadata.
    This overwrites the corresponding diagnostics previously set by e.g.
    run_metrics.
    """
    artifacts = test_result.get('outputArtifacts', {})
    if DIAGNOSTICS_NAME in artifacts:
        with open(artifacts[DIAGNOSTICS_NAME]['filePath']) as f:
            diagnostics = json.load(f)['diagnostics']
        for name, diag in diagnostics.items():
            # For now, we only support GenericSet diagnostics that are serialized
            # as lists of values.
            assert isinstance(diag, list)
            test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
                name, generic_set.GenericSet(diag))
        del artifacts[DIAGNOSTICS_NAME]

    test_suite, test_case = util.SplitTestPath(test_result, test_path_format)
    if 'startTime' in test_result:
        test_start_ms = util.IsoTimestampToEpoch(
            test_result['startTime']) * 1e3
    else:
        test_start_ms = None
    test_suite_start_ms = util.IsoTimestampToEpoch(test_suite_start) * 1e3
    story_tags = [
        tag['value'] for tag in test_result.get('tags', [])
        if tag['key'] == 'story_tag'
    ]
    result_id = int(test_result.get('resultId', 0))
    trace_url = GetTraceUrl(test_result)

    additional_diagnostics = [
        (reserved_infos.BENCHMARKS, test_suite),
        (reserved_infos.BENCHMARK_START, test_suite_start_ms),
        (reserved_infos.LABELS, results_label),
        (reserved_infos.STORIES, test_case),
        (reserved_infos.STORYSET_REPEATS, result_id),
        (reserved_infos.STORY_TAGS, story_tags),
        (reserved_infos.TRACE_START, test_start_ms),
        (reserved_infos.TRACE_URLS, trace_url),
    ]
    for name, value in _WrapDiagnostics(additional_diagnostics):
        test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
            name, value)
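For reference, here is a minimal sketch of the intermediate test result record this function reads. The field names mirror the lookups in the code above; the values, and the contents of the DIAGNOSTICS_NAME artifact, are made-up illustrations, not taken from the source.

# Hypothetical input record; only the fields read by AddDiagnosticsToHistograms
# are shown, and every value is illustrative.
sample_test_result = {
    # '_histograms' would hold a tracing HistogramSet instance; omitted here
    # because constructing one requires the catapult tracing package.
    'testPath': 'my_benchmark/my_story',  # consumed via util.SplitTestPath
    'startTime': '2020-01-01T00:00:00.000000Z',
    'resultId': '7',
    'tags': [{'key': 'story_tag', 'value': 'desktop'}],
    'outputArtifacts': {
        # DIAGNOSTICS_NAME (a module-level constant) would map to something
        # like {'filePath': '/tmp/diagnostics.json'}, where that file holds
        # {"diagnostics": {"owners": ["someone@example.com"]}}.
    },
}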
Example 2
def AddDiagnosticsToHistograms(test_result, test_suite_start, results_label):
    """Add diagnostics to all histograms of a test run.

    Reads diagnostics from the test artifact and adds them to all histograms.
    This overwrites the corresponding diagnostics previously set by e.g.
    run_metrics.
    """
    artifacts = test_result.get('outputArtifacts', {})
    if DIAGNOSTICS_NAME in artifacts:
        with open(artifacts[DIAGNOSTICS_NAME]['filePath']) as f:
            diagnostics = json.load(f)['diagnostics']
        for name, diag in diagnostics.items():
            # For now, we only support GenericSet diagnostics that are serialized
            # as lists of values.
            assert isinstance(diag, list)
            test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
                name, generic_set.GenericSet(diag))

    timestamp_ms = util.IsoTimestampToEpoch(test_suite_start) * 1e3
    test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
        reserved_infos.BENCHMARK_START.name,
        date_range.DateRange(timestamp_ms))

    if results_label is not None:
        test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
            reserved_infos.LABELS.name,
            generic_set.GenericSet([results_label]))
Example 3
def _GlobalDiagnostics(benchmark_run):
  """Extract diagnostics information about the whole benchmark run.

  These diagnostics will be added to ad-hoc measurements recorded by
  benchmarks.
  """
  timestamp_ms = util.IsoTimestampToEpoch(benchmark_run['startTime']) * 1e3
  return {
    reserved_infos.BENCHMARK_START.name: date_range.DateRange(timestamp_ms),
  }
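A rough usage sketch for the helper above, assuming the catapult tracing package is importable; the module path in the import is an assumption, not taken from the source.

from tracing.value import histogram_set

# Only 'startTime' is read by _GlobalDiagnostics; the value is illustrative.
benchmark_run = {'startTime': '2020-01-01T00:00:00.000000Z'}

histograms = histogram_set.HistogramSet()
# ... ad-hoc measurement histograms would be added elsewhere ...
for name, diag in _GlobalDiagnostics(benchmark_run).items():
  histograms.AddSharedDiagnosticToAllHistograms(name, diag)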
Example 4
def Convert(test_results, base_dir, test_path_format):
    """Convert intermediate results to the JSON Test Results Format.

    Args:
      test_results: The parsed intermediate results.
      base_dir: A string with the path to a base directory; artifact file paths
        will be written relative to this.
      test_path_format: Determines how util.SplitTestPath splits a test path
        into a benchmark name and a story name.

    Returns:
      A JSON serializable dict with the converted results.
    """
    results = {'tests': {}}
    status_counter = collections.Counter()

    for result in test_results:
        benchmark_name, story_name = util.SplitTestPath(
            result, test_path_format)
        actual_status = result['status']
        expected_status = actual_status if result['expected'] else 'PASS'
        status_counter[actual_status] += 1
        artifacts = result.get('outputArtifacts', {})
        shard = _GetTagValue(result.get('tags', []), 'shard', as_type=int)
        _MergeDict(
            results['tests'], {
                benchmark_name: {
                    story_name: {
                        'actual': actual_status,
                        'expected': expected_status,
                        'is_unexpected': not result['expected'],
                        'times': float(result['runDuration'].rstrip('s')),
                        'shard': shard,
                        'artifacts': {
                            name: _ArtifactPath(artifact, base_dir)
                            for name, artifact in artifacts.items()
                        }
                    }
                }
            })

    for stories in results['tests'].values():
        for test in stories.values():
            test['actual'] = _DedupedStatus(test['actual'])
            test['expected'] = ' '.join(sorted(set(test['expected'])))
            test['is_unexpected'] = any(test['is_unexpected'])
            test['time'] = test['times'][0]
            # All shard values should be the same.
            test['shard'] = test['shard'][0]
            if test['shard'] is None:
                del test['shard']

    # Test results are written in order of execution, so the first test start
    # time is approximately the start time of the whole suite.
    test_suite_start_time = (test_results[0]['startTime'] if test_results else
                             datetime.datetime.utcnow().isoformat() + 'Z')
    # If Telemetry stops with an unhandleable error, the remaining stories
    # are marked as unexpectedly skipped.
    interrupted = any(t['status'] == 'SKIP' and not t['expected']
                      for t in test_results)
    results.update(
        seconds_since_epoch=util.IsoTimestampToEpoch(test_suite_start_time),
        interrupted=interrupted,
        num_failures_by_type=dict(status_counter),
        path_delimiter='/',
        version=3,
    )

    return results
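For orientation, this is roughly the shape of the dict Convert returns for a single passing story. The key names follow directly from the code above; the concrete values are invented.

# Illustrative output of Convert() for one passing story.
example_output = {
    'tests': {
        'my_benchmark': {
            'my_story': {
                'actual': 'PASS',
                'expected': 'PASS',
                'is_unexpected': False,
                'times': [2.5],  # one entry per run of the story
                'time': 2.5,     # duration of the first run
                'shard': 0,      # dropped entirely when no 'shard' tag is set
                'artifacts': {'logs': 'my_benchmark/my_story/logs.txt'},
            },
        },
    },
    'seconds_since_epoch': 1577836800.0,
    'interrupted': False,
    'num_failures_by_type': {'PASS': 1},
    'path_delimiter': '/',
    'version': 3,
}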
Example 5
def Convert(in_results, base_dir):
  """Convert intermediate results to the JSON Test Results Format.

  Args:
    in_results: The parsed intermediate results.
    base_dir: A string with the path to a base directory; artifact file paths
      will be written relative to this.

  Returns:
    A JSON serializable dict with the converted results.
  """
  results = {'tests': {}}
  status_counter = collections.Counter()

  for result in in_results['testResults']:
    benchmark_name, story_name = result['testPath'].split('/')
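    # urllib.unquote is the Python 2 API; Python 3 moved it to
    # urllib.parse.unquote.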
    story_name = urllib.unquote(story_name)
    actual_status = result['status']
    expected_status = actual_status if result['isExpected'] else 'PASS'
    status_counter[actual_status] += 1
    artifacts = result.get('outputArtifacts', {})
    shard = _GetTagValue(result.get('tags', []), 'shard', as_type=int)
    _MergeDict(
        results['tests'],
        {
            benchmark_name: {
                story_name: {
                    'actual': actual_status,
                    'expected': expected_status,
                    'is_unexpected': not result['isExpected'],
                    'times': float(result['runDuration'].rstrip('s')),
                    'shard': shard,
                    'artifacts': {
                        name: _ArtifactPath(artifact, base_dir)
                        for name, artifact in artifacts.items()
                    }
                }
            }
        }
    )

  for stories in results['tests'].values():
    for test in stories.values():
      test['actual'] = _DedupedStatus(test['actual'])
      test['expected'] = ' '.join(sorted(set(test['expected'])))
      test['is_unexpected'] = any(test['is_unexpected'])
      test['time'] = test['times'][0]
      test['shard'] = test['shard'][0]  # All shard values should be the same.
      if test['shard'] is None:
        del test['shard']

  benchmark_run = in_results['benchmarkRun']
  results.update(
      seconds_since_epoch=util.IsoTimestampToEpoch(benchmark_run['startTime']),
      interrupted=benchmark_run['interrupted'],
      num_failures_by_type=dict(status_counter),
      path_delimiter='/',
      version=3,
  )

  return results