Example no. 1
def _handle_perf_results(
    benchmark_enabled_map, benchmark_directory_map, configuration_name,
    build_properties, service_account_file, extra_links):
  """
    Upload perf results to the perf dashboard.

    This method also upload the perf results to logdog and augment it to
    |extra_links|.

    Returns:
      0 if this upload to perf dashboard succesfully, 1 otherwise.
  """
  tmpfile_dir = tempfile.mkdtemp('resultscache')
  try:
    # Upload all eligible benchmarks to the perf dashboard
    logdog_dict = {}
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    upload_fail = False
    with oauth_api.with_access_token(service_account_file) as oauth_file:
      for benchmark_name, directories in benchmark_directory_map.iteritems():
        if not benchmark_enabled_map[benchmark_name]:
          continue
        # There are potentially multiple directories with results; re-write
        # and merge them if necessary.
        results_filename = None
        if len(directories) > 1:
          merge_perf_dir = os.path.join(
              os.path.abspath(tmpfile_dir), benchmark_name)
          if not os.path.exists(merge_perf_dir):
            os.makedirs(merge_perf_dir)
          results_filename = os.path.join(
              merge_perf_dir, 'merged_perf_results.json')
          _merge_perf_results(results_filename, directories)
        else:
          # It was only written to one shard; use that shard's data.
          results_filename = join(directories[0], 'perf_results.json')
        print 'Uploading perf results from %s benchmark' % benchmark_name
        upload_fail = _upload_and_write_perf_data_to_logfile(
            benchmark_name, results_filename, configuration_name,
            build_properties, oauth_file, tmpfile_dir, logdog_dict,
            ('.reference' in benchmark_name))

    logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
    logdog_stream = logdog_helper.text(logdog_file_name,
        json.dumps(logdog_dict, sort_keys=True,
                   indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    if upload_fail:
      logdog_label += ' Upload Failure'
    extra_links[logdog_label] = logdog_stream
    if upload_fail:
      return 1
    return 0
  finally:
    shutil.rmtree(tmpfile_dir)
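
The helper _generate_unique_logdog_filename is not shown in this example. Examples no. 6 and no. 7 build the stream name inline as 'Results_Dashboard_' + str(uuid.uuid4()), so a minimal sketch, assuming the factored-out helper does the same (the body below is an assumption, not the actual implementation):

import uuid

def _generate_unique_logdog_filename(name_prefix):
  # Append a random UUID so repeated uploads never collide on a logdog
  # stream name (mirrors the inline pattern used in Examples no. 6 and 7).
  return name_prefix + str(uuid.uuid4())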
def _process_perf_results(output_json, configuration_name,
                          service_account_file,
                          build_properties, task_output_dir):
  """Process one or more perf JSON results.

  Consists of merging the json-test-format output and uploading the perf test
  output (chartjson and histogram).

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perf_results.json file containing the performance results in histogram
  or dashboard json format and a test_results.json file containing the json
  test results for the benchmark.
  """
  directory_list = [
      f for f in listdir(task_output_dir)
      if not isfile(join(task_output_dir, f))
  ]
  benchmark_directory_list = []
  for directory in directory_list:
    benchmark_directory_list += [
      join(task_output_dir, directory, f)
      for f in listdir(join(task_output_dir, directory))
    ]

  test_results_list = []
  tmpfile_dir = tempfile.mkdtemp('resultscache')
  try:
    for directory in benchmark_directory_list:
      if '.reference' in directory:
        # We don't need to upload reference build data to the
        # flakiness dashboard since we don't monitor the ref build
        continue
      with open(join(directory, 'test_results.json')) as json_data:
        test_results_list.append(json.load(json_data))
    _merge_json_output(output_json, test_results_list)

    with oauth_api.with_access_token(service_account_file) as oauth_file:
      for directory in benchmark_directory_list:
        _upload_perf_results(join(directory, 'perf_results.json'),
            directory, configuration_name, build_properties,
            oauth_file, tmpfile_dir)
  finally:
    shutil.rmtree(tmpfile_dir)
  return 0
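
The directory-walking logic at the top of _process_perf_results only makes sense against a particular layout of task_output_dir. A sketch of the layout the listdir/isfile filtering and the hard-coded file names assume; the names in angle brackets are placeholders, not actual paths:

# task_output_dir/
#   <benchmark run>/                one entry per benchmark that was run
#     <benchmark name>/             subdirectory named after the benchmark
#       perf_results.json           chartjson or histogram output
#       test_results.json           json-test-format results
#     <benchmark name>.reference/   reference build; excluded from the
#       perf_results.json           flakiness merge but still uploaded
#       test_results.json
#   <any top-level file>            skipped by the isfile() filter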
Example no. 3
def _handle_perf_results(benchmark_enabled_map, benchmark_directory_list,
                         configuration_name, build_properties,
                         service_account_file, extra_links):
    """
    Upload perf results to the perf dashboard.

    This method also upload the perf results to logdog and augment it to
    |extra_links|.

    Returns:
      0 if this upload to perf dashboard succesfully, 1 otherwise.
  """
    tmpfile_dir = tempfile.mkdtemp('resultscache')
    try:
        # Upload all eligible benchmarks to the perf dashboard
        logdog_dict = {}
        logdog_stream = None
        logdog_label = 'Results Dashboard'
        upload_fail = False
        with oauth_api.with_access_token(service_account_file) as oauth_file:
            for directory in benchmark_directory_list:
                benchmark_name = _get_benchmark_name(directory)
                if not benchmark_enabled_map[benchmark_name]:
                    continue
                print 'Uploading perf results from %s benchmark' % benchmark_name
                upload_fail = _upload_and_write_perf_data_to_logfile(
                    benchmark_name, directory, configuration_name,
                    build_properties, oauth_file, tmpfile_dir, logdog_dict,
                    ('.reference' in benchmark_name))

        logdog_file_name = _generate_unique_logdog_filename(
            'Results_Dashboard_')
        logdog_stream = logdog_helper.text(
            logdog_file_name,
            json.dumps(logdog_dict,
                       sort_keys=True,
                       indent=4,
                       separators=(',', ': ')))
        if upload_fail:
            logdog_label += ' Upload Failure'
        extra_links[logdog_label] = logdog_stream
        if upload_fail:
            return 1
        return 0
    finally:
        shutil.rmtree(tmpfile_dir)
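
This variant resolves the benchmark name through _get_benchmark_name, which is not shown. A minimal sketch, assuming it mirrors the inline expression used in Example no. 6 (basename of the directory, with the literal " benchmark" suffix stripped):

from os.path import basename

def _get_benchmark_name(directory):
    # The benchmark directory is named after the benchmark; some runners
    # append a literal " benchmark" suffix, stripped here as in Example no. 6.
    return basename(directory).replace(" benchmark", "")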
def _upload_individual(benchmark_name, directories, configuration_name,
                       build_properties, output_json_file,
                       service_account_file):
    tmpfile_dir = tempfile.mkdtemp('resultscache')
    try:
        upload_begin_time = time.time()
        # There are potentially multiple directories with results; re-write
        # and merge them if necessary.
        results_filename = None
        if len(directories) > 1:
            merge_perf_dir = os.path.join(os.path.abspath(tmpfile_dir),
                                          benchmark_name)
            if not os.path.exists(merge_perf_dir):
                os.makedirs(merge_perf_dir)
            results_filename = os.path.join(merge_perf_dir,
                                            'merged_perf_results.json')
            _merge_perf_results(benchmark_name, results_filename, directories)
        else:
            # It was only written to one shard; use that shard's data.
            results_filename = join(directories[0], 'perf_results.json')

        print 'Uploading perf results from %s benchmark' % benchmark_name
        # We generate an oauth token for every benchmark upload in the event
        # the token could time out, see crbug.com/854162
        with oauth_api.with_access_token(
                service_account_file,
                ("%s_tok" % benchmark_name)) as oauth_file:
            with open(output_json_file, 'w') as oj:
                upload_fail = _upload_perf_results(results_filename,
                                                   benchmark_name,
                                                   configuration_name,
                                                   build_properties,
                                                   oauth_file, tmpfile_dir, oj)
                upload_end_time = time.time()
                print_duration(('%s upload time' % (benchmark_name)),
                               upload_begin_time, upload_end_time)
                return (benchmark_name, upload_fail)
    finally:
        shutil.rmtree(tmpfile_dir)
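
_upload_individual writes its per-benchmark verdict to output_json_file and returns (benchmark_name, upload_fail), which suggests it is meant to be invoked once per enabled benchmark by some driver. A hedged sketch of such a driver, run sequentially for clarity; the driver name, the shape of benchmark_directory_map and the temp-file naming are assumptions, and the real dispatch may well be parallel given the per-call oauth token noted above:

import os

def _upload_all_individually(benchmark_enabled_map, benchmark_directory_map,
                             configuration_name, build_properties,
                             service_account_file, tmp_dir):
    # Hypothetical driver: call _upload_individual once per enabled benchmark
    # and report whether any upload failed.
    results = []
    for benchmark_name, directories in benchmark_directory_map.iteritems():
        if not benchmark_enabled_map[benchmark_name]:
            continue
        output_json_file = os.path.join(
            tmp_dir, benchmark_name + '_upload_results.json')
        results.append(_upload_individual(
            benchmark_name, directories, configuration_name, build_properties,
            output_json_file, service_account_file))
    # Any True in the second tuple slot means at least one upload failed.
    return any(failed for _, failed in results)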
Example no. 5
def _process_perf_results(output_json, configuration_name,
                          service_account_file, build_properties,
                          task_output_dir):
    """Process one or more perf JSON results.

  Consists of merging the json-test-format output and uploading the perf test
  output (chartjson and histogram).

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perftest-output.json file containing the performance results in histogram
  or dashboard json format and an output.json file containing the json test
  results for the benchmark.
  """
    directory_list = [
        f for f in listdir(task_output_dir)
        if not isfile(join(task_output_dir, f))
    ]
    benchmark_directory_list = []
    for directory in directory_list:
        benchmark_directory_list += [
            join(task_output_dir, directory, f)
            for f in listdir(join(task_output_dir, directory))
        ]

    # We need to keep track of disabled benchmarks so we don't try to
    # upload the results.
    test_results_list = []
    tmpfile_dir = tempfile.mkdtemp('resultscache')
    try:
        with oauth_api.with_access_token(service_account_file) as oauth_file:
            for directory in benchmark_directory_list:
                # Obtain the test name we are running
                benchmark_name = basename(directory)
                disabled = False
                with open(join(directory, 'test_results.json')) as json_data:
                    json_results = json.load(json_data)
                    if not json_results:
                        # A null output means the test didn't produce any
                        # results. Report it and continue loading the rest
                        # of the test results.
                        print 'No results produced for %s, skipping upload' % directory
                        continue
                    if json_results.get('version') == 3:
                        # Non-telemetry tests don't write json test results,
                        # but if they are executing at all they are enabled
                        # and will generate chartjson results.
                        if not bool(json_results.get('tests')):
                            disabled = True
                    if '.reference' not in directory:
                        # We don't need to upload reference build data to the
                        # flakiness dashboard since we don't monitor the ref build
                        test_results_list.append(json_results)
                if disabled:
                    # We don't upload disabled benchmarks
                    print 'Benchmark %s disabled' % benchmark_name
                    continue
                print 'Uploading perf results from %s benchmark' % benchmark_name
                _upload_perf_results(join(directory, 'perf_results.json'),
                                     benchmark_name, configuration_name,
                                     build_properties, oauth_file, tmpfile_dir)
            _merge_json_output(output_json, test_results_list)
    finally:
        shutil.rmtree(tmpfile_dir)
    return 0
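
The _process_perf_results variants all funnel the collected json-test-format results through _merge_json_output, whose body is not shown in these examples (later revisions also pass a logdog stream and label, which this sketch ignores). A minimal sketch of the merge, assuming version-3 json-test-results with a 'tests' dictionary, the shape checked by the json_results.get('version') == 3 branch above; summary counters in the full format are deliberately omitted:

import json

def _merge_json_output_sketch(output_json, jsons_to_merge):
    # Fold every shard's 'tests' tree into one version-3 result and write it
    # to the requested output location. A shallow update is enough here only
    # because each benchmark contributes distinct test names; a production
    # merge would merge the trees recursively and reconcile summary counters.
    merged = {'version': 3, 'tests': {}}
    for result in jsons_to_merge:
        merged['tests'].update(result.get('tests', {}))
    with open(output_json, 'w') as f:
        json.dump(merged, f)
    return 0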
Example no. 6
def _process_perf_results(output_json, configuration_name,
                          service_account_file, build_properties,
                          task_output_dir):
    """Process one or more perf JSON results.

  Consists of merging the json-test-format output and uploading the perf test
  output (chartjson and histogram).

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perftest-output.json file containing the performance results in histogram
  or dashboard json format and an output.json file containing the json test
  results for the benchmark.
  """
    directory_list = [
        f for f in listdir(task_output_dir)
        if not isfile(join(task_output_dir, f))
    ]
    benchmark_directory_list = []
    for directory in directory_list:
        benchmark_directory_list += [
            join(task_output_dir, directory, f)
            for f in listdir(join(task_output_dir, directory))
        ]

    # We need to keep track of disabled benchmarks so we don't try to
    # upload the results.
    test_results_list = []
    tmpfile_dir = tempfile.mkdtemp('resultscache')
    upload_failure = False

    build_properties = json.loads(build_properties)
    if not configuration_name:
        # We are deprecating perf-id; see crbug.com/817823.
        configuration_name = build_properties['buildername']

    try:
        logdog_dict = {}
        with oauth_api.with_access_token(service_account_file) as oauth_file:
            for directory in benchmark_directory_list:
                # Obtain the test name we are running
                benchmark_name = basename(directory).replace(" benchmark", "")
                is_ref = '.reference' in benchmark_name
                disabled = False
                with open(join(directory, 'test_results.json')) as json_data:
                    json_results = json.load(json_data)
                    if not json_results:
                        # A null output means the test didn't produce any
                        # results. Report it and continue loading the rest
                        # of the test results.
                        print 'No results produced for %s, skipping upload' % directory
                        continue
                    if json_results.get('version') == 3:
                        # Non-telemetry tests don't write json test results,
                        # but if they are executing at all they are enabled
                        # and will generate chartjson results.
                        if not bool(json_results.get('tests')):
                            disabled = True
                    if not is_ref:
                        # We don't need to upload reference build data to the
                        # flakiness dashboard since we don't monitor the ref build
                        test_results_list.append(json_results)
                if disabled:
                    # We don't upload disabled benchmarks
                    print 'Benchmark %s disabled' % benchmark_name
                    continue

                print 'Uploading perf results from %s benchmark' % benchmark_name

                upload_fail = _upload_and_write_perf_data_to_logfile(
                    benchmark_name, directory, configuration_name,
                    build_properties, oauth_file, tmpfile_dir, logdog_dict,
                    is_ref)
                upload_failure = upload_failure or upload_fail

            logdog_label = 'Results Dashboard'
            logdog_file_name = 'Results_Dashboard_' + str(uuid.uuid4())
            if upload_failure:
                logdog_label += ' Upload Failure'
            _merge_json_output(
                output_json, test_results_list,
                logdog_helper.text(
                    logdog_file_name,
                    json.dumps(logdog_dict,
                               sort_keys=True,
                               indent=4,
                               separators=(',', ':'))), logdog_label)
    finally:
        shutil.rmtree(tmpfile_dir)
    return upload_failure
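
Examples no. 1, 3, 6 and 7 all delegate to _upload_and_write_perf_data_to_logfile without showing it. A hedged sketch of the shape its call sites suggest: upload via _upload_perf_results (called directly in Examples no. 1 and 5) and record a per-benchmark entry in logdog_dict for the merged logdog summary. The body, including what exactly is stored in logdog_dict and whether _upload_perf_results returns a failure flag in this revision, is an assumption:

def _upload_and_write_perf_data_to_logfile_sketch(
        benchmark_name, directory, configuration_name, build_properties,
        oauth_file, tmpfile_dir, logdog_dict, is_ref):
    # Upload the benchmark's chartjson/histogram output, following the direct
    # call made in Example no. 5.
    upload_fail = _upload_perf_results(join(directory, 'perf_results.json'),
                                       benchmark_name, configuration_name,
                                       build_properties, oauth_file,
                                       tmpfile_dir)
    # Record a breadcrumb for the merged logdog summary; the key layout here
    # is a guess, only the later json.dumps(logdog_dict) is visible above.
    logdog_dict.setdefault(benchmark_name, {})['is_reference_build'] = is_ref
    return bool(upload_fail)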
Example no. 7
def _process_perf_results(output_json, configuration_name,
                          service_account_file,
                          build_properties, task_output_dir,
                          smoke_test_mode):
  """Process one or more perf JSON results.

  Consists of merging the json-test-format output and uploading the perf test
  output (chartjson and histogram).

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perf_results.json file containing the performance results in histogram
  or dashboard json format and a test_results.json file containing the json
  test results for the benchmark.
  """
  directory_list = [
      f for f in listdir(task_output_dir)
      if not isfile(join(task_output_dir, f))
  ]
  benchmark_directory_list = []
  for directory in directory_list:
    benchmark_directory_list += [
      join(task_output_dir, directory, f)
      for f in listdir(join(task_output_dir, directory))
    ]

  test_results_list = []
  tmpfile_dir = tempfile.mkdtemp('resultscache')
  upload_failure = False

  build_properties = json.loads(build_properties)
  if not configuration_name:
    # We are deprecating perf-id; see crbug.com/817823.
    configuration_name = build_properties['buildername']

  try:
    # First obtain the list of json test results to merge
    # and determine the status of each benchmark
    benchmark_enabled_map = _handle_perf_json_test_results(
        benchmark_directory_list, test_results_list)

    # Upload all eligible benchmarks to the perf dashboard
    logdog_dict = {}
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    if not smoke_test_mode:
      with oauth_api.with_access_token(service_account_file) as oauth_file:
        for directory in benchmark_directory_list:
          benchmark_name = _get_benchmark_name(directory)
          if not benchmark_enabled_map[benchmark_name]:
            continue
          print 'Uploading perf results from %s benchmark' % benchmark_name
          upload_fail = _upload_and_write_perf_data_to_logfile(
              benchmark_name, directory, configuration_name, build_properties,
              oauth_file, tmpfile_dir, logdog_dict,
              ('.reference' in benchmark_name))
          upload_failure = upload_failure or upload_fail

      logdog_file_name = 'Results_Dashboard_' + str(uuid.uuid4())
      logdog_stream = logdog_helper.text(logdog_file_name,
          json.dumps(logdog_dict, sort_keys=True,
              indent=4, separators=(',', ':')))
      if upload_failure:
        logdog_label += ' Upload Failure'

    # Finally, merge all test results json and write out to output location
    _merge_json_output(output_json, test_results_list,
                       logdog_stream, logdog_label)
  finally:
    shutil.rmtree(tmpfile_dir)
  return upload_failure
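
Example no. 7 factors the per-benchmark inspection of test_results.json out into _handle_perf_json_test_results, which is not shown. A sketch assuming it mirrors the inline logic of Examples no. 5 and 6: null output is reported and treated as disabled, version-3 results with an empty 'tests' dict mark the benchmark as disabled, and reference builds are kept out of the flakiness merge:

def _handle_perf_json_test_results_sketch(benchmark_directory_list,
                                          test_results_list):
  benchmark_enabled_map = {}
  for directory in benchmark_directory_list:
    benchmark_name = _get_benchmark_name(directory)
    enabled = True
    with open(join(directory, 'test_results.json')) as json_data:
      json_results = json.load(json_data)
    if not json_results:
      # The test produced no output at all; report it and mark the benchmark
      # disabled so the upload loop skips it.
      print 'No results produced for %s, skipping upload' % directory
      enabled = False
    else:
      if json_results.get('version') == 3 and not json_results.get('tests'):
        # Version-3 results with no tests mean the benchmark was disabled.
        enabled = False
      if '.reference' not in benchmark_name:
        # Reference builds are not monitored on the flakiness dashboard.
        test_results_list.append(json_results)
    benchmark_enabled_map[benchmark_name] = enabled
  return benchmark_enabled_map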