Code Example #1: execute_gtest_perf_test runs a gtest-based perf binary, captures its output to a log file, and falls back to converting the stdout log into legacy dashboard JSON when the binary does not write a perf results file itself.
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    env['CHROME_HEADLESS'] = '1'

    return_code = 0
    try:
        command = command_generator.generate(output_paths.benchmark_path)
        if use_xvfb:
            # When running with xvfb, we currently output both to stdout and to the
            # file. It would be better to only output to the file to keep the logs
            # clean.
            return_code = xvfb.run_executable(command,
                                              env,
                                              stdoutfile=output_paths.logs)
        else:
            with open(output_paths.logs, 'w') as handle:
                try:
                    return_code = test_env.run_command_output_to_handle(
                        command, handle, env=env)
                except OSError as e:
                    print(
                        'Command to run gtest perf test %s failed with an OSError: %s'
                        % (output_paths.name, e))
                    return_code = 1
        if (not os.path.exists(output_paths.perf_results)
                and os.path.exists(output_paths.logs)):
            # Get the correct json format from the stdout to write to the perf
            # results file if gtest does not generate one.
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            graph_json_string = results_processor.GenerateJsonResults(
                output_paths.logs)
            with open(output_paths.perf_results, 'w') as fh:
                fh.write(graph_json_string)
    except Exception:
        traceback.print_exc()
        return_code = 1
    if os.path.exists(output_paths.perf_results):
        if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
            with path_util.SysPath(path_util.GetTracingDir()):
                # pylint: disable=no-name-in-module
                from tracing.value import gtest_json_converter
                # pylint: enable=no-name-in-module
            gtest_json_converter.ConvertGtestJsonFile(
                output_paths.perf_results)
    else:
        print('ERROR: gtest perf test %s did not generate perf output' %
              output_paths.name)
        return_code = 1
    write_simple_test_results(return_code, output_paths.test_results,
                              output_paths.name)
    return return_code
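
A minimal usage sketch for the function above. The OutputPaths and SimpleCommandGenerator classes here are hypothetical stand-ins that supply only the attributes execute_gtest_perf_test actually touches; the real harness provides richer versions, the gtest flag shown is illustrative, and the module-level constants (CHROME_SANDBOX_ENV, GTEST_CONVERSION_WHITELIST, etc.) are assumed to be in scope.

import os

# Hypothetical stand-in for the harness's output-path bundle.
class OutputPaths(object):
    def __init__(self, root, name):
        self.name = name
        self.benchmark_path = os.path.join(root, name)
        self.logs = os.path.join(root, name + '.log')
        self.perf_results = os.path.join(root, name + '_perf_results.json')
        self.test_results = os.path.join(root, name + '_test_results.json')

# Hypothetical stand-in for the harness's command generator.
class SimpleCommandGenerator(object):
    def __init__(self, executable):
        self.executable_name = executable

    def generate(self, output_dir):
        # An illustrative command line, not the harness's real one.
        return [os.path.join('.', self.executable_name),
                '--gtest_output=json:' + output_dir]

paths = OutputPaths('/tmp/perf_out', 'base_perftests')
return_code = execute_gtest_perf_test(
    SimpleCommandGenerator('base_perftests'), paths, use_xvfb=False)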
Code Example #2: _upload_perf_results assembles the argument list for upload_results_to_perf_dashboard and uploads one result JSON, first converting gtest-format results to histograms for whitelisted tests on LUCI builders.
def _upload_perf_results(json_to_upload, name, configuration_name,
                         build_properties, service_account_file,
                         output_json_file):
    """Upload the contents of result JSON(s) to the perf dashboard."""
    args = [
        '--buildername', build_properties['buildername'], '--buildnumber',
        build_properties['buildnumber'], '--name', name,
        '--configuration-name', configuration_name, '--results-file',
        json_to_upload, '--results-url', RESULTS_URL, '--got-revision-cp',
        build_properties['got_revision_cp'], '--got-v8-revision',
        build_properties['got_v8_revision'], '--got-webrtc-revision',
        build_properties['got_webrtc_revision'], '--output-json-file',
        output_json_file, '--perf-dashboard-machine-group',
        _GetMachineGroup(build_properties)
    ]
    is_luci = False
    buildbucket = build_properties.get('buildbucket', {})
    # Note: the original Python 2 code checked isinstance(buildbucket,
    # basestring); str is the Python 3 equivalent for a JSON-encoded value.
    if isinstance(buildbucket, str):
        buildbucket = json.loads(buildbucket)
    if ('build' in buildbucket
            and buildbucket['build'].get('bucket') == 'luci.chrome.ci'):
        is_luci = True

    if is_luci and _is_gtest(json_to_upload) and (
            name in GTEST_CONVERSION_WHITELIST):
        path_util.AddTracingToPath()
        from tracing.value import (  # pylint: disable=no-name-in-module
            gtest_json_converter)
        gtest_json_converter.ConvertGtestJsonFile(json_to_upload)
        _data_format_cache[json_to_upload] = DATA_FORMAT_HISTOGRAMS

    if 'build' in buildbucket:
        args += [
            '--project',
            buildbucket['build'].get('project'),
            '--buildbucket',
            buildbucket['build'].get('bucket'),
        ]

    if service_account_file and not is_luci:
        args += ['--service-account-file', service_account_file]

    if build_properties.get('git_revision'):
        args.append('--git-revision')
        args.append(build_properties['git_revision'])
    if _is_histogram(json_to_upload):
        args.append('--send-as-histograms')

    return upload_results_to_perf_dashboard.main(args)
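
A sketch of the inputs the uploader above expects, assuming the surrounding module supplies RESULTS_URL, _GetMachineGroup, and upload_results_to_perf_dashboard. All property values below are made up for illustration.

build_properties = {
    'buildername': 'linux-perf',
    'buildnumber': '12345',
    'got_revision_cp': 'refs/heads/main@{#900000}',
    'got_v8_revision': 'deadbeef',
    'got_webrtc_revision': 'cafef00d',
    # May also arrive as a JSON-encoded string; the function handles both.
    'buildbucket': {'build': {'bucket': 'luci.chrome.ci',
                              'project': 'chrome'}},
    'git_revision': 'abc123',
}

exit_code = _upload_perf_results(
    json_to_upload='/tmp/base_perftests_perf_results.json',
    name='base_perftests',
    configuration_name='linux-perf',
    build_properties=build_properties,
    service_account_file=None,  # ignored here, since is_luci will be True
    output_json_file='/tmp/upload_output.json')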
Code Example #3: a later revision of execute_gtest_perf_test that additionally strips the gtest sharding environment variables for binaries that do not use unit_test_launcher.cc, uploads simple test results, and reports the run's duration.
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False,
                            is_unittest=False):
  start = time.time()

  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  env['CHROME_HEADLESS'] = '1'
  # TODO(crbug/1138988): Some gtests do not implement unit_test_launcher.cc.
  # As a result, they will not respect the arguments added by
  # _generate_shard_args() and will still use the values of GTEST_SHARD_INDEX
  # and GTEST_TOTAL_SHARDS to run part of the tests.
  # Remove those environment variables as a workaround.
  if command_generator._ignore_shard_env_vars:
    env.pop('GTEST_TOTAL_SHARDS', None)
    env.pop('GTEST_SHARD_INDEX', None)

  return_code = 0
  try:
    command = command_generator.generate(output_paths.benchmark_path)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(
          command, env, stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        try:
          return_code = test_env.run_command_output_to_handle(
              command, handle, env=env)
        except OSError as e:
          print('Command to run gtest perf test %s failed with an OSError: %s' %
                (output_paths.name, e))
          return_code = 1
    if (not os.path.exists(output_paths.perf_results) and
        os.path.exists(output_paths.logs)):
      # Get the correct json format from the stdout to write to the perf
      # results file if gtest does not generate one.
      results_processor = (
          generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
      graph_json_string = results_processor.GenerateJsonResults(
          output_paths.logs)
      with open(output_paths.perf_results, 'w') as fh:
        fh.write(graph_json_string)
  except Exception:
    traceback.print_exc()
    return_code = 1
  if os.path.exists(output_paths.perf_results):
    if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
      with path_util.SysPath(path_util.GetTracingDir()):
        # pylint: disable=no-name-in-module
        from tracing.value import gtest_json_converter
        # pylint: enable=no-name-in-module
      gtest_json_converter.ConvertGtestJsonFile(output_paths.perf_results)
  else:
    print('ERROR: gtest perf test %s did not generate perf output' %
          output_paths.name)
    return_code = 1
  write_simple_test_results(return_code, output_paths.test_results,
                            output_paths.name)
  if not is_unittest:
    upload_simple_test_results(return_code, output_paths.name)

  print_duration(
      'executing gtest %s' % command_generator.executable_name, start)

  return return_code
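
The sharding workaround in this revision can be exercised on its own. A minimal, self-contained sketch; the helper name is hypothetical, and the injected values stand in for what a swarming task would set.

import os

def strip_gtest_shard_vars(env):
  # Mirrors the crbug/1138988 workaround above: binaries that do not use
  # unit_test_launcher.cc honor these variables directly and would
  # otherwise run only their shard of the test suite.
  env.pop('GTEST_TOTAL_SHARDS', None)
  env.pop('GTEST_SHARD_INDEX', None)
  return env

env = os.environ.copy()
env['GTEST_TOTAL_SHARDS'] = '4'  # e.g. injected by the swarming task
env['GTEST_SHARD_INDEX'] = '1'
env = strip_gtest_shard_vars(env)
assert 'GTEST_TOTAL_SHARDS' not in env
assert 'GTEST_SHARD_INDEX' not in env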