Example #1
    def testSysPath(self):
        sys_path_before = list(sys.path)
        with path_util.SysPath('_test_dir'):
            sys_path_within_context = list(sys.path)
        sys_path_after = list(sys.path)

        self.assertEqual(sys_path_before, sys_path_after)
        self.assertEqual(sys_path_before + ['_test_dir'],
                         sys_path_within_context)
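
The test above pins down the contract of SysPath: the directory is appended to sys.path for the duration of the with block, and the original sys.path is restored on exit. Below is a minimal sketch of a context manager with that behaviour; the name sys_path is hypothetical, and Chromium's actual path_util.SysPath may be implemented differently.

import contextlib
import sys

@contextlib.contextmanager
def sys_path(directory):
    # Append the directory for the duration of the block, then restore the
    # original sys.path, matching the assertions in testSysPath above.
    original = list(sys.path)
    sys.path.append(directory)
    try:
        yield
    finally:
        sys.path[:] = original
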
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    env['CHROME_HEADLESS'] = '1'

    return_code = 0
    try:
        command = command_generator.generate(output_paths.benchmark_path)
        if use_xvfb:
            # When running with xvfb, we currently output both to stdout and to the
            # file. It would be better to only output to the file to keep the logs
            # clean.
            return_code = xvfb.run_executable(command,
                                              env,
                                              stdoutfile=output_paths.logs)
        else:
            with open(output_paths.logs, 'w') as handle:
                try:
                    return_code = test_env.run_command_output_to_handle(
                        command, handle, env=env)
                except OSError as e:
                    print(
                        'Command to run gtest perf test %s failed with an OSError: %s'
                        % (output_paths.name, e))
                    return_code = 1
        if (not os.path.exists(output_paths.perf_results)
                and os.path.exists(output_paths.logs)):
            # Get the correct json format from the stdout to write to the perf
            # results file if gtest does not generate one.
            results_processor = generate_legacy_perf_dashboard_json.\
                LegacyResultsProcessor()
            graph_json_string = results_processor.GenerateJsonResults(
                output_paths.logs)
            with open(output_paths.perf_results, 'w') as fh:
                fh.write(graph_json_string)
    except Exception:
        traceback.print_exc()
        return_code = 1
    if os.path.exists(output_paths.perf_results):
        if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
            with path_util.SysPath(path_util.GetTracingDir()):
                # pylint: disable=no-name-in-module
                from tracing.value import gtest_json_converter
                # pylint: enable=no-name-in-module
            gtest_json_converter.ConvertGtestJsonFile(
                output_paths.perf_results)
    else:
        print('ERROR: gtest perf test %s did not generate perf output' %
              output_paths.name)
        return_code = 1
    write_simple_test_results(return_code, output_paths.test_results,
                              output_paths.name)
    return return_code
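
Note the scoped import near the end of the function above: path_util.SysPath only needs to keep the tracing directory on sys.path while the import statement runs, and the imported module stays bound afterwards, so calling ConvertGtestJsonFile after the with block has exited is intentional. A self-contained sketch of that pattern (the file name passed in is a placeholder):

from core import path_util

with path_util.SysPath(path_util.GetTracingDir()):
    # sys.path is extended only while the import executes.
    # pylint: disable=no-name-in-module
    from tracing.value import gtest_json_converter
    # pylint: enable=no-name-in-module

# sys.path has been restored here, but the module object remains usable.
gtest_json_converter.ConvertGtestJsonFile('perf_results.json')  # placeholder path
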
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False,
                            is_unittest=False):
  start = time.time()

  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  env['CHROME_HEADLESS'] = '1'
  # TODO(crbug/1138988): Some gtests do not implement unit_test_launcher.cc.
  # As a result, they will not respect the arguments added by
  # _generate_shard_args() and will still use the values of GTEST_SHARD_INDEX
  # and GTEST_TOTAL_SHARDS to run part of the tests.
  # Remove those environment variables as a workaround.
  if command_generator._ignore_shard_env_vars:
    if 'GTEST_TOTAL_SHARDS' in env:
      env.pop('GTEST_TOTAL_SHARDS')
    if 'GTEST_SHARD_INDEX' in env:
      env.pop('GTEST_SHARD_INDEX')

  return_code = 0
  try:
    command = command_generator.generate(output_paths.benchmark_path)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(
          command, env, stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        try:
          return_code = test_env.run_command_output_to_handle(
              command, handle, env=env)
        except OSError as e:
          print('Command to run gtest perf test %s failed with an OSError: %s' %
                (output_paths.name, e))
          return_code = 1
    if (not os.path.exists(output_paths.perf_results) and
        os.path.exists(output_paths.logs)):
      # Get the correct json format from the stdout to write to the perf
      # results file if gtest does not generate one.
      results_processor = generate_legacy_perf_dashboard_json.\
          LegacyResultsProcessor()
      graph_json_string = results_processor.GenerateJsonResults(
          output_paths.logs)
      with open(output_paths.perf_results, 'w') as fh:
        fh.write(graph_json_string)
  except Exception:
    traceback.print_exc()
    return_code = 1
  if os.path.exists(output_paths.perf_results):
    if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
      with path_util.SysPath(path_util.GetTracingDir()):
        # pylint: disable=no-name-in-module
        from tracing.value import gtest_json_converter
        # pylint: enable=no-name-in-module
      gtest_json_converter.ConvertGtestJsonFile(output_paths.perf_results)
  else:
    print('ERROR: gtest perf test %s did not generate perf output' %
          output_paths.name)
    return_code = 1
  write_simple_test_results(return_code, output_paths.test_results,
                            output_paths.name)
  if not is_unittest:
    upload_simple_test_results(return_code, output_paths.name)

  print_duration(
      'executing gtest %s' % command_generator.executable_name, start)

  return return_code
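
The print_duration helper is not shown in this excerpt; from its call site it takes a step description and the start timestamp captured at the top of the function. A hedged sketch of what such a helper might look like (the real implementation in run_performance_tests.py may differ):

import time

def print_duration(step, start):
  # Report the wall-clock time elapsed since `start` for the named step.
  print('Duration of %s: %d seconds.' % (step, time.time() - start))
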
Example #4
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os

# pylint: disable=wrong-import-position
from core import path_util

CLIENT_CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  'binary_dependencies.json')

with path_util.SysPath(path_util.GetTelemetryDir()):
    from telemetry import project_config
with path_util.SysPath(path_util.GetVariationsDir()):
    import fieldtrial_util  # pylint: disable=import-error


class ChromiumConfig(project_config.ProjectConfig):
    def __init__(self,
                 top_level_dir=None,
                 benchmark_dirs=None,
                 client_configs=None,
                 default_chrome_root=None,
                 expectations_files=None):
        if client_configs is None:
            client_configs = [CLIENT_CONFIG_PATH]
        if default_chrome_root is None:
            default_chrome_root = path_util.GetChromiumSrcDir()

        super(ChromiumConfig,