def v0_4(client, options, test_name):
  """Handles swarm_client/swarming.py starting b39e8cf08c."""
  swarming = os.path.join(client, 'swarming.py')
  cmd = [
    sys.executable,
    swarming,
    'collect',
    '--swarming', options.swarming,
    '--decorate',
    test_name,
  ]
  print('Running: %s' % ' '.join(cmd))
  sys.stdout.flush()
  proc = subprocess2.Popen(cmd, bufsize=0, stdout=subprocess2.PIPE)
  gtest_parser = gtest_utils.GTestLogParser()
  # Read line by line as output arrives; plain readlines() would buffer
  # everything until the process exits.
  for line in iter(proc.stdout.readline, ''):
    line = line.rstrip()
    print line
    gtest_parser.ProcessLine(line)

  proc.wait()

  annotation_utils.annotate(test_name, proc.returncode, gtest_parser)
  print('')
  return proc.returncode
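
Note that subprocess2, gtest_utils, and annotation_utils are Chromium build-tool modules not shown in this listing. The core pattern here, streaming a child process's stdout into a line parser as it arrives, can be sketched with the standard library alone; ToyParser and collect are hypothetical names for illustration:

import subprocess


class ToyParser(object):
  """Stand-in for gtest_utils.GTestLogParser; just counts lines."""

  def __init__(self):
    self.lines_seen = 0

  def ProcessLine(self, line):
    self.lines_seen += 1


def collect(cmd):
  parser = ToyParser()
  proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                          universal_newlines=True)
  # Yields lines as the child produces them, rather than waiting for EOF.
  for line in iter(proc.stdout.readline, ''):
    line = line.rstrip()
    print(line)
    parser.ProcessLine(line)
  proc.wait()
  return proc.returncode, parser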
Example #2
def process_gtest_json_output(exit_code, output_dir):
    # summary.json is produced by swarming.py itself. We are mostly interested
    # in the number of shards.
    try:
        with open(os.path.join(output_dir, 'summary.json')) as f:
            summary = json.load(f)
    except (IOError, ValueError):
        emit_warning('summary.json is missing or cannot be read',
                     traceback.format_exc())
        return

    # For each shard load its JSON output if available and feed it to the parser.
    parser = gtest_utils.GTestJSONParser()
    missing_shards = []
    for index, result in enumerate(summary['shards']):
        if result is not None:
            json_data = load_shard_json(output_dir, index)
            if json_data:
                parser.ProcessJSONData(json_data)
                continue
        missing_shards.append(index)

    # If some shards are missing, make it known, but continue parsing anyway.
    # The step should already be red, since swarming.py returns a non-zero
    # exit code in that case.
    if missing_shards:
        as_str = ', '.join(map(str, missing_shards))
        emit_warning(
            'missing results from some shards',
            'Missing results from the following shard(s): %s' % as_str)

    # Emit annotations with a summary of test execution.
    annotation_utils.annotate('', exit_code, parser)
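
The helpers load_shard_json and emit_warning are defined elsewhere in the same tool. A minimal sketch consistent with how they are used above; the one-output.json-per-shard-directory layout and the annotator strings are assumptions:

import json
import os
import sys


def emit_warning(title, log=None):
    # Buildbot annotator step warning (exact format assumed).
    print('@@@STEP_WARNINGS@@@')
    print(title)
    if log:
        print(log)
    sys.stdout.flush()


def load_shard_json(output_dir, index):
    """Returns one shard's parsed JSON output, or None if unreadable."""
    path = os.path.join(output_dir, str(index), 'output.json')  # Assumed layout.
    try:
        with open(path) as f:
            return json.load(f)
    except (IOError, ValueError):
        return None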
Example #3
File: runtest.py Project: krunalsoni01/src
def _MainMac(options, args, extra_env):
  """Runs the test on mac."""
  if len(args) < 1:
    raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

  test_exe = args[0]
  if options.run_python_script:
    build_dir = os.path.normpath(os.path.abspath(options.build_dir))
    test_exe_path = test_exe
  else:
    build_dir = os.path.normpath(os.path.abspath(options.build_dir))
    test_exe_path = os.path.join(build_dir, options.target, test_exe)

  # Nuke anything that appears to be a stale Chrome item left in the temporary
  # directory by previous test runs (e.g. crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  if options.run_shell_script:
    command = ['bash', test_exe_path]
  elif options.run_python_script:
    command = [sys.executable, test_exe]
  else:
    command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
  command.extend(args[1:])

  log_processor_class = _SelectLogProcessor(options)
  log_processor = _CreateLogProcessor(log_processor_class, options)

  if options.generate_json_file:
    if os.path.exists(options.test_output_xml):
      # remove the old XML output file.
      os.remove(options.test_output_xml)

  try:
    if _UsingGtestJson(options):
      json_file_name = log_processor.PrepareJSONFile(
          options.test_launcher_summary_output)
      command.append('--test-launcher-summary-output=%s' % json_file_name)

    pipes = []
    if options.use_symbolization_script:
      pipes = [_GetSanitizerSymbolizeCommand()]

    command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                          command)
    result = _RunGTestCommand(options, command, extra_env, pipes=pipes)
  finally:
    if _UsingGtestJson(options):
      log_processor.ProcessJSONFile(options.build_dir)

  if options.generate_json_file:
    if not _GenerateJSONForTestResults(options, log_processor):
      return 1

  if options.annotate:
    annotation_utils.annotate(
        options.test_type, result, log_processor,
        perf_dashboard_id=options.perf_dashboard_id)

  return result
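
The predicate _UsingGtestJson(options) gates the JSON handling in this and later examples, but its definition is not part of the listing. A plausible reconstruction, judging only from how it is used here; the exact conditions are assumptions:

def _UsingGtestJson(options):
  """Whether gtest results are collected via JSON (conditions assumed)."""
  return bool(options.annotate == 'gtest' and
              not options.run_python_script and
              options.test_launcher_summary_output)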
Example #4
def process_gtest_json_output(exit_code, output_dir):
  # summary.json is produced by swarming.py itself. We are mostly interested
  # in the number of shards.
  try:
    with open(os.path.join(output_dir, 'summary.json')) as f:
      summary = json.load(f)
  except (IOError, ValueError):
    emit_warning(
        'summary.json is missing or cannot be read', traceback.format_exc())
    return

  # For each shard load its JSON output if available and feed it to the parser.
  parser = gtest_utils.GTestJSONParser()
  missing_shards = []
  for index, result in enumerate(summary['shards']):
    if result is not None:
      json_data = load_shard_json(output_dir, index)
      if json_data:
        parser.ProcessJSONData(json_data)
        continue
    missing_shards.append(index)

  # If some shards are missing, make it known, but continue parsing anyway.
  # The step should already be red, since swarming.py returns a non-zero
  # exit code in that case.
  if missing_shards:
    as_str = ', '.join(map(str, missing_shards))
    emit_warning(
        'missing results from some shards',
        'Missing results from the following shard(s): %s' % as_str)

  # Emit annotations with a summary of test execution.
  annotation_utils.annotate('', exit_code, parser)
Example #5
def v0_4(client, options, test_name):
    """Handles swarm_client/swarming.py starting b39e8cf08c."""
    swarming = os.path.join(client, 'swarming.py')
    cmd = [
        sys.executable,
        swarming,
        'collect',
        '--swarming',
        options.swarming,
        '--decorate',
        test_name,
    ]
    print('Running: %s' % ' '.join(cmd))
    sys.stdout.flush()
    proc = subprocess2.Popen(cmd, bufsize=0, stdout=subprocess2.PIPE)
    gtest_parser = gtest_utils.GTestLogParser()
    # Read line by line as output arrives; plain readlines() would buffer
    # everything until the process exits.
    for line in iter(proc.stdout.readline, ''):
        line = line.rstrip()
        print line
        gtest_parser.ProcessLine(line)

    proc.wait()

    annotation_utils.annotate(test_name, proc.returncode, gtest_parser)
    print('')
    return proc.returncode
Example #6
def v0(client, options, test_name):
    """This code supports all the earliest versions of swarm_client.

  This is before --version was added.
  """
    sys.path.insert(0, client)
    import swarm_get_results  # pylint: disable=F0401

    timeout = swarm_get_results.DEFAULT_SHARD_WAIT_TIME
    test_keys = swarm_get_results.get_test_keys(options.swarming, test_name,
                                                timeout)
    if not test_keys:
        print >> sys.stderr, 'No test keys to get results with.'
        return 1

    if options.shards == -1:
        options.shards = len(test_keys)
    elif len(test_keys) < options.shards:
        print >> sys.stderr, (
            'Warning: Test should have %d shards, but only %d '
            'test keys were found' % (options.shards, len(test_keys)))

    gtest_parser = gtest_utils.GTestLogParser()
    exit_code = None
    shards_remaining = range(len(test_keys))
    first_result = True
    for index, result in swarm_get_results.yield_results(
            options.swarming, test_keys, timeout, None):
        assert index == result['config_instance_index']
        if first_result and result['num_config_instances'] != len(test_keys):
            # There are more test_keys than actual shards.
            shards_remaining = (
                shards_remaining[:result['num_config_instances']])
        shards_remaining.remove(index)
        first_result = False
        output, test_exit_code = gen_shard_output(result, gtest_parser)
        print output
        exit_code = max(exit_code, test_exit_code)

    # Print the annotation before the summary so it's easier to find when
    # scrolling down.
    annotation_utils.annotate(test_name, exit_code, gtest_parser)
    print('')
    return exit_code
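
The gen_shard_output helper above comes from the same file and is not shown. A sketch of its likely shape, feeding one shard's log to the shared parser and reporting the shard's exit code; the 'output' and 'exit_codes' keys of the result dict are assumptions:

def gen_shard_output(result, gtest_parser):
    """Returns (printable shard output, shard exit code); field names assumed."""
    index = result['config_instance_index']
    output = result.get('output', '')
    for line in output.splitlines():
        gtest_parser.ProcessLine(line)
    exit_code = max([0] + result.get('exit_codes', []))
    header = '=== Shard %d (exit code %d) ===' % (index, exit_code)
    return '%s\n%s' % (header, output), exit_code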
Example #7
def v0(client, options, test_name):
  """This code supports all the earliest versions of swarm_client.

  This is before --version was added.
  """
  sys.path.insert(0, client)
  import swarm_get_results  # pylint: disable=F0401

  timeout = swarm_get_results.DEFAULT_SHARD_WAIT_TIME
  test_keys = swarm_get_results.get_test_keys(
      options.swarming, test_name, timeout)
  if not test_keys:
    print >> sys.stderr, 'No test keys to get results with.'
    return 1

  if options.shards == -1:
    options.shards = len(test_keys)
  elif len(test_keys) < options.shards:
    print >> sys.stderr, ('Warning: Test should have %d shards, but only %d '
                          'test keys were found' % (options.shards,
                                                    len(test_keys)))

  gtest_parser = gtest_utils.GTestLogParser()
  exit_code = None
  shards_remaining = range(len(test_keys))
  first_result = True
  for index, result in swarm_get_results.yield_results(
      options.swarming, test_keys, timeout, None):
    assert index == result['config_instance_index']
    if first_result and result['num_config_instances'] != len(test_keys):
      # There are more test_keys than actual shards.
      shards_remaining = shards_remaining[:result['num_config_instances']]
    shards_remaining.remove(index)
    first_result = False
    output, test_exit_code = gen_shard_output(result, gtest_parser)
    print output
    exit_code = max(exit_code, test_exit_code)

  # Print the annotation before the summary so it's easier to find when
  # scrolling down.
  annotation_utils.annotate(test_name, exit_code, gtest_parser)
  print('')
  return exit_code
Example #8
def emit_test_annotations(exit_code, json_data):
  """Emits annotations with logs of failed tests."""
  parser = gtest_utils.GTestJSONParser()
  if json_data:
    parser.ProcessJSONData(json_data)
  annotation_utils.annotate('', exit_code, parser)
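
A usage sketch for the helper above: load the launcher's JSON summary if it exists and forward it along with the child's exit code. The output.json path and the surrounding values are assumptions:

import json

exit_code = 1  # e.g. the return code of the test process
try:
  with open('output.json') as f:  # Path is an assumption.
    json_data = json.load(f)
except (IOError, ValueError):
  json_data = None  # Annotations are still emitted for the exit code alone.
emit_test_annotations(exit_code, json_data)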
Example #9
def _Main(options, args, extra_env):
  """Using the target build configuration, run the executable given in the
  first non-option argument, passing any following arguments to that
  executable.

  Args:
    options: Command-line options for this invocation of runtest.py.
    args: Command and arguments for the test.
    extra_env: A dictionary of extra environment variables to set.

  Returns:
    Exit status code.
  """
  if len(args) < 1:
    raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

  xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
                           'third_party', 'xvfb', platform.architecture()[0])
  special_xvfb_dir = None
  fp_chromeos = options.factory_properties.get('chromeos', None)
  if (fp_chromeos or
      slave_utils.GypFlagIsOn(options, 'use_aura') or
      slave_utils.GypFlagIsOn(options, 'chromeos')):
    special_xvfb_dir = xvfb_path

  build_dir = os.path.normpath(os.path.abspath(options.build_dir))
  bin_dir = os.path.join(build_dir, options.target)
  slave_name = options.slave_name or slave_utils.SlaveBuildName(build_dir)

  test_exe = args[0]
  if options.run_python_script:
    test_exe_path = test_exe
  else:
    test_exe_path = os.path.join(bin_dir, test_exe)

  if not os.path.exists(test_exe_path):
    if options.factory_properties.get('succeed_on_missing_exe', False):
      print '%s missing but succeed_on_missing_exe used, exiting' % (
          test_exe_path)
      return 0
    raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)

  if sys.platform == 'linux2':
    # Unset http_proxy and HTTPS_PROXY environment variables.  When set, this
    # causes some tests to hang.  See http://crbug.com/139638 for more info.
    if 'http_proxy' in os.environ:
      del os.environ['http_proxy']
      print 'Deleted http_proxy environment variable.'
    if 'HTTPS_PROXY' in os.environ:
      del os.environ['HTTPS_PROXY']
      print 'Deleted HTTPS_PROXY environment variable.'

    # Path to SUID sandbox binary. This must be installed on all bots.
    extra_env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH

    extra_env['LD_LIBRARY_PATH'] = ''
    if options.enable_lsan:
      # Use the debug version of libstdc++ under LSan. If we don't, there will
      # be a lot of incomplete stack traces in the reports.
      extra_env['LD_LIBRARY_PATH'] += '/usr/lib/x86_64-linux-gnu/debug:'
    extra_env['LD_LIBRARY_PATH'] += '%s:%s/lib:%s/lib.target' % (
        bin_dir, bin_dir, bin_dir)

  if options.run_shell_script:
    command = ['bash', test_exe_path]
  elif options.run_python_script:
    command = [sys.executable, test_exe]
  else:
    command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
  command.extend(args[1:])

  # Nuke anything that appears to be a stale Chrome item left in the temporary
  # directory by previous test runs (e.g. crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  log_processor = None
  if _UsingGtestJson(options):
    log_processor = gtest_utils.GTestJSONParser(
        options.build_properties.get('mastername'))

  if options.generate_json_file:
    if os.path.exists(options.test_output_xml):
      # remove the old XML output file.
      os.remove(options.test_output_xml)

  try:
    # TODO(dpranke): checking on test_exe is a temporary hack until we
    # can change the buildbot master to pass --xvfb instead of --no-xvfb
    # for these two steps. See
    # https://code.google.com/p/chromium/issues/detail?id=179814
    start_xvfb = (
        sys.platform == 'linux2' and (
            options.xvfb or
            'layout_test_wrapper' in test_exe or
            'devtools_perf_test_wrapper' in test_exe))
    if start_xvfb:
      xvfb.StartVirtualX(
          slave_name, bin_dir,
          with_wm=(options.factory_properties.get('window_manager', 'True') ==
                   'True'),
          server_dir=special_xvfb_dir)

    if _UsingGtestJson(options):
      json_file_name = log_processor.PrepareJSONFile(
          options.test_launcher_summary_output)
      command.append('--test-launcher-summary-output=%s' % json_file_name)

    pipes = []
    # See the comment in main() regarding offline symbolization.
    if options.use_symbolization_script:
      symbolize_command = _GetSanitizerSymbolizeCommand(
          strip_path_prefix=options.strip_path_prefix)
      pipes = [symbolize_command]

    command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                          command)
    result = _RunGTestCommand(options, command, extra_env, pipes=pipes)
  finally:
    if start_xvfb:
      xvfb.StopVirtualX(slave_name)
    if _UsingGtestJson(options):
      if options.use_symbolization_script:
        _SymbolizeSnippetsInJSON(options, json_file_name)
      log_processor.ProcessJSONFile(options.build_dir)

  if options.generate_json_file:
    if not _GenerateJSONForTestResults(options, log_processor):
      return 1

  if options.annotate:
    annotation_utils.annotate(options.test_type, result, log_processor)

  return result
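
_RunGTestCommand takes the command plus an optional pipes list; Example #11 below inlines the equivalent plumbing by hand. A reduced sketch of that chaining, with the signature and the return-code convention inferred from the surrounding examples rather than taken from the actual implementation:

import os
import subprocess


def _RunGTestCommand(options, command, extra_env, pipes=None):
  env = os.environ.copy()
  env.update(extra_env or {})
  pipes = pipes or []
  procs = [subprocess.Popen(
      command, env=env, stdout=subprocess.PIPE if pipes else None)]
  for i, pipe_cmd in enumerate(pipes):
    is_last = (i == len(pipes) - 1)
    procs.append(subprocess.Popen(
        pipe_cmd, env=env, stdin=procs[-1].stdout,
        stdout=None if is_last else subprocess.PIPE))
    # Close our copy so the upstream process gets SIGPIPE if downstream dies.
    procs[-2].stdout.close()
  for proc in procs:
    proc.wait()
  # First non-zero return code wins, as in Example #11.
  return next((p.returncode for p in procs if p.returncode), 0)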
Example #10
File: runtest.py Project: krunalsoni01/src
def _MainWin(options, args, extra_env):
  """Runs tests on windows.

  Using the target build configuration, run the executable given in the
  first non-option argument, passing any following arguments to that
  executable.

  Args:
    options: Command-line options for this invocation of runtest.py.
    args: Command and arguments for the test.
    extra_env: A dictionary of extra environment variables to set.

  Returns:
    Exit status code.
  """
  if len(args) < 1:
    raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

  test_exe = args[0]
  build_dir = os.path.abspath(options.build_dir)
  if options.run_python_script:
    test_exe_path = test_exe
  else:
    test_exe_path = os.path.join(build_dir, options.target, test_exe)

  if not os.path.exists(test_exe_path):
    if options.factory_properties.get('succeed_on_missing_exe', False):
      print '%s missing but succeed_on_missing_exe used, exiting' % (
          test_exe_path)
      return 0
    raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)

  if options.run_python_script:
    command = [sys.executable, test_exe]
  else:
    command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)

  command.extend(args[1:])

  # Nuke anything that appears to be a stale Chrome item left in the temporary
  # directory by previous test runs (e.g. crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  log_processor_class = _SelectLogProcessor(options)
  log_processor = _CreateLogProcessor(log_processor_class, options)

  if options.generate_json_file:
    if os.path.exists(options.test_output_xml):
      # remove the old XML output file.
      os.remove(options.test_output_xml)

  try:
    if _UsingGtestJson(options):
      json_file_name = log_processor.PrepareJSONFile(
          options.test_launcher_summary_output)
      command.append('--test-launcher-summary-output=%s' % json_file_name)

    command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                          command)
    result = _RunGTestCommand(options, command, extra_env)
  finally:
    if _UsingGtestJson(options):
      log_processor.ProcessJSONFile(options.build_dir)

  if options.generate_json_file:
    if not _GenerateJSONForTestResults(options, log_processor):
      return 1

  if options.annotate:
    annotation_utils.annotate(
        options.test_type, result, log_processor,
        perf_dashboard_id=options.perf_dashboard_id)

  return result
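
_SelectLogProcessor and _CreateLogProcessor are not included in this listing. A sketch of a selector/factory pair consistent with Examples #9 and #11, where the JSON parser is constructed with the master name; the selection rules themselves are assumptions:

def _SelectLogProcessor(options):
  """Picks a parser class for this run (rules assumed)."""
  if _UsingGtestJson(options):
    return gtest_utils.GTestJSONParser
  if options.annotate == 'gtest':
    return gtest_utils.GTestLogParser
  return None


def _CreateLogProcessor(log_processor_class, options):
  if log_processor_class is None:
    return None
  if log_processor_class is gtest_utils.GTestJSONParser:
    # Examples #9 and #11 pass the master name to the JSON parser.
    return log_processor_class(options.build_properties.get('mastername'))
  return log_processor_class()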
Example #11
def _Main(options, args, extra_env):
  """Using the target build configuration, run the executable given in the
  first non-option argument, passing any following arguments to that
  executable.

  Args:
    options: Command-line options for this invocation of runtest.py.
    args: Command and arguments for the test.
    extra_env: A dictionary of extra environment variables to set.

  Returns:
    Exit status code.
  """
  if len(args) < 1:
    raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

  xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
                           'third_party', 'xvfb', platform.architecture()[0])

  build_dir = os.path.normpath(os.path.abspath(options.build_dir))
  bin_dir = os.path.join(build_dir, options.target)
  slave_name = options.slave_name or slave_utils.SlaveBuildName(build_dir)

  test_exe = args[0]
  if options.run_python_script:
    test_exe_path = test_exe
  else:
    test_exe_path = os.path.join(bin_dir, test_exe)

  if not os.path.exists(test_exe_path):
    if options.factory_properties.get('succeed_on_missing_exe', False):
      print '%s missing but succeed_on_missing_exe used, exiting' % (
          test_exe_path)
      return 0
    raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)

  if sys.platform == 'linux2':
    # Unset http_proxy and HTTPS_PROXY environment variables.  When set, this
    # causes some tests to hang.  See http://crbug.com/139638 for more info.
    if 'http_proxy' in os.environ:
      del os.environ['http_proxy']
      print 'Deleted http_proxy environment variable.'
    if 'HTTPS_PROXY' in os.environ:
      del os.environ['HTTPS_PROXY']
      print 'Deleted HTTPS_PROXY environment variable.'

    # Path to SUID sandbox binary. This must be installed on all bots.
    extra_env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH

    extra_env['LD_LIBRARY_PATH'] = ''
    if options.enable_lsan:
      # Use the debug version of libstdc++ under LSan. If we don't, there will
      # be a lot of incomplete stack traces in the reports.
      extra_env['LD_LIBRARY_PATH'] += '/usr/lib/x86_64-linux-gnu/debug:'
    extra_env['LD_LIBRARY_PATH'] += '%s:%s/lib:%s/lib.target' % (
        bin_dir, bin_dir, bin_dir)

  if options.run_shell_script:
    command = ['bash', test_exe_path]
  elif options.run_python_script:
    command = [sys.executable, test_exe]
  else:
    command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
  command.extend(args[1:])

  # Nuke anything that appears to be a stale Chrome item left in the temporary
  # directory by previous test runs (e.g. crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  log_processor = None
  if _UsingGtestJson(options):
    log_processor = gtest_utils.GTestJSONParser(
        options.build_properties.get('mastername'))

  try:
    # TODO(dpranke): checking on test_exe is a temporary hack until we
    # can change the buildbot master to pass --xvfb instead of --no-xvfb
    # for these two steps. See
    # https://code.google.com/p/chromium/issues/detail?id=179814
    start_xvfb = (
        sys.platform == 'linux2' and (
            options.xvfb or
            'layout_test_wrapper' in test_exe or
            'devtools_perf_test_wrapper' in test_exe))
    if start_xvfb:
      xvfb.StartVirtualX(
          slave_name, bin_dir,
          with_wm=(options.factory_properties.get('window_manager', 'True') ==
                   'True'))

    if _UsingGtestJson(options):
      json_file_name = log_processor.PrepareJSONFile(
          options.test_launcher_summary_output)
      command.append('--test-launcher-summary-output=%s' % json_file_name)

    command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                          command)

    env = os.environ.copy()
    if extra_env:
      print 'Additional test environment:'
      for k, v in sorted(extra_env.items()):
        print '  %s=%s' % (k, v)
    env.update(extra_env or {})

    # Trigger bot mode (test retries, redirection of stdio, possibly faster,
    # etc.) - using an environment variable instead of command-line flags
    # because some internal waterfalls run this for totally non-gtest code.
    # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
    env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})

    if options.use_symbolization_script:
      symbolize_command = _GetSanitizerSymbolizeCommand(
          strip_path_prefix=options.strip_path_prefix)

      command_process = subprocess.Popen(
          command, env=env, stdout=subprocess.PIPE)
      symbolize_process = subprocess.Popen(
          symbolize_command, env=env, stdin=command_process.stdout)
      command_process.stdout.close()

      command_process.wait()
      symbolize_process.wait()

      result = command_process.returncode
      if result == 0:
        result = symbolize_process.returncode
    else:
      result = subprocess.call(command, env=env)
  finally:
    if start_xvfb:
      xvfb.StopVirtualX(slave_name)
    if _UsingGtestJson(options):
      if options.use_symbolization_script:
        _SymbolizeSnippetsInJSON(options, json_file_name)
      log_processor.ProcessJSONFile(options.build_dir)

  if options.annotate:
    annotation_utils.annotate(options.test_type, result, log_processor)

  return result
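
The StartVirtualX/StopVirtualX pairing wrapped in try/finally, as above, is a natural fit for a context manager. A refactoring sketch, not part of the original code:

from contextlib import contextmanager


@contextmanager
def virtual_x(slave_name, bin_dir, **kwargs):
  """Runs the enclosed block under a virtual X server, always stopping it."""
  xvfb.StartVirtualX(slave_name, bin_dir, **kwargs)
  try:
    yield
  finally:
    xvfb.StopVirtualX(slave_name)

With this, the xvfb handling collapses to a single with virtual_x(slave_name, bin_dir): block around the test invocation, and teardown can no longer be forgotten on an early return.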
Example #12
def emit_test_annotations(exit_code, json_data):
    """Emits annotations with logs of failed tests."""
    parser = gtest_utils.GTestJSONParser()
    if json_data:
        parser.ProcessJSONData(json_data)
    annotation_utils.annotate('', exit_code, parser)
Example #13
def _Main(options, args, extra_env):
    """Using the target build configuration, run the executable given in the
  first non-option argument, passing any following arguments to that
  executable.

  Args:
    options: Command-line options for this invocation of runtest.py.
    args: Command and arguments for the test.
    extra_env: A dictionary of extra environment variables to set.

  Returns:
    Exit status code.
  """
    if len(args) < 1:
        raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

    xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
                             'third_party', 'xvfb',
                             platform.architecture()[0])
    special_xvfb_dir = None
    fp_chromeos = options.factory_properties.get('chromeos', None)
    if (fp_chromeos or slave_utils.GypFlagIsOn(options, 'use_aura')
            or slave_utils.GypFlagIsOn(options, 'chromeos')):
        special_xvfb_dir = xvfb_path

    build_dir = os.path.normpath(os.path.abspath(options.build_dir))
    bin_dir = os.path.join(build_dir, options.target)
    slave_name = options.slave_name or slave_utils.SlaveBuildName(build_dir)

    test_exe = args[0]
    if options.run_python_script:
        test_exe_path = test_exe
    else:
        test_exe_path = os.path.join(bin_dir, test_exe)

    if not os.path.exists(test_exe_path):
        if options.factory_properties.get('succeed_on_missing_exe', False):
            print '%s missing but succeed_on_missing_exe used, exiting' % (
                test_exe_path)
            return 0
        raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)

    if sys.platform == 'linux2':
        # Unset http_proxy and HTTPS_PROXY environment variables.  When set, this
        # causes some tests to hang.  See http://crbug.com/139638 for more info.
        if 'http_proxy' in os.environ:
            del os.environ['http_proxy']
            print 'Deleted http_proxy environment variable.'
        if 'HTTPS_PROXY' in os.environ:
            del os.environ['HTTPS_PROXY']
            print 'Deleted HTTPS_PROXY environment variable.'

        # Path to SUID sandbox binary. This must be installed on all bots.
        extra_env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH

        extra_env['LD_LIBRARY_PATH'] = ''
        if options.enable_lsan:
            # Use the debug version of libstdc++ under LSan. If we don't, there will
            # be a lot of incomplete stack traces in the reports.
            extra_env['LD_LIBRARY_PATH'] += '/usr/lib/x86_64-linux-gnu/debug:'
        extra_env['LD_LIBRARY_PATH'] += '%s:%s/lib:%s/lib.target' % (
            bin_dir, bin_dir, bin_dir)

    if options.run_shell_script:
        command = ['bash', test_exe_path]
    elif options.run_python_script:
        command = [sys.executable, test_exe]
    else:
        command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
    command.extend(args[1:])

    # Nuke anything that appears to be a stale Chrome item left in the
    # temporary directory by previous test runs (e.g. crashes or unittest
    # leaks).
    slave_utils.RemoveChromeTemporaryFiles()

    log_processor = None
    if _UsingGtestJson(options):
        log_processor = gtest_utils.GTestJSONParser(
            options.build_properties.get('mastername'))

    if options.generate_json_file:
        if os.path.exists(options.test_output_xml):
            # remove the old XML output file.
            os.remove(options.test_output_xml)

    try:
        # TODO(dpranke): checking on test_exe is a temporary hack until we
        # can change the buildbot master to pass --xvfb instead of --no-xvfb
        # for these two steps. See
        # https://code.google.com/p/chromium/issues/detail?id=179814
        start_xvfb = (sys.platform == 'linux2'
                      and (options.xvfb or 'layout_test_wrapper' in test_exe
                           or 'devtools_perf_test_wrapper' in test_exe))
        if start_xvfb:
            xvfb.StartVirtualX(slave_name,
                               bin_dir,
                               with_wm=(options.factory_properties.get(
                                   'window_manager', 'True') == 'True'),
                               server_dir=special_xvfb_dir)

        if _UsingGtestJson(options):
            json_file_name = log_processor.PrepareJSONFile(
                options.test_launcher_summary_output)
            command.append('--test-launcher-summary-output=%s' %
                           json_file_name)

        pipes = []
        # See the comment in main() regarding offline symbolization.
        if options.use_symbolization_script:
            symbolize_command = _GetSanitizerSymbolizeCommand(
                strip_path_prefix=options.strip_path_prefix)
            pipes = [symbolize_command]

        command = _GenerateRunIsolatedCommand(build_dir, test_exe_path,
                                              options, command)
        result = _RunGTestCommand(options, command, extra_env, pipes=pipes)
    finally:
        if start_xvfb:
            xvfb.StopVirtualX(slave_name)
        if _UsingGtestJson(options):
            if options.use_symbolization_script:
                _SymbolizeSnippetsInJSON(options, json_file_name)
            log_processor.ProcessJSONFile(options.build_dir)

    if options.generate_json_file:
        if not _GenerateJSONForTestResults(options, log_processor):
            return 1

    if options.annotate:
        annotation_utils.annotate(options.test_type, result, log_processor)

    return result
Example #14
def _Main(options, args, extra_env):
    """Using the target build configuration, run the executable given in the
  first non-option argument, passing any following arguments to that
  executable.

  Args:
    options: Command-line options for this invocation of runtest.py.
    args: Command and arguments for the test.
    extra_env: A dictionary of extra environment variables to set.

  Returns:
    Exit status code.
  """
    if len(args) < 1:
        raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

    xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
                             'third_party', 'xvfb',
                             platform.architecture()[0])

    build_dir = os.path.normpath(os.path.abspath(options.build_dir))
    bin_dir = os.path.join(build_dir, options.target)

    test_exe = args[0]
    if options.run_python_script:
        test_exe_path = test_exe
    else:
        test_exe_path = os.path.join(bin_dir, test_exe)

    if not os.path.exists(test_exe_path):
        if options.factory_properties.get('succeed_on_missing_exe', False):
            print '%s missing but succeed_on_missing_exe used, exiting' % (
                test_exe_path)
            return 0
        raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)

    if sys.platform == 'linux2':
        # Unset http_proxy and HTTPS_PROXY environment variables.  When set, this
        # causes some tests to hang.  See http://crbug.com/139638 for more info.
        if 'http_proxy' in os.environ:
            del os.environ['http_proxy']
            print 'Deleted http_proxy environment variable.'
        if 'HTTPS_PROXY' in os.environ:
            del os.environ['HTTPS_PROXY']
            print 'Deleted HTTPS_PROXY environment variable.'

        # Path to SUID sandbox binary. This must be installed on all bots.
        extra_env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH

        extra_env['LD_LIBRARY_PATH'] = ''
        if options.enable_lsan:
            # Use the debug version of libstdc++ under LSan. If we don't, there will
            # be a lot of incomplete stack traces in the reports.
            extra_env['LD_LIBRARY_PATH'] += '/usr/lib/x86_64-linux-gnu/debug:'
        extra_env['LD_LIBRARY_PATH'] += '%s:%s/lib:%s/lib.target' % (
            bin_dir, bin_dir, bin_dir)

    if options.run_shell_script:
        command = ['bash', test_exe_path]
    elif options.run_python_script:
        command = [sys.executable, test_exe]
    else:
        command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
    command.extend(args[1:])

    log_processor = None
    if _UsingGtestJson(options):
        log_processor = gtest_utils.GTestJSONParser(
            options.build_properties.get('mastername'))

    try:
        # TODO(dpranke): checking on test_exe is a temporary hack until we
        # can change the buildbot master to pass --xvfb instead of --no-xvfb
        # for these two steps. See
        # https://code.google.com/p/chromium/issues/detail?id=179814
        start_xvfb = (sys.platform == 'linux2'
                      and (options.xvfb or 'layout_test_wrapper' in test_exe
                           or 'devtools_perf_test_wrapper' in test_exe))
        if start_xvfb:
            xvfb.StartVirtualX(None,
                               bin_dir,
                               with_wm=(options.factory_properties.get(
                                   'window_manager', 'True') == 'True'))

        if _UsingGtestJson(options):
            json_file_name = log_processor.PrepareJSONFile(
                options.test_launcher_summary_output)
            command.append('--test-launcher-summary-output=%s' %
                           json_file_name)

        command = _GenerateRunIsolatedCommand(build_dir, test_exe_path,
                                              options, command)

        env = os.environ.copy()
        if extra_env:
            print 'Additional test environment:'
            for k, v in sorted(extra_env.items()):
                print '  %s=%s' % (k, v)
        env.update(extra_env or {})

        # Trigger bot mode (test retries, redirection of stdio, possibly faster,
        # etc.) - using an environment variable instead of command-line flags
        # because some internal waterfalls run this for totally non-gtest code.
        # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
        env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})

        if options.use_symbolization_script:
            symbolize_command = _GetSanitizerSymbolizeCommand(
                strip_path_prefix=options.strip_path_prefix)

            command_process = subprocess.Popen(command,
                                               env=env,
                                               stdout=subprocess.PIPE)
            symbolize_process = subprocess.Popen(symbolize_command,
                                                 env=env,
                                                 stdin=command_process.stdout)
            command_process.stdout.close()

            command_process.wait()
            symbolize_process.wait()

            result = command_process.returncode
            if result == 0:
                result = symbolize_process.returncode
        else:
            result = subprocess.call(command, env=env)
    finally:
        if start_xvfb:
            xvfb.StopVirtualX(None)
        if _UsingGtestJson(options):
            if options.use_symbolization_script:
                _SymbolizeSnippetsInJSON(options, json_file_name)
            log_processor.ProcessJSONFile(options.build_dir)

    if options.annotate:
        annotation_utils.annotate(options.test_type, result, log_processor)

    return result