def main():
  parser = argparse.ArgumentParser()
  AddCommonArgs(parser)
  args, gpu_test_args = parser.parse_known_args()
  ConfigureLogging(args)

  # If output directory is not set, assume the script is being launched
  # from the output directory.
  if not args.output_directory:
    args.output_directory = os.getcwd()

  gpu_script = [os.path.join(path_util.GetChromiumSrcDir(), 'content',
                'test', 'gpu', 'run_gpu_integration_test.py')]

  # Pass all other arguments to the gpu integration tests.
  gpu_script.extend(gpu_test_args)
  with GetDeploymentTargetForArgs(args) as target:
    target.Start()
    _, fuchsia_ssh_port = target._GetEndpoint()
    gpu_script.extend(['--fuchsia-ssh-config-dir', args.output_directory])
    gpu_script.extend(['--fuchsia-ssh-port', str(fuchsia_ssh_port)])

    web_engine_dir = os.path.join(args.output_directory, 'gen',
        'fuchsia', 'engine')

    # Install necessary packages on the device.
    target.InstallPackage([
        os.path.join(web_engine_dir, 'web_engine', 'web_engine.far'),
        os.path.join(web_engine_dir, 'web_engine_shell',
            'web_engine_shell.far')
    ])
    return subprocess.call(gpu_script)
Example #2
def main():
    parser = argparse.ArgumentParser()
    AddCommonArgs(parser)
    parser.add_argument('child_args',
                        nargs='*',
                        help='Arguments for the test process.')
    args = parser.parse_args()
    ConfigureLogging(args)

    with GetDeploymentTargetForArgs(args) as target:
        target.Start()
        RunPackage(args.output_directory, target, args.package,
                   args.package_name, args.child_args)
Example #3
def main():
    parser = argparse.ArgumentParser()

    logging.getLogger().setLevel(logging.INFO)
    parser.add_argument(
        '--command',
        required=True,
        help='FFX command to run. Runtime arguments are handled '
        'using the %%args%% placeholder.')
    parser.add_argument('child_args',
                        nargs='*',
                        help='Arguments for the command.')
    AddCommonArgs(parser)
    AddTargetSpecificArgs(parser)
    args = parser.parse_args()

    # Prepare the arglist for "ffx". %args% is replaced with all positional
    # arguments given to the script.
    ffx_args = shlex.split(args.command)
    # Replace %args%, if present, with the positional arguments.
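    # For example, --command 'test run %args%' with child_args ['--foo']
    # yields ffx_args == ['test', 'run', '--foo'].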
    try:
        args_index = ffx_args.index('%args%')
        ffx_args[args_index:args_index + 1] = args.child_args
    except ValueError:
        # %args% is not present; use the command as-is.
        pass

    with GetDeploymentTargetForArgs(args) as target:
        target.Start()
        target.StartSystemLog(args.package)

        # Extend the lifetime of |pkg_repo| beyond InstallPackage so that the
        # package can be instantiated after resolution.
        with target.GetPkgRepo() as pkg_repo:
            target.InstallPackage(args.package)
            process = target.RunFFXCommand(ffx_args)

            # It's possible that components installed by this script may be
            # instantiated at arbitrary points in the future.
            # This script (specifically |pkg_repo|) must be kept alive until it
            # is explicitly terminated by the user, otherwise pkgsvr will
            # throw an error when launching components.
            logging.info('Command is now running. Press CTRL-C to exit.')
            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                pass

    return 0
Example #4
def main():
  parser = argparse.ArgumentParser()
  AddCommonArgs(parser)
  parser.add_argument(
      '--child-arg',
      action='append',
      help='Arguments for the executable.',
      default=[])
  parser.add_argument(
      'child_args', nargs='*', help='Arguments for the executable.', default=[])
  args = parser.parse_args()
  ConfigureLogging(args)

  with GetDeploymentTargetForArgs(args) as target:
    target.Start()

    child_args = args.child_arg + args.child_args
    run_package_args = RunPackageArgs.FromCommonArgs(args)
    returncode = RunPackage(args.output_directory, target, args.package,
                            args.package_name, child_args, run_package_args)
    return returncode
Example #5
def main():
  parser = argparse.ArgumentParser()

  logging.getLogger().setLevel(logging.INFO)
  parser.add_argument('--command',
                      required=True,
                      help='FFX command to run. Runtime arguments are handled '
                      'using the %%args%% placeholder.')
  AddCommonArgs(parser)
  AddTargetSpecificArgs(parser)
  args, runtime_args = parser.parse_known_args()

  command_substituted = [
      chunk.replace('%args%', ' '.join(runtime_args))
      for chunk in shlex.split(args.command)
  ]
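  # Note that runtime_args are joined into a single argument: with
  # --command 'test run %args%' and runtime args ['--foo', 'bar'], the
  # result is ['test', 'run', '--foo bar'].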

  with GetDeploymentTargetForArgs(args) as target:
    target.Start()
    target.StartSystemLog(args.package)

    # Extend the lifetime of |pkg_repo| beyond InstallPackage so that the
    # package can be instantiated after resolution.
    with target.GetPkgRepo() as pkg_repo:
      target.InstallPackage(args.package)
      process = target.RunFFXCommand(command_substituted)

      # It's possible that components installed by this script may be
      # instantiated at arbitrary points in the future.
      # This script (specifically |pkg_repo|) must be kept alive until it
      # is explicitly terminated by the user, otherwise pkgsvr will
      # throw an error when launching components.
      logging.info('Command is now running. Press CTRL-C to exit.')
      try:
        while True:
          time.sleep(1)
      except KeyboardInterrupt:
        pass

  return 0
Example #6
def main():
  parser = argparse.ArgumentParser()
  AddCommonArgs(parser)
  args, gpu_test_args = parser.parse_known_args()
  ConfigureLogging(args)

  additional_target_args = {}

  # If out_dir is not set, assume the script is being launched
  # from the output directory.
  if not args.out_dir:
    args.out_dir = os.getcwd()
    additional_target_args['out_dir'] = args.out_dir

  # Create a temporary log file that Telemetry will look for and use to build
  # an artifact when tests fail.
  temp_log_file = False
  if not args.system_log_file:
    args.system_log_file = os.path.join(tempfile.mkdtemp(), 'system-log')
    temp_log_file = True
    additional_target_args['system_log_file'] = args.system_log_file

  package_names = ['web_engine_with_webui', 'web_engine_shell']
  web_engine_dir = os.path.join(args.out_dir, 'gen', 'fuchsia', 'engine')
  gpu_script = [
      os.path.join(path_util.GetChromiumSrcDir(), 'content', 'test', 'gpu',
                   'run_gpu_integration_test.py')
  ]

  # Pass all other arguments to the gpu integration tests.
  gpu_script.extend(gpu_test_args)
  try:
    with GetDeploymentTargetForArgs(additional_target_args) as target:
      target.Start()
      fuchsia_device_address, fuchsia_ssh_port = target._GetEndpoint()
      gpu_script.extend(['--chromium-output-directory', args.out_dir])
      gpu_script.extend(['--fuchsia-device-address', fuchsia_device_address])
      gpu_script.extend(['--fuchsia-ssh-config', target._GetSshConfigPath()])
      if fuchsia_ssh_port:
        gpu_script.extend(['--fuchsia-ssh-port', str(fuchsia_ssh_port)])
      gpu_script.extend(['--fuchsia-system-log-file', args.system_log_file])
      if args.verbose:
        gpu_script.append('-v')

      # Set up logging of WebEngine
      listener = target.RunCommandPiped(['log_listener'],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
      build_ids_paths = map(
          lambda package_name: os.path.join(
              web_engine_dir, package_name, 'ids.txt'),
          package_names)
      RunSymbolizer(listener.stdout, open(args.system_log_file, 'w'),
                    build_ids_paths)

      # Keep the Amber repository live while the test runs.
      with target.GetAmberRepo():
        # Install necessary packages on the device.
        far_files = map(
            lambda package_name: os.path.join(
                web_engine_dir, package_name, package_name + '.far'),
            package_names)
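        # Each entry resolves to e.g.
        # <out_dir>/gen/fuchsia/engine/web_engine_shell/web_engine_shell.far.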
        target.InstallPackage(far_files)
        return subprocess.call(gpu_script)
  finally:
    if temp_log_file:
      shutil.rmtree(os.path.dirname(args.system_log_file))
Example #7
def main():
  parser = argparse.ArgumentParser()
  AddCommonArgs(parser)
  parser.add_argument('--gtest_filter',
                      help='GTest filter to use in place of any default.')
  parser.add_argument('--gtest_repeat',
                      help='GTest repeat value to use. This also disables the '
                           'test launcher timeout.')
  parser.add_argument('--gtest_break_on_failure', action='store_true',
                      default=False,
                      help='Should GTest break on failure; useful with '
                           '--gtest_repeat.')
  parser.add_argument('--single-process-tests', action='store_true',
                      default=False,
                      help='Runs the tests and the launcher in the same '
                           'process. Useful for debugging.')
  parser.add_argument('--test-launcher-batch-limit',
                      type=int,
                      help='Sets the limit of test batch to run in a single '
                      'process.')
  # --test-launcher-filter-file is specified relative to --output-directory,
  # so specifying type=os.path.* will break it.
  parser.add_argument('--test-launcher-filter-file',
                      default=None,
                      help='Override default filter file passed to target test '
                      'process. Set an empty path to disable filtering.')
  parser.add_argument('--test-launcher-jobs',
                      type=int,
                      help='Sets the number of parallel test jobs.')
  parser.add_argument('--test-launcher-summary-output',
                      help='Where the test launcher will output its json.')
  parser.add_argument('--enable-test-server', action='store_true',
                      default=False,
                      help='Enable Chrome test server spawner.')
  parser.add_argument('child_args', nargs='*',
                      help='Arguments for the test process.')
  args = parser.parse_args()
  ConfigureLogging(args)

  child_args = ['--test-launcher-retry-limit=0']
  if args.single_process_tests:
    child_args.append('--single-process-tests')
  if args.test_launcher_batch_limit:
    child_args.append('--test-launcher-batch-limit=%d' %
                       args.test_launcher_batch_limit)

  test_concurrency = args.test_launcher_jobs \
      if args.test_launcher_jobs else DEFAULT_TEST_CONCURRENCY
  child_args.append('--test-launcher-jobs=%d' % test_concurrency)

  if args.gtest_filter:
    child_args.append('--gtest_filter=' + args.gtest_filter)
  if args.gtest_repeat:
    child_args.append('--gtest_repeat=' + args.gtest_repeat)
    child_args.append('--test-launcher-timeout=-1')
  if args.gtest_break_on_failure:
    child_args.append('--gtest_break_on_failure')
  if args.child_args:
    child_args.extend(args.child_args)

  if args.test_launcher_summary_output:
    child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)

  with GetDeploymentTargetForArgs(args) as target:
    target.Start()

    if args.test_launcher_filter_file:
      target.PutFile(args.test_launcher_filter_file, TEST_FILTER_PATH)
      child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)

    forwarder = None
    if args.enable_test_server:
      test_server, forwarder = SetupTestServer(target, test_concurrency)

    RunPackage(args.output_directory, target, args.package,
               child_args, args.package_manifest)

    if forwarder:
      forwarder.terminate()
      forwarder.wait()

    if args.test_launcher_summary_output:
      target.GetFile(TEST_RESULT_PATH, args.test_launcher_summary_output)
Example #8
def main():
    parser = argparse.ArgumentParser()
    AddCommonArgs(parser)

    parser.add_argument('--gtest_filter',
                        help='GTest filter to use in place of any default.')
    parser.add_argument(
        '--gtest_repeat',
        help='GTest repeat value to use. This also disables the '
        'test launcher timeout.')
    parser.add_argument(
        '--test-launcher-retry-limit',
        help='Number of times that test suite will retry failing '
        'tests. This is multiplicative with --gtest_repeat.')
    parser.add_argument('--test-launcher-shard-index',
                        type=int,
                        default=os.environ.get('GTEST_SHARD_INDEX'),
                        help='Index of this instance amongst swarming shards.')
    parser.add_argument('--test-launcher-total-shards',
                        type=int,
                        default=os.environ.get('GTEST_TOTAL_SHARDS'),
                        help='Total number of swarming shards of this suite.')
    parser.add_argument('--gtest_break_on_failure',
                        action='store_true',
                        default=False,
                        help='Should GTest break on failure; useful with '
                        '--gtest_repeat.')
    parser.add_argument('--single-process-tests',
                        action='store_true',
                        default=False,
                        help='Runs the tests and the launcher in the same '
                        'process. Useful for debugging.')
    parser.add_argument('--test-launcher-batch-limit',
                        type=int,
                        help='Sets the limit of test batch to run in a single '
                        'process.')
    # --test-launcher-filter-file is specified relative to --output-directory,
    # so specifying type=os.path.* will break it.
    parser.add_argument(
        '--test-launcher-filter-file',
        default=None,
        help='Override default filter file passed to target test '
        'process. Set an empty path to disable filtering.')
    parser.add_argument('--test-launcher-jobs',
                        type=int,
                        help='Sets the number of parallel test jobs.')
    parser.add_argument('--test-launcher-summary-output',
                        help='Where the test launcher will output its json.')
    parser.add_argument('--enable-test-server',
                        action='store_true',
                        default=False,
                        help='Enable Chrome test server spawner.')
    parser.add_argument(
        '--test-launcher-bot-mode',
        action='store_true',
        default=False,
        help='Informs the TestLauncher that it should enable '
        'special allowances for running on a test bot.')
    parser.add_argument('--child-arg',
                        action='append',
                        help='Arguments for the test process.')
    parser.add_argument('child_args',
                        nargs='*',
                        help='Arguments for the test process.')
    args = parser.parse_args()

    # --output-directory is required for tests launched with this script.
    if not args.output_directory:
        raise ValueError("output-directory must be specified.")

    ConfigureLogging(args)

    child_args = []
    if args.test_launcher_shard_index != None:
        child_args.append('--test-launcher-shard-index=%d' %
                          args.test_launcher_shard_index)
    if args.test_launcher_total_shards != None:
        child_args.append('--test-launcher-total-shards=%d' %
                          args.test_launcher_total_shards)
    if args.single_process_tests:
        child_args.append('--single-process-tests')
    if args.test_launcher_bot_mode:
        child_args.append('--test-launcher-bot-mode')
    if args.test_launcher_batch_limit:
        child_args.append('--test-launcher-batch-limit=%d' %
                          args.test_launcher_batch_limit)

    # Only set --test-launcher-jobs if the caller specifies it, in general.
    # If the caller enables the test-server then we need to launch the right
    # number of instances to match the maximum number of parallel test jobs, so
    # in that case we set --test-launcher-jobs based on the number of CPU cores
    # specified for the emulator to use.
    test_concurrency = None
    if args.test_launcher_jobs:
        test_concurrency = args.test_launcher_jobs
    elif args.enable_test_server:
        if args.device == 'device':
            test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
        else:
            test_concurrency = args.qemu_cpu_cores
    if test_concurrency:
        child_args.append('--test-launcher-jobs=%d' % test_concurrency)

    if args.gtest_filter:
        child_args.append('--gtest_filter=' + args.gtest_filter)
    if args.gtest_repeat:
        child_args.append('--gtest_repeat=' + args.gtest_repeat)
        child_args.append('--test-launcher-timeout=-1')
    if args.test_launcher_retry_limit:
        child_args.append('--test-launcher-retry-limit=' +
                          args.test_launcher_retry_limit)
    if args.gtest_break_on_failure:
        child_args.append('--gtest_break_on_failure')
    if args.test_launcher_summary_output:
        child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)

    if args.child_arg:
        child_args.extend(args.child_arg)
    if args.child_args:
        child_args.extend(args.child_args)

    with GetDeploymentTargetForArgs(args) as target:
        target.Start()

        if args.test_launcher_filter_file:
            target.PutFile(args.test_launcher_filter_file,
                           TEST_FILTER_PATH,
                           for_package=args.package_name)
            child_args.append('--test-launcher-filter-file=' +
                              TEST_FILTER_PATH)

        isolated_outputs_dir = os.path.dirname(args.system_log_file)
        if os.path.isdir(isolated_outputs_dir):
            # Store logging popen objects so that they live as long as the target.
            system_log_procs = StartSystemLogReader(target, args.package,
                                                    isolated_outputs_dir)

        test_server = None
        if args.enable_test_server:
            assert test_concurrency
            test_server = SetupTestServer(target, test_concurrency,
                                          args.package_name)

        run_package_args = RunPackageArgs.FromCommonArgs(args)
        returncode = RunPackage(args.output_directory, target, args.package,
                                args.package_name, child_args,
                                run_package_args)

        if test_server:
            test_server.Stop()

        if args.test_launcher_summary_output:
            target.GetFile(TEST_RESULT_PATH,
                           args.test_launcher_summary_output,
                           for_package=args.package_name)

        return returncode
Example #9
def main():
    parser = argparse.ArgumentParser()
    AddCommonArgs(parser)

    parser.add_argument('--gtest_filter',
                        help='GTest filter to use in place of any default.')
    parser.add_argument(
        '--gtest_repeat',
        help='GTest repeat value to use. This also disables the '
        'test launcher timeout.')
    parser.add_argument(
        '--test-launcher-retry-limit',
        help='Number of times that test suite will retry failing '
        'tests. This is multiplicative with --gtest_repeat.')
    parser.add_argument('--test-launcher-shard-index',
                        type=int,
                        default=os.environ.get('GTEST_SHARD_INDEX'),
                        help='Index of this instance amongst swarming shards.')
    parser.add_argument('--test-launcher-total-shards',
                        type=int,
                        default=os.environ.get('GTEST_TOTAL_SHARDS'),
                        help='Total number of swarming shards of this suite.')
    parser.add_argument('--gtest_break_on_failure',
                        action='store_true',
                        default=False,
                        help='Should GTest break on failure; useful with '
                        '--gtest_repeat.')
    parser.add_argument('--single-process-tests',
                        action='store_true',
                        default=False,
                        help='Runs the tests and the launcher in the same '
                        'process. Useful for debugging.')
    parser.add_argument('--test-launcher-batch-limit',
                        type=int,
                        help='Sets the limit of test batch to run in a single '
                        'process.')
    # --test-launcher-filter-file is specified relative to --out-dir,
    # so specifying type=os.path.* will break it.
    parser.add_argument(
        '--test-launcher-filter-file',
        default=None,
        help='Override default filter file passed to target test '
        'process. Set an empty path to disable filtering.')
    parser.add_argument('--test-launcher-jobs',
                        type=int,
                        help='Sets the number of parallel test jobs.')
    parser.add_argument('--test-launcher-summary-output',
                        help='Where the test launcher will output its json.')
    parser.add_argument('--enable-test-server',
                        action='store_true',
                        default=False,
                        help='Enable Chrome test server spawner.')
    parser.add_argument(
        '--test-launcher-bot-mode',
        action='store_true',
        default=False,
        help='Informs the TestLauncher that it should enable '
        'special allowances for running on a test bot.')
    parser.add_argument('--child-arg',
                        action='append',
                        help='Arguments for the test process.')
    parser.add_argument('child_args',
                        nargs='*',
                        help='Arguments for the test process.')
    parser.add_argument('--isolated-script-test-output',
                        help='If present, store test results on this path.')
    parser.add_argument(
        '--isolated-script-test-perf-output',
        help='If present, store chartjson results on this path.')
    parser.add_argument('--use-run-test-component',
                        default=False,
                        action='store_true',
                        help='Run the test package hermetically using '
                        'run-test-component, rather than run.')
    parser.add_argument('--code-coverage',
                        default=False,
                        action='store_true',
                        help='Gather code coverage information.')
    parser.add_argument('--code-coverage-dir',
                        default=os.getcwd(),
                        help='Directory to place code coverage information. '
                        'Only relevant when --code-coverage is set. '
                        'Defaults to current directory.')
    args = parser.parse_args()

    # --out-dir is required for tests launched with this script.
    if not args.out_dir:
        raise ValueError("out-dir must be specified.")

    # Code coverage uses runtests, which calls run_test_component.
    if args.code_coverage:
        args.use_run_test_component = True

    ConfigureLogging(args)

    child_args = []
    if args.test_launcher_shard_index != None:
        child_args.append('--test-launcher-shard-index=%d' %
                          args.test_launcher_shard_index)
    if args.test_launcher_total_shards != None:
        child_args.append('--test-launcher-total-shards=%d' %
                          args.test_launcher_total_shards)
    if args.single_process_tests:
        child_args.append('--single-process-tests')
    if args.test_launcher_bot_mode:
        child_args.append('--test-launcher-bot-mode')
    if args.test_launcher_batch_limit:
        child_args.append('--test-launcher-batch-limit=%d' %
                          args.test_launcher_batch_limit)

    # Only set --test-launcher-jobs if the caller specifies it, in general.
    # If the caller enables the test-server then we need to launch the right
    # number of instances to match the maximum number of parallel test jobs, so
    # in that case we set --test-launcher-jobs based on the number of CPU cores
    # specified for the emulator to use.
    test_concurrency = None
    if args.test_launcher_jobs:
        test_concurrency = args.test_launcher_jobs
    elif args.enable_test_server:
        if args.device == 'device':
            test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
        else:
            test_concurrency = args.cpu_cores
    if test_concurrency:
        child_args.append('--test-launcher-jobs=%d' % test_concurrency)

    if args.gtest_filter:
        child_args.append('--gtest_filter=' + args.gtest_filter)
    if args.gtest_repeat:
        child_args.append('--gtest_repeat=' + args.gtest_repeat)
        child_args.append('--test-launcher-timeout=-1')
    if args.test_launcher_retry_limit:
        child_args.append('--test-launcher-retry-limit=' +
                          args.test_launcher_retry_limit)
    if args.gtest_break_on_failure:
        child_args.append('--gtest_break_on_failure')
    if args.test_launcher_summary_output:
        child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)
    if args.isolated_script_test_output:
        child_args.append('--isolated-script-test-output=' + TEST_RESULT_PATH)
    if args.isolated_script_test_perf_output:
        child_args.append('--isolated-script-test-perf-output=' +
                          TEST_PERF_RESULT_PATH)

    if args.child_arg:
        child_args.extend(args.child_arg)
    if args.child_args:
        child_args.extend(args.child_args)

    test_realms = []
    if args.use_run_test_component:
        test_realms = [TEST_REALM_NAME]

    try:
        with GetDeploymentTargetForArgs() as target, \
             SystemLogReader() as system_logger, \
             RunnerLogManager(args.runner_logs_dir, BuildIdsPaths(args.package)):
            target.Start()

            if args.system_log_file and args.system_log_file != '-':
                system_logger.Start(target, args.package, args.system_log_file)

            if args.test_launcher_filter_file:
                target.PutFile(args.test_launcher_filter_file,
                               TEST_FILTER_PATH,
                               for_package=args.package_name,
                               for_realms=test_realms)
                child_args.append('--test-launcher-filter-file=' +
                                  TEST_FILTER_PATH)

            test_server = None
            if args.enable_test_server:
                assert test_concurrency
                test_server = SetupTestServer(target, test_concurrency,
                                              args.package_name, test_realms)

            run_package_args = RunTestPackageArgs.FromCommonArgs(args)
            if args.use_run_test_component:
                run_package_args.test_realm_label = TEST_REALM_NAME
                run_package_args.use_run_test_component = True
            returncode = RunTestPackage(args.out_dir, target, args.package,
                                        args.package_name, child_args,
                                        run_package_args)

            if test_server:
                test_server.Stop()

            if args.code_coverage:
                # Copy all the files in the profile directory. /* is used instead
                # of recursively copying due to permission issues for the latter.
                target.GetFile(TEST_LLVM_PROFILE_PATH + '/*',
                               args.code_coverage_dir)

            if args.test_launcher_summary_output:
                target.GetFile(TEST_RESULT_PATH,
                               args.test_launcher_summary_output,
                               for_package=args.package_name,
                               for_realms=test_realms)

            if args.isolated_script_test_output:
                target.GetFile(TEST_RESULT_PATH,
                               args.isolated_script_test_output,
                               for_package=args.package_name,
                               for_realms=test_realms)

            if args.isolated_script_test_perf_output:
                target.GetFile(TEST_PERF_RESULT_PATH,
                               args.isolated_script_test_perf_output,
                               for_package=args.package_name,
                               for_realms=test_realms)

            return returncode

    except:
        return HandleExceptionAndReturnExitCode()
Example #10
def RunTestOnFuchsiaDevice(script_cmd):
  """Preps Fuchsia device with pave and package update, then runs script."""

  parser = argparse.ArgumentParser()
  AddCommonArgs(parser)
  AddTargetSpecificArgs(parser)
  runner_script_args, test_args = parser.parse_known_args()
  ConfigureLogging(runner_script_args)

  # If out_dir is not set, assume the script is being launched
  # from the output directory.
  if not runner_script_args.out_dir:
    runner_script_args.out_dir = os.getcwd()

  # Create a temporary log file that Telemetry will look for and use to build
  # an artifact when tests fail.
  temp_log_file = False
  if not runner_script_args.system_log_file:
    runner_script_args.system_log_file = os.path.join(tempfile.mkdtemp(),
                                                      'system-log')
    temp_log_file = True

  package_names = ['web_engine_with_webui', 'web_engine_shell']
  web_engine_dir = os.path.join(runner_script_args.out_dir, 'gen', 'fuchsia',
                                'engine')

  # Pass all other arguments to the gpu integration tests.
  script_cmd.extend(test_args)
  listener_process = None
  symbolizer_process = None
  try:
    with GetDeploymentTargetForArgs(runner_script_args) as target:
      target.Start()
      fuchsia_device_address, fuchsia_ssh_port = target._GetEndpoint()
      script_cmd.extend(
          ['--chromium-output-directory', runner_script_args.out_dir])
      script_cmd.extend(['--fuchsia-device-address', fuchsia_device_address])
      script_cmd.extend(['--fuchsia-ssh-config', target._GetSshConfigPath()])
      if fuchsia_ssh_port:
        script_cmd.extend(['--fuchsia-ssh-port', str(fuchsia_ssh_port)])
      script_cmd.extend(
          ['--fuchsia-system-log-file', runner_script_args.system_log_file])
      # Forward the verbose flag to the script.
      if runner_script_args.verbose:
        script_cmd.append('-v')

      # Set up logging of WebEngine
      listener_process = target.RunCommandPiped(['log_listener'],
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.STDOUT)
      build_ids_paths = map(
          lambda package_name: os.path.join(web_engine_dir, package_name,
                                            'ids.txt'), package_names)
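      # Each entry resolves to e.g.
      # <out_dir>/gen/fuchsia/engine/web_engine_shell/ids.txt.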
      symbolizer_process = RunSymbolizer(
          listener_process.stdout, open(runner_script_args.system_log_file,
                                        'w'), build_ids_paths)

      # Keep the Amber repository live while the test runs.
      with target.GetAmberRepo():
        # Install necessary packages on the device.
        far_files = map(
            lambda package_name: os.path.join(web_engine_dir, package_name,
                                              package_name + '.far'),
            package_names)
        target.InstallPackage(far_files)
        return subprocess.call(script_cmd)
  finally:
    if temp_log_file:
      shutil.rmtree(os.path.dirname(runner_script_args.system_log_file))
    if listener_process:
      listener_process.kill()
    if symbolizer_process:
      symbolizer_process.kill()
Example #11
def main():
  parser = argparse.ArgumentParser()
  AddTestExecutionArgs(parser)
  AddCommonArgs(parser)
  AddTargetSpecificArgs(parser)
  args = parser.parse_args()

  # --out-dir is required for tests launched with this script.
  if not args.out_dir:
    raise ValueError("out-dir must be specified.")

  # Code coverage uses runtests, which calls run_test_component.
  if args.code_coverage:
    args.use_run_test_component = True

  ConfigureLogging(args)

  child_args = []
  if args.test_launcher_shard_index != None:
    child_args.append(
        '--test-launcher-shard-index=%d' % args.test_launcher_shard_index)
  if args.test_launcher_total_shards != None:
    child_args.append(
        '--test-launcher-total-shards=%d' % args.test_launcher_total_shards)
  if args.single_process_tests:
    child_args.append('--single-process-tests')
  if args.test_launcher_bot_mode:
    child_args.append('--test-launcher-bot-mode')
  if args.test_launcher_batch_limit:
    child_args.append('--test-launcher-batch-limit=%d' %
                       args.test_launcher_batch_limit)

  # Only set --test-launcher-jobs if the caller specifies it, in general.
  # If the caller enables the test-server then we need to launch the right
  # number of instances to match the maximum number of parallel test jobs, so
  # in that case we set --test-launcher-jobs based on the number of CPU cores
  # specified for the emulator to use.
  test_concurrency = None
  if args.test_launcher_jobs:
    test_concurrency = args.test_launcher_jobs
  elif args.enable_test_server:
    if args.device == 'device':
      test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
    else:
      test_concurrency = args.cpu_cores
  if test_concurrency:
    child_args.append('--test-launcher-jobs=%d' % test_concurrency)

  if args.gtest_filter:
    child_args.append('--gtest_filter=' + args.gtest_filter)
  if args.gtest_repeat:
    child_args.append('--gtest_repeat=' + args.gtest_repeat)
    child_args.append('--test-launcher-timeout=-1')
  if args.test_launcher_retry_limit:
    child_args.append(
        '--test-launcher-retry-limit=' + args.test_launcher_retry_limit)
  if args.gtest_break_on_failure:
    child_args.append('--gtest_break_on_failure')
  if args.test_launcher_summary_output:
    child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)
  if args.isolated_script_test_output:
    child_args.append('--isolated-script-test-output=' + TEST_RESULT_PATH)
  if args.isolated_script_test_perf_output:
    child_args.append('--isolated-script-test-perf-output=' +
                      TEST_PERF_RESULT_PATH)

  if args.child_arg:
    child_args.extend(args.child_arg)
  if args.child_args:
    child_args.extend(args.child_args)

  test_realms = []
  if args.use_run_test_component:
    test_realms = [TEST_REALM_NAME]

  try:
    with GetDeploymentTargetForArgs(args) as target, \
         SystemLogReader() as system_logger, \
         RunnerLogManager(args.runner_logs_dir, BuildIdsPaths(args.package)):
      target.Start()

      if args.system_log_file and args.system_log_file != '-':
        system_logger.Start(target, args.package, args.system_log_file)

      if args.test_launcher_filter_file:
        target.PutFile(args.test_launcher_filter_file,
                       TEST_FILTER_PATH,
                       for_package=args.package_name,
                       for_realms=test_realms)
        child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)

      test_server = None
      if args.enable_test_server:
        assert test_concurrency
        test_server = SetupTestServer(target, test_concurrency,
                                      args.package_name, test_realms)

      run_package_args = RunTestPackageArgs.FromCommonArgs(args)
      if args.use_run_test_component:
        run_package_args.test_realm_label = TEST_REALM_NAME
        run_package_args.use_run_test_component = True
      returncode = RunTestPackage(args.out_dir, target, args.package,
                                  args.package_name, child_args,
                                  run_package_args)

      if test_server:
        test_server.Stop()

      if args.code_coverage:
        # Copy all the files in the profile directory. /* is used instead
        # of recursively copying due to permission issues for the latter.
        target.GetFile(TEST_LLVM_PROFILE_PATH + '/*', args.code_coverage_dir)

      if args.test_launcher_summary_output:
        target.GetFile(TEST_RESULT_PATH,
                       args.test_launcher_summary_output,
                       for_package=args.package_name,
                       for_realms=test_realms)

      if args.isolated_script_test_output:
        target.GetFile(TEST_RESULT_PATH,
                       args.isolated_script_test_output,
                       for_package=args.package_name,
                       for_realms=test_realms)

      if args.isolated_script_test_perf_output:
        target.GetFile(TEST_PERF_RESULT_PATH,
                       args.isolated_script_test_perf_output,
                       for_package=args.package_name,
                       for_realms=test_realms)

      return returncode

  except:
    return HandleExceptionAndReturnExitCode()
Example #12
def main():
    parser = argparse.ArgumentParser()
    AddTestExecutionArgs(parser)
    AddCommonArgs(parser)
    AddTargetSpecificArgs(parser)
    args = parser.parse_args()

    # --out-dir is required for tests launched with this script.
    if not args.out_dir:
        raise ValueError("out-dir must be specified.")

    if args.component_version == "2":
        args.use_run_test_component = False

    if (args.code_coverage and args.component_version != "2"
            and not args.use_run_test_component):
        if args.enable_test_server:
            # TODO(1254563): Tests that need access to the test server cannot
            # be run as a test component under CFv1. Because code coverage
            # requires it, force the test to run as a test component. It is
            # expected that tests that try to use the external test server
            # will fail.
            args.use_run_test_component = True
        else:
            raise ValueError('Collecting code coverage info requires using '
                             'run-test-component.')

    ConfigureLogging(args)

    child_args = []
    if args.test_launcher_shard_index != None:
        child_args.append('--test-launcher-shard-index=%d' %
                          args.test_launcher_shard_index)
    if args.test_launcher_total_shards != None:
        child_args.append('--test-launcher-total-shards=%d' %
                          args.test_launcher_total_shards)
    if args.single_process_tests:
        child_args.append('--single-process-tests')
    if args.test_launcher_bot_mode:
        child_args.append('--test-launcher-bot-mode')
    if args.test_launcher_batch_limit:
        child_args.append('--test-launcher-batch-limit=%d' %
                          args.test_launcher_batch_limit)

    # Only set --test-launcher-jobs if the caller specifies it, in general.
    # If the caller enables the test-server then we need to launch the right
    # number of instances to match the maximum number of parallel test jobs, so
    # in that case we set --test-launcher-jobs based on the number of CPU cores
    # specified for the emulator to use.
    test_concurrency = None
    if args.test_launcher_jobs:
        test_concurrency = args.test_launcher_jobs
    elif args.enable_test_server:
        if args.device == 'device':
            test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
        else:
            test_concurrency = args.cpu_cores
    if test_concurrency:
        child_args.append('--test-launcher-jobs=%d' % test_concurrency)
    if args.test_launcher_print_test_stdio:
        child_args.append('--test-launcher-print-test-stdio=%s' %
                          args.test_launcher_print_test_stdio)

    if args.gtest_filter:
        child_args.append('--gtest_filter=' + args.gtest_filter)
    if args.gtest_repeat:
        child_args.append('--gtest_repeat=' + args.gtest_repeat)
        child_args.append('--test-launcher-timeout=-1')
    if args.test_launcher_retry_limit:
        child_args.append('--test-launcher-retry-limit=' +
                          args.test_launcher_retry_limit)
    if args.gtest_break_on_failure:
        child_args.append('--gtest_break_on_failure')
    if args.gtest_also_run_disabled_tests:
        child_args.append('--gtest_also_run_disabled_tests')

    if args.child_arg:
        child_args.extend(args.child_arg)
    if args.child_args:
        child_args.extend(args.child_args)

    test_realms = []
    if args.use_run_test_component:
        test_realms = [TEST_REALM_NAME]

    try:
        with GetDeploymentTargetForArgs(args) as target, \
             MakeTestOutputs(args.component_version,
                             target,
                             args.package_name,
                             test_realms) as test_outputs:
            if args.test_launcher_summary_output:
                child_args.append('--test-launcher-summary-output=' +
                                  test_outputs.GetDevicePath(TEST_RESULT_FILE))
            if args.isolated_script_test_output:
                child_args.append('--isolated-script-test-output=' +
                                  test_outputs.GetDevicePath(TEST_RESULT_FILE))
            if args.isolated_script_test_perf_output:
                child_args.append(
                    '--isolated-script-test-perf-output=' +
                    test_outputs.GetDevicePath(TEST_PERF_RESULT_FILE))

            target.Start()
            target.StartSystemLog(args.package)

            if args.test_launcher_filter_file:
                if args.component_version == "2":
                    # TODO(crbug.com/1279803): Until one can send files to the
                    # device when running a test, filter files must be read
                    # from the test package.
                    test_launcher_filter_files = map(
                        MapFilterFileToPackageFile,
                        args.test_launcher_filter_file.split(';'))
                    child_args.append('--test-launcher-filter-file=' +
                                      ';'.join(test_launcher_filter_files))
                else:
                    test_launcher_filter_files = args.test_launcher_filter_file.split(
                        ';')
                    with tempfile.NamedTemporaryFile(
                            'a+b') as combined_filter_file:
                        for filter_file in test_launcher_filter_files:
                            with open(filter_file, 'rb') as f:
                                combined_filter_file.write(f.read())
                        combined_filter_file.seek(0)
                        target.PutFile(combined_filter_file.name,
                                       TEST_FILTER_PATH,
                                       for_package=args.package_name,
                                       for_realms=test_realms)
                        child_args.append('--test-launcher-filter-file=' +
                                          TEST_FILTER_PATH)

            test_server = None
            if args.enable_test_server:
                assert test_concurrency
                test_server = SetupTestServer(target, test_concurrency,
                                              args.package_name, test_realms)

            run_package_args = RunTestPackageArgs.FromCommonArgs(args)
            if args.use_run_test_component:
                run_package_args.test_realm_label = TEST_REALM_NAME
                run_package_args.use_run_test_component = True
            if args.component_version == "2":
                run_package_args.output_directory = (
                    test_outputs.GetOutputDirectory())
            returncode = RunTestPackage(target, test_outputs.GetFfxSession(),
                                        args.package, args.package_name,
                                        args.component_version, child_args,
                                        run_package_args)

            if test_server:
                test_server.Stop()

            if args.code_coverage:
                test_outputs.GetCoverageProfiles(args.code_coverage_dir)

            if args.test_launcher_summary_output:
                test_outputs.GetFile(TEST_RESULT_FILE,
                                     args.test_launcher_summary_output)

            if args.isolated_script_test_output:
                test_outputs.GetFile(TEST_RESULT_FILE,
                                     args.isolated_script_test_output)

            if args.isolated_script_test_perf_output:
                test_outputs.GetFile(TEST_PERF_RESULT_FILE,
                                     args.isolated_script_test_perf_output)

            return returncode

    except:
        return HandleExceptionAndReturnExitCode()
Example #13
def main():
  parser = argparse.ArgumentParser()
  AddCommonArgs(parser)
  parser.add_argument('--gtest_filter',
                      help='GTest filter to use in place of any default.')
  parser.add_argument('--gtest_repeat',
                      help='GTest repeat value to use. This also disables the '
                           'test launcher timeout.')
  # TODO(crbug.com/1046861): Remove qemu-img-retries flag when qemu-img arm64
  # hang bug is fixed.
  parser.add_argument('--qemu-img-retries',
                      default=0,
                      type=int,
                      help='Number of times that the qemu-img command can be '
                           'retried.')
  parser.add_argument('--test-launcher-retry-limit',
                      help='Number of times that test suite will retry failing '
                           'tests. This is multiplicative with --gtest_repeat.')
  parser.add_argument('--gtest_break_on_failure', action='store_true',
                      default=False,
                      help='Should GTest break on failure; useful with '
                           '--gtest_repeat.')
  parser.add_argument('--single-process-tests', action='store_true',
                      default=False,
                      help='Runs the tests and the launcher in the same '
                           'process. Useful for debugging.')
  parser.add_argument('--test-launcher-batch-limit',
                      type=int,
                      help='Sets the limit of test batch to run in a single '
                      'process.')
  # --test-launcher-filter-file is specified relative to --output-directory,
  # so specifying type=os.path.* will break it.
  parser.add_argument('--test-launcher-filter-file',
                      default=None,
                      help='Override default filter file passed to target test '
                      'process. Set an empty path to disable filtering.')
  parser.add_argument('--test-launcher-jobs',
                      type=int,
                      help='Sets the number of parallel test jobs.')
  parser.add_argument('--test-launcher-summary-output',
                      help='Where the test launcher will output its json.')
  parser.add_argument('--enable-test-server', action='store_true',
                      default=False,
                      help='Enable Chrome test server spawner.')
  parser.add_argument('--test-launcher-bot-mode', action='store_true',
                      default=False,
                      help='Informs the TestLauncher that it should enable '
                      'special allowances for running on a test bot.')
  parser.add_argument('--child-arg', action='append',
                      help='Arguments for the test process.')
  parser.add_argument('child_args', nargs='*',
                      help='Arguments for the test process.')
  args = parser.parse_args()
  ConfigureLogging(args)

  child_args = ['--test-launcher-retry-limit=0']
  if args.single_process_tests:
    child_args.append('--single-process-tests')
  if args.test_launcher_bot_mode:
    child_args.append('--test-launcher-bot-mode')
  if args.test_launcher_batch_limit:
    child_args.append('--test-launcher-batch-limit=%d' %
                       args.test_launcher_batch_limit)

  test_concurrency = args.test_launcher_jobs \
      if args.test_launcher_jobs else DEFAULT_TEST_CONCURRENCY
  child_args.append('--test-launcher-jobs=%d' % test_concurrency)

  if args.gtest_filter:
    child_args.append('--gtest_filter=' + args.gtest_filter)
  if args.gtest_repeat:
    child_args.append('--gtest_repeat=' + args.gtest_repeat)
    child_args.append('--test-launcher-timeout=-1')
  if args.test_launcher_retry_limit:
    child_args.append(
        '--test-launcher-retry-limit=' + args.test_launcher_retry_limit)
  if args.gtest_break_on_failure:
    child_args.append('--gtest_break_on_failure')
  if args.test_launcher_summary_output:
    child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)

  if args.child_arg:
    child_args.extend(args.child_arg)
  if args.child_args:
    child_args.extend(args.child_args)

  # KVM is required on x64 test bots.
  require_kvm = args.test_launcher_bot_mode and args.target_cpu == 'x64'

  with GetDeploymentTargetForArgs(args, require_kvm=require_kvm) as target:
    target.Start()

    if args.test_launcher_filter_file:
      target.PutFile(args.test_launcher_filter_file, TEST_FILTER_PATH,
                     for_package=args.package_name)
      child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)

    test_server = None
    if args.enable_test_server:
      test_server = SetupTestServer(target, test_concurrency,
                                    args.package_name)

    run_package_args = RunPackageArgs.FromCommonArgs(args)
    returncode = RunPackage(
        args.output_directory, target, args.package, args.package_name,
        child_args, run_package_args)

    if test_server:
      test_server.Stop()

    if args.test_launcher_summary_output:
      target.GetFile(TEST_RESULT_PATH, args.test_launcher_summary_output,
                     for_package=args.package_name)

    return returncode
Example #14
def RunTestOnFuchsiaDevice(script_cmd):
    """Preps Fuchsia device with pave and package update, then runs script."""

    parser = argparse.ArgumentParser()
    AddCommonArgs(parser)
    AddTargetSpecificArgs(parser)
    runner_script_args, test_args = parser.parse_known_args()
    ConfigureLogging(runner_script_args)

    # If out_dir is not set, assume the script is being launched
    # from the output directory.
    if not runner_script_args.out_dir:
        runner_script_args.out_dir = os.getcwd()

    # Create a temporary logs directory that Telemetry will look for and use
    # to build an artifact when tests fail.
    clean_up_logs_on_exit = False
    if not runner_script_args.logs_dir:
        runner_script_args.logs_dir = tempfile.mkdtemp()
        clean_up_logs_on_exit = True

    package_names = ['web_engine_with_webui', 'web_engine_shell']
    web_engine_dir = os.path.join(runner_script_args.out_dir, 'gen', 'fuchsia',
                                  'engine')
    package_paths = map(
        lambda package_name: os.path.join(web_engine_dir, package_name),
        package_names)
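    # Each entry resolves to e.g. <out_dir>/gen/fuchsia/engine/web_engine_shell.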

    # Pass all other arguments to the gpu integration tests.
    script_cmd.extend(test_args)
    try:
        with GetDeploymentTargetForArgs(runner_script_args) as target:
            target.Start()
            target.StartSystemLog(package_paths)
            fuchsia_device_address, fuchsia_ssh_port = target._GetEndpoint()
            script_cmd.extend(
                ['--chromium-output-directory', runner_script_args.out_dir])
            script_cmd.extend(
                ['--fuchsia-device-address', fuchsia_device_address])
            script_cmd.extend(
                ['--fuchsia-ssh-config',
                 target._GetSshConfigPath()])
            if fuchsia_ssh_port:
                script_cmd.extend(
                    ['--fuchsia-ssh-port',
                     str(fuchsia_ssh_port)])
            script_cmd.extend([
                '--fuchsia-system-log-file',
                os.path.join(runner_script_args.logs_dir, 'system_log')
            ])
            # Forward the verbose flag to the script.
            if runner_script_args.verbose:
                script_cmd.append('-v')

            # Keep the package repository live while the test runs.
            with target.GetPkgRepo():
                # Install necessary packages on the device.
                far_files = map(
                    lambda package_name: os.path.join(
                        web_engine_dir, package_name, package_name + '.far'),
                    package_names)
                target.InstallPackage(far_files)
                return subprocess.call(script_cmd)
    finally:
        if clean_up_logs_on_exit:
            shutil.rmtree(runner_script_args.logs_dir)