Example #1
    def Start(self, target, package_paths, system_log_file):
        """Start a system log reader as a long-running SSH task."""
        logging.debug('Writing Fuchsia system log to %s', system_log_file)

        self._listener_proc = target.RunCommandPiped(['log_listener'],
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.STDOUT)

        self._system_log = open(system_log_file, 'w', buffering=1)
        self._symbolizer_proc = RunSymbolizer(self._listener_proc.stdout,
                                              self._system_log,
                                              BuildIdsPaths(package_paths))
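Example #3 below shows how this reader is driven from the test runner: SystemLogReader is used as a context manager, and Start() is only called when a log file path was supplied. A minimal usage sketch drawn from that call site (target, package_paths, and system_log_file are placeholders here):

with SystemLogReader() as system_logger:
    # Placeholder values; see Example #3 for the real call site.
    target.Start()
    if system_log_file and system_log_file != '-':
        system_logger.Start(target, package_paths, system_log_file)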
Example #2
File: target.py  Project: yue/build-gn
 def StartSystemLog(self, package_paths):
   """Start a system log reader as a long-running SSH task."""
   system_log = self._log_manager.Open('system_log')
   if package_paths:
     self._log_listener_proc = self.RunCommandPiped(['log_listener'],
                                                    stdout=subprocess.PIPE,
                                                    stderr=subprocess.STDOUT)
     self._symbolizer_proc = RunSymbolizer(self._log_listener_proc.stdout,
                                           system_log,
                                           BuildIdsPaths(package_paths))
   else:
     self._log_listener_proc = self.RunCommandPiped(['log_listener'],
                                                    stdout=system_log,
                                                    stderr=subprocess.STDOUT)
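RunSymbolizer itself is not shown in these excerpts; its essence is a filter subprocess wired between the listener's stdout and the log file. A minimal sketch under that assumption (symbolizer_cmd is a hypothetical placeholder, not the real Fuchsia symbolizer invocation):

import subprocess

def RunSymbolizerSketch(input_stream, output_file, symbolizer_cmd):
  # symbolizer_cmd is a hypothetical command list; the real helper builds
  # the Fuchsia symbolizer invocation from the ids.txt paths.
  return subprocess.Popen(symbolizer_cmd,
                          stdin=input_stream,
                          stdout=output_file,
                          stderr=subprocess.STDOUT)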
Example #3
def main():
    parser = argparse.ArgumentParser()
    AddCommonArgs(parser)

    parser.add_argument('--gtest_filter',
                        help='GTest filter to use in place of any default.')
    parser.add_argument(
        '--gtest_repeat',
        help='GTest repeat value to use. This also disables the '
        'test launcher timeout.')
    parser.add_argument(
        '--test-launcher-retry-limit',
        help='Number of times that test suite will retry failing '
        'tests. This is multiplicative with --gtest_repeat.')
    parser.add_argument('--test-launcher-shard-index',
                        type=int,
                        default=os.environ.get('GTEST_SHARD_INDEX'),
                        help='Index of this instance amongst swarming shards.')
    parser.add_argument('--test-launcher-total-shards',
                        type=int,
                        default=os.environ.get('GTEST_TOTAL_SHARDS'),
                        help='Total number of swarming shards of this suite.')
    parser.add_argument('--gtest_break_on_failure',
                        action='store_true',
                        default=False,
                        help='Should GTest break on failure; useful with '
                        '--gtest_repeat.')
    parser.add_argument('--single-process-tests',
                        action='store_true',
                        default=False,
                        help='Runs the tests and the launcher in the same '
                        'process. Useful for debugging.')
    parser.add_argument('--test-launcher-batch-limit',
                        type=int,
                        help='Sets the maximum number of tests to run in a '
                        'single batch (one child process).')
    # --test-launcher-filter-file is specified relative to --out-dir,
    # so specifying type=os.path.* will break it.
    parser.add_argument(
        '--test-launcher-filter-file',
        default=None,
        help='Override default filter file passed to target test '
        'process. Set an empty path to disable filtering.')
    parser.add_argument('--test-launcher-jobs',
                        type=int,
                        help='Sets the number of parallel test jobs.')
    parser.add_argument('--test-launcher-summary-output',
                        help='Where the test launcher will output its json.')
    parser.add_argument('--enable-test-server',
                        action='store_true',
                        default=False,
                        help='Enable Chrome test server spawner.')
    parser.add_argument(
        '--test-launcher-bot-mode',
        action='store_true',
        default=False,
        help='Informs the TestLauncher that it should enable '
        'special allowances for running on a test bot.')
    parser.add_argument('--child-arg',
                        action='append',
                        help='Arguments for the test process.')
    parser.add_argument('child_args',
                        nargs='*',
                        help='Arguments for the test process.')
    parser.add_argument('--isolated-script-test-output',
                        help='If present, store test results on this path.')
    parser.add_argument(
        '--isolated-script-test-perf-output',
        help='If present, store chartjson results on this path.')
    parser.add_argument('--use-run-test-component',
                        default=False,
                        action='store_true',
                        help='Run the test package hermetically using '
                        'run-test-component, rather than run.')
    parser.add_argument('--code-coverage',
                        default=False,
                        action='store_true',
                        help='Gather code coverage information.')
    parser.add_argument('--code-coverage-dir',
                        default=os.getcwd(),
                        help='Directory to place code coverage information. '
                        'Only relevant when --code-coverage is set. '
                        'Defaults to the current directory.')
    args = parser.parse_args()

    # Flag out_dir is required for tests launched with this script.
    if not args.out_dir:
        raise ValueError("out-dir must be specified.")

    # Code coverage uses runtests, which calls run_test_component.
    if args.code_coverage:
        args.use_run_test_component = True

    ConfigureLogging(args)

    child_args = []
    if args.test_launcher_shard_index is not None:
        child_args.append('--test-launcher-shard-index=%d' %
                          args.test_launcher_shard_index)
    if args.test_launcher_total_shards is not None:
        child_args.append('--test-launcher-total-shards=%d' %
                          args.test_launcher_total_shards)
    if args.single_process_tests:
        child_args.append('--single-process-tests')
    if args.test_launcher_bot_mode:
        child_args.append('--test-launcher-bot-mode')
    if args.test_launcher_batch_limit:
        child_args.append('--test-launcher-batch-limit=%d' %
                          args.test_launcher_batch_limit)

    # In general, only set --test-launcher-jobs if the caller specifies it.
    # However, if the caller enables the test server, we need to launch the
    # right number of instances to match the maximum number of parallel test
    # jobs, so in that case we set --test-launcher-jobs based on the number
    # of CPU cores specified for the emulator to use.
    test_concurrency = None
    if args.test_launcher_jobs:
        test_concurrency = args.test_launcher_jobs
    elif args.enable_test_server:
        if args.device == 'device':
            test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
        else:
            test_concurrency = args.cpu_cores
    if test_concurrency:
        child_args.append('--test-launcher-jobs=%d' % test_concurrency)

    if args.gtest_filter:
        child_args.append('--gtest_filter=' + args.gtest_filter)
    if args.gtest_repeat:
        child_args.append('--gtest_repeat=' + args.gtest_repeat)
        child_args.append('--test-launcher-timeout=-1')
    if args.test_launcher_retry_limit:
        child_args.append('--test-launcher-retry-limit=' +
                          args.test_launcher_retry_limit)
    if args.gtest_break_on_failure:
        child_args.append('--gtest_break_on_failure')
    if args.test_launcher_summary_output:
        child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)
    if args.isolated_script_test_output:
        child_args.append('--isolated-script-test-output=' + TEST_RESULT_PATH)
    if args.isolated_script_test_perf_output:
        child_args.append('--isolated-script-test-perf-output=' +
                          TEST_PERF_RESULT_PATH)

    if args.child_arg:
        child_args.extend(args.child_arg)
    if args.child_args:
        child_args.extend(args.child_args)

    test_realms = []
    if args.use_run_test_component:
        test_realms = [TEST_REALM_NAME]

    try:
        with GetDeploymentTargetForArgs() as target, \
             SystemLogReader() as system_logger, \
             RunnerLogManager(args.runner_logs_dir, BuildIdsPaths(args.package)):
            target.Start()

            if args.system_log_file and args.system_log_file != '-':
                system_logger.Start(target, args.package, args.system_log_file)

            if args.test_launcher_filter_file:
                target.PutFile(args.test_launcher_filter_file,
                               TEST_FILTER_PATH,
                               for_package=args.package_name,
                               for_realms=test_realms)
                child_args.append('--test-launcher-filter-file=' +
                                  TEST_FILTER_PATH)

            test_server = None
            if args.enable_test_server:
                assert test_concurrency
                test_server = SetupTestServer(target, test_concurrency,
                                              args.package_name, test_realms)

            run_package_args = RunTestPackageArgs.FromCommonArgs(args)
            if args.use_run_test_component:
                run_package_args.test_realm_label = TEST_REALM_NAME
                run_package_args.use_run_test_component = True
            returncode = RunTestPackage(args.out_dir, target, args.package,
                                        args.package_name, child_args,
                                        run_package_args)

            if test_server:
                test_server.Stop()

            if args.code_coverage:
                # Copy all the files in the profile directory. '/*' is used
                # instead of a recursive copy, which fails due to permission
                # issues.
                target.GetFile(TEST_LLVM_PROFILE_PATH + '/*',
                               args.code_coverage_dir)

            if args.test_launcher_summary_output:
                target.GetFile(TEST_RESULT_PATH,
                               args.test_launcher_summary_output,
                               for_package=args.package_name,
                               for_realms=test_realms)

            if args.isolated_script_test_output:
                target.GetFile(TEST_RESULT_PATH,
                               args.isolated_script_test_output,
                               for_package=args.package_name,
                               for_realms=test_realms)

            if args.isolated_script_test_perf_output:
                target.GetFile(TEST_PERF_RESULT_PATH,
                               args.isolated_script_test_perf_output,
                               for_package=args.package_name,
                               for_realms=test_realms)

            return returncode

    except:
        return HandleExceptionAndReturnExitCode()
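HandleExceptionAndReturnExitCode() is referenced but not defined in this excerpt; its job is to turn the in-flight exception into a process exit code so failures surface to the caller. A minimal sketch of such a handler (the body is an assumption, not the real implementation):

import traceback

def HandleExceptionAndReturnExitCodeSketch():
    # Sketch only: log the active exception and map it to a nonzero exit
    # code; the real helper may use distinct codes for infrastructure
    # failures versus test failures.
    traceback.print_exc()
    return 1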
Example #4
def main():
  parser = argparse.ArgumentParser()
  AddTestExecutionArgs(parser)
  AddCommonArgs(parser)
  AddTargetSpecificArgs(parser)
  args = parser.parse_args()

  # Flag out_dir is required for tests launched with this script.
  if not args.out_dir:
    raise ValueError("out-dir must be specified.")

  # Code coverage uses runtests, which calls run_test_component.
  if args.code_coverage:
    args.use_run_test_component = True

  ConfigureLogging(args)

  child_args = []
  if args.test_launcher_shard_index is not None:
    child_args.append(
        '--test-launcher-shard-index=%d' % args.test_launcher_shard_index)
  if args.test_launcher_total_shards is not None:
    child_args.append(
        '--test-launcher-total-shards=%d' % args.test_launcher_total_shards)
  if args.single_process_tests:
    child_args.append('--single-process-tests')
  if args.test_launcher_bot_mode:
    child_args.append('--test-launcher-bot-mode')
  if args.test_launcher_batch_limit:
    child_args.append('--test-launcher-batch-limit=%d' %
                       args.test_launcher_batch_limit)

  # In general, only set --test-launcher-jobs if the caller specifies it.
  # However, if the caller enables the test server, we need to launch the
  # right number of instances to match the maximum number of parallel test
  # jobs, so in that case we set --test-launcher-jobs based on the number
  # of CPU cores specified for the emulator to use.
  test_concurrency = None
  if args.test_launcher_jobs:
    test_concurrency = args.test_launcher_jobs
  elif args.enable_test_server:
    if args.device == 'device':
      test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
    else:
      test_concurrency = args.cpu_cores
  if test_concurrency:
    child_args.append('--test-launcher-jobs=%d' % test_concurrency)

  if args.gtest_filter:
    child_args.append('--gtest_filter=' + args.gtest_filter)
  if args.gtest_repeat:
    child_args.append('--gtest_repeat=' + args.gtest_repeat)
    child_args.append('--test-launcher-timeout=-1')
  if args.test_launcher_retry_limit:
    child_args.append(
        '--test-launcher-retry-limit=' + args.test_launcher_retry_limit)
  if args.gtest_break_on_failure:
    child_args.append('--gtest_break_on_failure')
  if args.test_launcher_summary_output:
    child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)
  if args.isolated_script_test_output:
    child_args.append('--isolated-script-test-output=' + TEST_RESULT_PATH)
  if args.isolated_script_test_perf_output:
    child_args.append('--isolated-script-test-perf-output=' +
                      TEST_PERF_RESULT_PATH)

  if args.child_arg:
    child_args.extend(args.child_arg)
  if args.child_args:
    child_args.extend(args.child_args)

  test_realms = []
  if args.use_run_test_component:
    test_realms = [TEST_REALM_NAME]

  try:
    with GetDeploymentTargetForArgs(args) as target, \
         SystemLogReader() as system_logger, \
         RunnerLogManager(args.runner_logs_dir, BuildIdsPaths(args.package)):
      target.Start()

      if args.system_log_file and args.system_log_file != '-':
        system_logger.Start(target, args.package, args.system_log_file)

      if args.test_launcher_filter_file:
        target.PutFile(args.test_launcher_filter_file,
                       TEST_FILTER_PATH,
                       for_package=args.package_name,
                       for_realms=test_realms)
        child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)

      test_server = None
      if args.enable_test_server:
        assert test_concurrency
        test_server = SetupTestServer(target, test_concurrency,
                                      args.package_name, test_realms)

      run_package_args = RunTestPackageArgs.FromCommonArgs(args)
      if args.use_run_test_component:
        run_package_args.test_realm_label = TEST_REALM_NAME
        run_package_args.use_run_test_component = True
      returncode = RunTestPackage(args.out_dir, target, args.package,
                                  args.package_name, child_args,
                                  run_package_args)

      if test_server:
        test_server.Stop()

      if args.code_coverage:
        # Copy all the files in the profile directory. '/*' is used instead
        # of a recursive copy, which fails due to permission issues.
        target.GetFile(TEST_LLVM_PROFILE_PATH + '/*', args.code_coverage_dir)

      if args.test_launcher_summary_output:
        target.GetFile(TEST_RESULT_PATH,
                       args.test_launcher_summary_output,
                       for_package=args.package_name,
                       for_realms=test_realms)

      if args.isolated_script_test_output:
        target.GetFile(TEST_RESULT_PATH,
                       args.isolated_script_test_output,
                       for_package=args.package_name,
                       for_realms=test_realms)

      if args.isolated_script_test_perf_output:
        target.GetFile(TEST_PERF_RESULT_PATH,
                       args.isolated_script_test_perf_output,
                       for_package=args.package_name,
                       for_realms=test_realms)

      return returncode

  except:
    return HandleExceptionAndReturnExitCode()
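Example #4 moves the flag definitions of Example #3 into helpers such as AddTestExecutionArgs so several runner scripts can share one flag surface. A sketch of that refactoring using two flags from Example #3 (the full helper would carry over all of them):

import argparse

def AddTestExecutionArgsSketch(parser):
  # Sketch: the same add_argument calls as in Example #3, moved behind a
  # helper that each runner script can apply to its own parser.
  parser.add_argument('--gtest_filter',
                      help='GTest filter to use in place of any default.')
  parser.add_argument('--test-launcher-jobs',
                      type=int,
                      help='Sets the number of parallel test jobs.')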
Example #5
def RunTestPackage(output_dir, target, package_paths, package_name,
                   package_args, args):
    """Installs the Fuchsia package at |package_path| on the target,
  executes it with |package_args|, and symbolizes its output.

  output_dir: The path containing the build output files.
  target: The deployment Target object that will run the package.
  package_paths: The paths to the .far packages to be installed.
  package_name: The name of the primary package to run.
  package_args: The arguments which will be passed to the Fuchsia process.
  args: RunTestPackageArgs instance configuring how the package will be run.

  Returns the exit code of the remote package process."""

    system_logger = (_AttachKernelLogReader(target)
                     if args.system_logging else None)
    try:
        if system_logger:
            # Spin up a thread to asynchronously dump the system log to stdout
            # for easier diagnosis of early, pre-execution failures.
            log_output_quit_event = multiprocessing.Event()
            log_output_thread = threading.Thread(
                target=lambda: _DrainStreamToStdout(system_logger.stdout,
                                                    log_output_quit_event))
            log_output_thread.daemon = True
            log_output_thread.start()

        with target.GetAmberRepo():
            target.InstallPackage(package_paths)

            if system_logger:
                log_output_quit_event.set()
                log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)

            logging.info('Running application.')
            if args.use_run_test_component:
                command = ['run-test-component']
                if args.test_realm_label:
                    command += ['--realm-label=%s' % args.test_realm_label]
            # TODO(crbug.com/1156768): Deprecate runtests.
            elif args.code_coverage:
                # runtests requires specifying an output directory.
                command = ['runtests', '-o', '/tmp']
            else:
                command = ['run']
            command += [_GetComponentUri(package_name)] + package_args

            process = target.RunCommandPiped(command,
                                             stdin=open(os.devnull, 'r'),
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.STDOUT)

            if system_logger:
                output_stream = MergedInputStream(
                    [process.stdout, system_logger.stdout]).Start()
            else:
                output_stream = process.stdout

            # Run the log data through the symbolizer process.
            output_stream = SymbolizerFilter(output_stream,
                                             BuildIdsPaths(package_paths))

            for next_line in output_stream:
                print(next_line.rstrip())

            process.wait()
            if process.returncode == 0:
                logging.info('Process exited normally with status code 0.')
            else:
                # The test runner returns an error status code if *any* tests fail,
                # so we should proceed anyway.
                logging.warning('Process exited with status code %d.' %
                                process.returncode)

    finally:
        if system_logger:
            logging.info('Terminating kernel log reader.')
            log_output_quit_event.set()
            log_output_thread.join()
            system_logger.kill()

    return process.returncode
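_DrainStreamToStdout is called with the logger's stdout and a quit event but is not defined in this excerpt. A plausible minimal sketch, assuming a select()-based poll so the draining thread can notice the quit event instead of blocking on a read:

import select

def _DrainStreamToStdoutSketch(stream, quit_event):
    # Sketch only: echo |stream| line by line until |quit_event| is set,
    # polling so a silent stream cannot block the thread forever.
    while not quit_event.is_set():
        rlist, _, _ = select.select([stream], [], [], 0.1)
        if rlist:
            line = rlist[0].readline()
            if not line:
                return
            print(line.rstrip())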
Example #6
def RunTestPackage(target, ffx_session, package_paths, package_name,
                   package_component_version, package_args, args):
  """Installs the Fuchsia package at |package_path| on the target,
  executes it with |package_args|, and symbolizes its output.

  target: The deployment Target object that will run the package.
  ffx_session: An FfxSession object if the test is to be run via ffx, or None.
  package_paths: The paths to the .far packages to be installed.
  package_name: The name of the primary package to run.
  package_component_version: The component version of the primary package to
    run ("1" or "2").
  package_args: The arguments which will be passed to the Fuchsia process.
  args: RunTestPackageArgs instance configuring how the package will be run.

  Returns the exit code of the remote package process."""

  kernel_logger = _AttachKernelLogReader(target)
  try:
    # Spin up a thread to asynchronously dump the system log to stdout
    # for easier diagnosis of early, pre-execution failures.
    log_output_quit_event = multiprocessing.Event()
    log_output_thread = threading.Thread(target=lambda: _DrainStreamToStdout(
        kernel_logger.stdout, log_output_quit_event))
    log_output_thread.daemon = True
    log_output_thread.start()

    with ExitOnSigTerm(), target.GetPkgRepo():
      on_target = True
      start_time = time.time()
      target.InstallPackage(package_paths)
      logging.info('Test installed in {:.2f} seconds.'.format(time.time() -
                                                              start_time))

      log_output_quit_event.set()
      log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)

      logging.info('Running application.')

      component_uri = _GetComponentUri(package_name, package_component_version)
      process = None
      if ffx_session:
        process = ffx_session.test_run(target.GetFfxTarget(), component_uri,
                                       package_args)
      elif args.code_coverage:
        # TODO(crbug.com/1156768): Deprecate runtests.
        # runtests requires specifying an output directory and a double dash
        # before the argument list.
        command = ['runtests', '-o', '/tmp', component_uri]
        if args.test_realm_label:
          command += ['--realm-label', args.test_realm_label]
        command += ['--']
        command.extend(package_args)
      elif args.use_run_test_component:
        command = ['run-test-component']
        if args.test_realm_label:
          command += ['--realm-label=%s' % args.test_realm_label]
        command.append(component_uri)
        command.append('--')
        command.extend(package_args)
      else:
        command = ['run', component_uri]
        command.extend(package_args)

      if process is None:
        process = target.RunCommandPiped(command,
                                         stdin=open(os.devnull, 'r'),
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT)

      # Symbolize klog and systemlog as separate streams. The symbolizer
      # protocol is stateful, so commingled raw stack dumps can yield
      # unsymbolizable garbage data.
      ids_txt_paths = BuildIdsPaths(package_paths)
      with _SymbolizeStream(process.stdout, ids_txt_paths) as \
              symbolized_stdout, \
           _SymbolizeStream(kernel_logger.stdout, ids_txt_paths) as \
               symbolized_klog:
        output_stream = MergedInputStream([symbolized_stdout.stdout,
                                           symbolized_klog.stdout]).Start()
        for next_line in output_stream:
          print(next_line.rstrip())
        symbolized_stdout.wait()  # Should return instantly.
        symbolized_klog.kill()    # klog is never-ending and must be killed.

      process.wait()
      if process.returncode == 0:
        logging.info('Process exited normally with status code 0.')
      else:
        # The test runner returns an error status code if *any* tests fail,
        # so we should proceed anyway.
        logging.warning('Process exited with status code %d.' %
                        process.returncode)

  finally:
    logging.info('Terminating kernel log reader.')
    log_output_quit_event.set()
    log_output_thread.join()
    kernel_logger.kill()

  return process.returncode
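_SymbolizeStream is used above as a context manager exposing .stdout, .wait(), and .kill(), which matches the surface of a subprocess.Popen (Popen objects support the with statement since Python 3.2). A minimal sketch under the assumption that RunSymbolizer forwards its second argument as the child's stdout, so subprocess.PIPE yields a readable stream:

import subprocess

def _SymbolizeStreamSketch(input_fd, ids_txt_paths):
  # Sketch only: return the symbolizer Popen directly; 'with' then cleans
  # up its pipes on exit. RunSymbolizer is the helper used in Example #1.
  return RunSymbolizer(input_fd, subprocess.PIPE, ids_txt_paths)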
Example #7
def RunTestPackage(output_dir, target, package_paths, package_name,
                   package_args, args):
    """Installs the Fuchsia package at |package_path| on the target,
  executes it with |package_args|, and symbolizes its output.

  output_dir: The path containing the build output files.
  target: The deployment Target object that will run the package.
  package_paths: The paths to the .far packages to be installed.
  package_name: The name of the primary package to run.
  package_args: The arguments which will be passed to the Fuchsia process.
  args: RunTestPackageArgs instance configuring how the package will be run.

  Returns the exit code of the remote package process."""

    with target.GetPkgRepo():
        start_time = time.time()
        target.InstallPackage(package_paths)
        logging.info('Test installed in {:.2f} seconds.'.format(time.time() -
                                                                start_time))

        # TODO(crbug.com/1156768): Deprecate runtests.
        if args.code_coverage:
            # runtests requires specifying an output directory and a double dash
            # before the argument list.
            command = [
                'runtests', '-o', '/tmp',
                _GetComponentUri(package_name)
            ]
            if args.test_realm_label:
                command += ['--realm-label', args.test_realm_label]
            command += ['--']
        elif args.use_run_test_component:
            command = ['run-test-component']
            if args.test_realm_label:
                command += ['--realm-label=%s' % args.test_realm_label]
            command.append(_GetComponentUri(package_name))
            command.append('--')
        else:
            command = ['run', _GetComponentUri(package_name)]

        command.extend(package_args)

        process = target.RunCommandPiped(command,
                                         stdin=open(os.devnull, 'r'),
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT,
                                         text=True)

        # Print the test process' symbolized standard output.
        for next_line in SymbolizerFilter(process.stdout,
                                          BuildIdsPaths(package_paths)):
            print(next_line.rstrip())

        process.wait()
        if process.returncode == 0:
            logging.info('Process exited normally with status code 0.')
        else:
            # The test runner returns an error status code if *any* tests fail,
            # so we should proceed anyway.
            logging.warning('Process exited with status code %d.' %
                            process.returncode)

    return process.returncode
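SymbolizerFilter differs from RunSymbolizer in that it hands symbolized lines back to Python instead of writing them to a file. A minimal sketch under the same assumption about RunSymbolizer's output argument:

import subprocess

def SymbolizerFilterSketch(input_stream, ids_txt_paths):
    # Sketch only: run the symbolizer as a filter subprocess and yield its
    # output line by line.
    symbolizer = RunSymbolizer(input_stream, subprocess.PIPE, ids_txt_paths)
    for line in symbolizer.stdout:
        yield line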