Example #1
import os

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local.variants import ALL_VARIANTS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context


# Base dir of the v8 checkout to be used as cwd.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

DEFAULT_OUT_GN = "out.gn"

ARCH_GUESS = utils.DefaultArch()

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
  # This needs to stay in sync with test/bot_default.isolate.
  "bot_default": [
    "debugger",
    "mjsunit",
    "cctest",
    "inspector",
    "webkit",
    "fuzzer",
    "message",
    "preparser",
Example #2
def Main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--arch',
                        help='The architecture to run tests for. Pass "auto" '
                        'to auto-detect.',
                        default='x64',
                        choices=SUPPORTED_ARCHS + ['auto'])
    parser.add_argument(
        '--buildbot',
        help='Adapt to the path structure used on buildbots and add '
        'timestamps/level to all logged status messages',
        default=False,
        action='store_true')
    parser.add_argument('-d',
                        '--device',
                        help='The device ID to run Android tests on. If not '
                        'given it will be autodetected.')
    parser.add_argument('--extra-flags',
                        help='Additional flags to pass to the test executable',
                        default='')
    parser.add_argument('--json-test-results',
                        help='Path to a file for storing json results.')
    parser.add_argument(
        '--json-test-results-secondary',
        help='Path to a file for storing json results from run '
        'without patch or for reference build run.')
    parser.add_argument('--outdir',
                        help='Base directory with compile output',
                        default='out')
    parser.add_argument(
        '--outdir-secondary',
        help='Base directory with compile output without patch '
        'or for reference build')
    parser.add_argument('--binary-override-path',
                        help='JavaScript engine binary. By default, d8 under '
                        'architecture-specific build dir. '
                        'Not supported in conjunction with outdir-secondary.')
    parser.add_argument('--prioritize',
                        help='Raise the priority to nice -20 for the '
                        'benchmarking process. Requires Linux, schedtool, and '
                        'sudo privileges.',
                        default=False,
                        action='store_true')
    parser.add_argument(
        '--affinitize',
        help='Run benchmarking process on the specified core. '
        'For example: --affinitize=0 will run the benchmark '
        'process on core 0. --affinitize=3 will run the '
        'benchmark process on core 3. Requires Linux, schedtool, '
        'and sudo privileges.',
        default=None)
    parser.add_argument(
        '--noaslr',
        help='Disable ASLR for the duration of the benchmarked '
        'process. Requires Linux and sudo privileges.',
        default=False,
        action='store_true')
    parser.add_argument(
        '--cpu-governor',
        help='Set cpu governor to specified policy for the '
        'duration of the benchmarked process. Typical options: '
        '"powersave" for more stable results, or "performance" '
        'for shorter completion time of suite, with potentially '
        'more noise in results.')
    parser.add_argument(
        '--filter',
        help='Only run the benchmarks beginning with this '
        'string. For example: '
        '--filter=JSTests/TypedArrays/ will run only TypedArray '
        'benchmarks from the JSTests suite.',
        default='')
    parser.add_argument(
        '--confidence-level',
        type=float,
        help='Repeatedly runs each benchmark until specified '
        'confidence level is reached. The value is interpreted '
        'as the number of standard deviations from the mean that '
        'all values must lie within. Typical values are 1, 2 and '
        '3 and correspond to 68%%, 95%% and 99.7%% probability '
        'that the measured value is within 0.1%% of the true '
        'value. Larger values result in more retries and thus '
        'longer runtime, but also provide more reliable results. '
        'Also see --max-total-duration flag.')
    parser.add_argument(
        '--max-total-duration',
        type=int,
        default=7140,  # 1h 59m
        help='Max total duration in seconds allowed for retries '
        'across all tests. This is especially useful in '
        'combination with the --confidence-level flag.')
    parser.add_argument(
        '--dump-logcats-to',
        help='Writes logcat output from each test into specified '
        'directory. Only supported for android targets.')
    parser.add_argument('--run-count',
                        type=int,
                        default=0,
                        help='Override the run count specified by the test '
                        'suite. The default 0 uses the suite\'s config.')
    parser.add_argument('-v',
                        '--verbose',
                        default=False,
                        action='store_true',
                        help='Be verbose and print debug output.')
    parser.add_argument('suite',
                        nargs='+',
                        help='Path to the suite config file.')

    try:
        args = parser.parse_args(argv)
    except SystemExit:
        return INFRA_FAILURE_RETCODE

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO,
                        format='%(asctime)s %(levelname)-8s  %(message)s')

    if args.arch == 'auto':  # pragma: no cover
        args.arch = utils.DefaultArch()
        if args.arch not in SUPPORTED_ARCHS:
            logging.error('Auto-detected architecture "%s" is not supported.',
                          args.arch)
            return INFRA_FAILURE_RETCODE

    if (args.json_test_results_secondary
            and not args.outdir_secondary):  # pragma: no cover
        logging.error(
            'For writing secondary json test results, a secondary outdir '
            'path must be specified.')
        return INFRA_FAILURE_RETCODE

    workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

    if args.buildbot:
        build_config = 'Release'
    else:
        build_config = '%s.release' % args.arch

    if args.binary_override_path is None:
        args.shell_dir = os.path.join(workspace, args.outdir, build_config)
        default_binary_name = 'd8'
    else:
        if not os.path.isfile(args.binary_override_path):
            logging.error('binary-override-path must be a file name')
            return INFRA_FAILURE_RETCODE
        if args.outdir_secondary:
            logging.error(
                'specify either binary-override-path or outdir-secondary')
            return INFRA_FAILURE_RETCODE
        args.shell_dir = os.path.abspath(
            os.path.dirname(args.binary_override_path))
        default_binary_name = os.path.basename(args.binary_override_path)

    if args.outdir_secondary:
        args.shell_dir_secondary = os.path.join(workspace,
                                                args.outdir_secondary,
                                                build_config)
    else:
        args.shell_dir_secondary = None

    if args.json_test_results:
        args.json_test_results = os.path.abspath(args.json_test_results)

    if args.json_test_results_secondary:
        args.json_test_results_secondary = os.path.abspath(
            args.json_test_results_secondary)

    # Ensure all arguments have absolute path before we start changing current
    # directory.
    args.suite = list(map(os.path.abspath, args.suite))

    prev_aslr = None
    prev_cpu_gov = None
    platform = Platform.GetPlatform(args)

    result_tracker = ResultTracker()
    result_tracker_secondary = ResultTracker()
    have_failed_tests = False
    with CustomMachineConfiguration(governor=args.cpu_governor,
                                    disable_aslr=args.noaslr) as conf:
        for path in args.suite:
            if not os.path.exists(path):  # pragma: no cover
                result_tracker.AddError(
                    'Configuration file %s does not exist.' % path)
                continue

            with open(path) as f:
                suite = json.loads(f.read())

            # If no name is given, default to the file name without .json.
            suite.setdefault('name',
                             os.path.splitext(os.path.basename(path))[0])

            # Setup things common to one test suite.
            platform.PreExecution()

            # Build the graph/trace tree structure.
            default_parent = DefaultSentinel(default_binary_name)
            root = BuildGraphConfigs(suite, args.arch, default_parent)

            # Callback to be called on each node on traversal.
            def NodeCB(node):
                platform.PreTests(node, path)

            # Traverse graph/trace tree and iterate over all runnables.
            start = time.time()
            try:
                for runnable in FlattenRunnables(root, NodeCB):
                    runnable_name = '/'.join(runnable.graphs)
                    if (not runnable_name.startswith(args.filter)
                            and runnable_name + '/' != args.filter):
                        continue
                    logging.info('>>> Running suite: %s', runnable_name)

                    def RunGenerator(runnable):
                        if args.confidence_level:
                            counter = 0
                            while not result_tracker.HasEnoughRuns(
                                    runnable, args.confidence_level):
                                yield counter
                                counter += 1
                        else:
                            for i in range(
                                    max(1, args.run_count or runnable.run_count)):
                                yield i

                    for i in RunGenerator(runnable):
                        attempts_left = runnable.retry_count + 1
                        while attempts_left:
                            total_duration = time.time() - start
                            if total_duration > args.max_total_duration:
                                logging.info(
                                    '>>> Stopping now since running for too long (%ds > %ds)',
                                    total_duration, args.max_total_duration)
                                raise MaxTotalDurationReachedError()

                            output, output_secondary = platform.Run(
                                runnable,
                                i,
                                secondary=args.shell_dir_secondary)
                            result_tracker.AddRunnableDuration(
                                runnable, output.duration)
                            result_tracker_secondary.AddRunnableDuration(
                                runnable, output_secondary.duration)

                            if (output.IsSuccess()
                                    and output_secondary.IsSuccess()):
                                runnable.ProcessOutput(output, result_tracker,
                                                       i)
                                if output_secondary is not NULL_OUTPUT:
                                    runnable.ProcessOutput(
                                        output_secondary,
                                        result_tracker_secondary, i)
                                break

                            attempts_left -= 1
                            if not attempts_left:
                                logging.info(
                                    '>>> Suite %s failed after %d retries',
                                    runnable_name, runnable.retry_count + 1)
                                have_failed_tests = True
                            else:
                                logging.info('>>> Retrying suite: %s',
                                             runnable_name)
            except MaxTotalDurationReachedError:
                have_failed_tests = True

            platform.PostExecution()

        if args.json_test_results:
            result_tracker.WriteToFile(args.json_test_results)
        else:  # pragma: no cover
            print('Primary results:', result_tracker)

    if args.shell_dir_secondary:
        if args.json_test_results_secondary:
            result_tracker_secondary.WriteToFile(
                args.json_test_results_secondary)
        else:  # pragma: no cover
            print('Secondary results:', result_tracker_secondary)

    if (result_tracker.errors or result_tracker_secondary.errors
            or have_failed_tests):
        return 1

    return 0
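
A minimal sketch of how an entry point like Main() above is typically wired into a script; the sys.argv slicing and the exit-code passthrough are illustrative assumptions, not part of the example itself.

import sys

if __name__ == '__main__':
    sys.exit(Main(sys.argv[1:]))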
Example #3
def Main(argv):
  parser = argparse.ArgumentParser()
  parser.add_argument('--arch',
                      help='The architecture to run tests for. Pass "auto" '
                      'to auto-detect.', default='x64',
                      choices=SUPPORTED_ARCHS + ['auto'])
  parser.add_argument('--buildbot',
                      help='Adapt to the path structure used on buildbots and add '
                      'timestamps/level to all logged status messages',
                      default=False, action='store_true')
  parser.add_argument('-d', '--device',
                      help='The device ID to run Android tests on. If not '
                      'given it will be autodetected.')
  parser.add_argument('--extra-flags',
                      help='Additional flags to pass to the test executable',
                      default='')
  parser.add_argument('--json-test-results',
                      help='Path to a file for storing json results.')
  parser.add_argument('--json-test-results-secondary',
                      help='Path to a file for storing json results from run '
                      'without patch or for reference build run.')
  parser.add_argument('--outdir', help='Base directory with compile output',
                      default='out')
  parser.add_argument('--outdir-secondary',
                      help='Base directory with compile output without patch '
                      'or for reference build')
  parser.add_argument('--binary-override-path',
                      help='JavaScript engine binary. By default, d8 under '
                      'architecture-specific build dir. '
                      'Not supported in conjunction with outdir-secondary.')
  parser.add_argument('--prioritize',
                      help='Raise the priority to nice -20 for the '
                      'benchmarking process. Requires Linux, schedtool, and '
                      'sudo privileges.', default=False, action='store_true')
  parser.add_argument('--affinitize',
                      help='Run benchmarking process on the specified core. '
                      'For example: --affinitize=0 will run the benchmark '
                      'process on core 0. --affinitize=3 will run the '
                      'benchmark process on core 3. Requires Linux, schedtool, '
                      'and sudo privileges.', default=None)
  parser.add_argument('--noaslr',
                      help='Disable ASLR for the duration of the benchmarked '
                      'process. Requires Linux and sudo privileges.',
                      default=False, action='store_true')
  parser.add_argument('--cpu-governor',
                      help='Set cpu governor to specified policy for the '
                      'duration of the benchmarked process. Typical options: '
                      '"powersave" for more stable results, or "performance" '
                      'for shorter completion time of suite, with potentially '
                      'more noise in results.')
  parser.add_argument('--filter',
                      help='Only run the benchmarks beginning with this '
                      'string. For example: '
                      '--filter=JSTests/TypedArrays/ will run only TypedArray '
                      'benchmarks from the JSTests suite.',
                      default='')
  parser.add_argument('--dump-logcats-to',
                      help='Writes logcat output from each test into specified '
                      'directory. Only supported for android targets.')
  parser.add_argument("--run-count", type=int, default=0,
                      help="Override the run count specified by the test "
                           "suite. The default 0 uses the suite's config.")
  parser.add_argument('suite', nargs='+', help='Path to the suite config file.')

  try:
    args = parser.parse_args(argv)
  except SystemExit:
    return INFRA_FAILURE_RETCODE

  logging.basicConfig(
      level=logging.INFO, format='%(asctime)s %(levelname)-8s  %(message)s')

  if args.arch == 'auto':  # pragma: no cover
    args.arch = utils.DefaultArch()
    if args.arch not in SUPPORTED_ARCHS:
      logging.error(
          'Auto-detected architecture "%s" is not supported.', args.arch)
      return INFRA_FAILURE_RETCODE

  if (args.json_test_results_secondary and
      not args.outdir_secondary):  # pragma: no cover
    logging.error('For writing secondary json test results, a secondary outdir '
                  'path must be specified.')
    return INFRA_FAILURE_RETCODE

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

  if args.buildbot:
    build_config = 'Release'
  else:
    build_config = '%s.release' % args.arch

  if args.binary_override_path is None:
    args.shell_dir = os.path.join(workspace, args.outdir, build_config)
    default_binary_name = 'd8'
  else:
    if not os.path.isfile(args.binary_override_path):
      logging.error('binary-override-path must be a file name')
      return INFRA_FAILURE_RETCODE
    if args.outdir_secondary:
      logging.error('specify either binary-override-path or outdir-secondary')
      return INFRA_FAILURE_RETCODE
    args.shell_dir = os.path.abspath(
        os.path.dirname(args.binary_override_path))
    default_binary_name = os.path.basename(args.binary_override_path)

  if args.outdir_secondary:
    args.shell_dir_secondary = os.path.join(
        workspace, args.outdir_secondary, build_config)
  else:
    args.shell_dir_secondary = None

  if args.json_test_results:
    args.json_test_results = os.path.abspath(args.json_test_results)

  if args.json_test_results_secondary:
    args.json_test_results_secondary = os.path.abspath(
        args.json_test_results_secondary)

  # Ensure all arguments have absolute path before we start changing current
  # directory.
  args.suite = list(map(os.path.abspath, args.suite))

  prev_aslr = None
  prev_cpu_gov = None
  platform = Platform.GetPlatform(args)

  results = Results()
  results_secondary = Results()
  # We use list here to allow modification in nested function below.
  have_failed_tests = [False]
  with CustomMachineConfiguration(governor=args.cpu_governor,
                                  disable_aslr=args.noaslr) as conf:
    for path in args.suite:
      if not os.path.exists(path):  # pragma: no cover
        results.errors.append('Configuration file %s does not exist.' % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault('name', os.path.splitext(os.path.basename(path))[0])

      # Setup things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, args.arch, default_parent)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and iterate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        runnable_name = '/'.join(runnable.graphs)
        if (not runnable_name.startswith(args.filter) and
            runnable_name + '/' != args.filter):
          continue
        logging.info('>>> Running suite: %s', runnable_name)
        durations = []
        durations_secondary = []

        def Runner():
          """Output generator that reruns several times."""
          for i in range(0, max(1, args.run_count or runnable.run_count)):
            attempts_left = runnable.retry_count + 1
            while attempts_left:
              output, output_secondary = platform.Run(runnable, i)
              if output.IsSuccess() and output_secondary.IsSuccess():
                durations.append(output.duration)
                if output_secondary is not NULL_OUTPUT:
                  durations_secondary.append(output_secondary.duration)
                yield output, output_secondary
                break
              attempts_left -= 1
              if not attempts_left:  # ignore failures until last attempt
                have_failed_tests[0] = True
              else:
                logging.info('>>> Retrying suite: %s', runnable_name)

        # Let runnable iterate over all runs and handle output.
        result, result_secondary = runnable.Run(
          Runner, trybot=args.shell_dir_secondary)
        results += result
        results_secondary += result_secondary
        if runnable.has_timeouts:
          results.timeouts.append(runnable_name)
        if runnable.has_near_timeouts:
          results.near_timeouts.append(runnable_name)
        results.runnable_durations.append({
          'graphs': runnable.graphs,
          'durations': durations,
          'timeout': runnable.timeout,
        })
        if durations_secondary:
          results_secondary.runnable_durations.append({
            'graphs': runnable.graphs,
            'durations': durations_secondary,
            'timeout': runnable.timeout,
          })

      platform.PostExecution()

    if args.json_test_results:
      results.WriteToFile(args.json_test_results)
    else:  # pragma: no cover
      print(results)

  if args.json_test_results_secondary:
    results_secondary.WriteToFile(args.json_test_results_secondary)
  else:  # pragma: no cover
    print(results_secondary)

  if results.errors or have_failed_tests[0]:
    return 1

  return 0
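
The have_failed_tests = [False] one-element list above is used because assigning to a plain outer variable inside the nested Runner() would rebind a new local instead of updating the flag, whereas mutating a list element is visible to the enclosing scope. A standalone sketch of the pattern, not taken from the example (in Python 3 the nonlocal keyword achieves the same effect):

def outer():
  failed = [False]  # shared mutable flag

  def inner():
    # Mutating the element updates the shared flag; a bare assignment
    # 'failed = True' would only create a new local inside inner().
    failed[0] = True

  inner()
  return failed[0]  # True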