Example #1
    def _load_build_config(self, options):
        for outdir in self._possible_outdirs(options):
            try:
                self.build_config = self._do_load_build_config(
                    outdir, options.verbose)
            except TestRunnerError:
                pass

        if not self.build_config:  # pragma: no cover
            print('Failed to load build config')
            raise TestRunnerError

        print('Build found: %s' % self.outdir)
        if str(self.build_config):
            print('>>> Autodetected:')
            print(self.build_config)

        # Represents the OS where tests are run on. Same as host OS except for
        # Android, which is determined by build output.
        if self.build_config.is_android:
            self.target_os = 'android'
        else:
            self.target_os = utils.GuessOS()
Example #2
    def _Run(self, runnable, count, secondary=False):
        suffix = ' - secondary' if secondary else ''
        shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
        title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
        cmd = runnable.GetCommand(self.command_prefix, shell_dir,
                                  self.extra_flags)
        try:
            output = cmd.execute()
        except OSError:  # pragma: no cover
            logging.exception(title % "OSError")
            raise

        logging.info(title % "Stdout" + "\n%s", output.stdout)
        if output.stderr:  # pragma: no cover
            # Print stderr for debugging.
            logging.info(title % "Stderr" + "\n%s", output.stderr)
        if output.timed_out:
            logging.warning(">>> Test timed out after %ss.", runnable.timeout)
            raise TestFailedError()
        if output.exit_code != 0:
            logging.warning(">>> Test crashed.")
            raise TestFailedError()
        if '--prof' in self.extra_flags:
            os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
            if os_prefix:
                tick_tools = os.path.join(TOOLS_BASE,
                                          "%s-tick-processor" % os_prefix)
                subprocess.check_call(tick_tools + " --only-summary",
                                      shell=True)
            else:  # pragma: no cover
                logging.warning(
                    "Profiler option currently supported on Linux and Mac OS.")

        # time outputs to stderr
        if runnable.process_size:
            return output.stdout + output.stderr
        return output.stdout
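A note on the deferred formatting used for title in the method above: the doubled %% survives the first substitution as a literal %, so the same template serves every log label later on. A minimal standalone sketch of the pattern (the values are illustrative, not taken from the runner):

count, suffix = 0, ' - secondary'
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
# After the first substitution: ">>> %s (#1) - secondary:"
print(title % "Stdout")  # -> ">>> Stdout (#1) - secondary:"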
Example #3
def Execute(arch, mode, args, options, suites):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.auto_detect:
      # If an output dir with a build was passed, test directly in that
      # directory.
      shell_dir = os.path.join(BASE_DIR, options.outdir)
    elif options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
      mode = BuildbotToV8Mode(mode)
    else:
      shell_dir = os.path.join(
          BASE_DIR,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  if not os.path.exists(shell_dir):
      raise Exception('Could not find shell_dir: "%s"' % shell_dir)

  # Populate context object.
  mode_flags = MODES[mode]["flags"]

  # Simulators are slow, therefore allow a longer timeout.
  if arch in SLOW_ARCHS:
    options.timeout *= 2

  options.timeout *= MODES[mode]["timeout_scalefactor"]

  if options.predictable:
    # Predictable mode is slower.
    options.timeout *= 2

  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
                        mode_flags, options.verbose,
                        options.timeout,
                        options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness,
                        use_perf_data=not options.swarming,
                        sancov_dir=options.sancov_dir)

  # TODO(all): Combine "simulator" and "simulator_run".
  # TODO(machenbach): In GN we can derive simulator run from
  # target_arch != v8_target_arch in the dumped build config.
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
               'ppc', 'ppc64'] and \
      ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "gcov_coverage": options.gcov_coverage,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": options.novfp3,
    "predictable": options.predictable,
    "byteorder": sys.byteorder,
  }
  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests

    # First filtering by status applying the generic rules (independent of
    # variants).
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests)

    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_gen = s.CreateVariantGenerator(VARIANTS)
    variant_tests = [ t.CopyAddingFlags(v, flags)
                      for t in s.tests
                      for v in variant_gen.FilterVariantsByTest(t)
                      for flags in variant_gen.GetFlagSets(t, v) ]

    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        for i in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in execution.py)
          # or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % RandomSeed()]
      s.tests = [
        t.CopyAddingFlags(t.variant, flags)
        for t in variant_tests
        for flags in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests

    # Second filtering by status applying the variant-dependent rules.
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests, variants=True)

    s.tests = ShardTests(s.tests, options)
    num_tests += len(s.tests)

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  # Run the tests, either locally or distributed on the network.
  start_time = time.time()
  progress_indicator = progress.IndicatorNotifier()
  progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]())
  if options.junitout:
    progress_indicator.Register(progress.JUnitTestProgressIndicator(
        options.junitout, options.junittestsuite))
  if options.json_test_results:
    progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results, arch, MODES[mode]["execution_mode"],
        ctx.random_seed))
  if options.flakiness_results:
    progress_indicator.Register(progress.FlakinessTestProgressIndicator(
        options.flakiness_results))

  run_networked = not options.no_network
  if not run_networked:
    if options.verbose:
      print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, BASE_DIR)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)

  if num_tests == 0:
    print("Warning: no tests were run!")

  if exit_code == 1 and options.json_test_results:
    print("Force exit code 0 after failures. Json test results file generated "
          "with failure information.")
    exit_code = 0

  if options.sancov_dir:
    # If tests ran with sanitizer coverage, merge coverage files in the end.
    try:
      print "Merging sancov files."
      subprocess.check_call([
        sys.executable,
        join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
        "--coverage-dir=%s" % options.sancov_dir])
    except:
      print >> sys.stderr, "Error: Merging sancov files failed."
      exit_code = 1

  return exit_code
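The ShardTests call above splits each suite's test list according to --shard-count and --shard-run; its body is not shown here. A minimal sketch of how such a split is commonly done, assuming a 1-based shard_run and plain stride selection (an assumption, not the runner's actual helper):

def shard_tests_sketch(tests, shard_count, shard_run):
  # Assumed semantics: with a single shard nothing is filtered.
  if shard_count < 2:
    return tests
  return [t for i, t in enumerate(tests) if i % shard_count == shard_run - 1]

# Example: shard_tests_sketch(list(range(10)), 3, 1) keeps items 0, 3, 6 and 9.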
Example #4
def BuildOptions():
  result = optparse.OptionParser()
  result.usage = '%prog [options] [tests]'
  result.description = """TESTS: %s""" % (TEST_MAP["default"])
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect: %s" % SUPPORTED_ARCHS),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
  result.add_option("--sancov-dir",
                    help="Directory where to collect coverage data")
  result.add_option("--cfi-vptr",
                    help="Run tests with UBSAN cfi_vptr option.",
                    default=False, action="store_true")
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--dcheck-always-on",
                    help="Indicates that V8 was compiled with DCHECKs enabled",
                    default=False, action="store_true")
  result.add_option("--novfp3",
                    help="Indicates that V8 was compiled without VFP3 support",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
  result.add_option("--gcov-coverage",
                    help="Uses executables instrumented for gcov coverage",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--download-data-only",
                    help="Deprecated",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated,"
                    " uppercase for ninja and buildbot builds): %s" % MODES.keys(),
                    default="release,debug")
  result.add_option("--no-harness", "--noharness",
                    help="Run without test harness of a given suite",
                    default=False, action="store_true")
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks (deprecated)',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last run.",
                    default=False, dest="no_sorting", action="store_true")
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
  result.add_option("--variants",
                    help="Comma-separated list of testing variants;"
                    " default: \"%s\"" % ",".join(VARIANTS))
  result.add_option("--exhaustive-variants",
                    default=False, action="store_true",
                    help="Use exhaustive set of default variants:"
                    " \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("--gn", help="Scan out.gn for the last built configuration",
                    default=False, action="store_true")
  result.add_option("--predictable",
                    help="Compare output of several reruns of each test",
                    default=False, action="store_true")
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow tests)"))
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  result.add_option("--flakiness-results",
                    help="Path to a file for storing flakiness json.")
  result.add_option("--rerun-failures-count",
                    help=("Number of times to rerun each failing test case. "
                          "Very slow tests will be rerun only once."),
                    default=0, type="int")
  result.add_option("--rerun-failures-max",
                    help="Maximum number of failing test cases to rerun.",
                    default=100, type="int")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
  result.add_option("--swarming",
                    help="Indicates running test driver on swarming.",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default=TIMEOUT_DEFAULT, type="int")
  result.add_option("--tsan",
                    help="Regard test expectations for TSAN",
                    default=False, action="store_true")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
  result.add_option("--random-seed", default=0, dest="random_seed", type="int",
                    help="Default seed for initializing random generator")
  result.add_option("--random-seed-stress-count", default=1, type="int",
                    dest="random_seed_stress_count",
                    help="Number of runs with different random seeds")
  result.add_option("--msan",
                    help="Regard test expectations for MSAN",
                    default=False, action="store_true")
  return result
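BuildOptions only constructs the parser; the caller drives it through optparse's standard parse_args. A minimal illustration of that hand-off (the post-processing under --quickcheck is an assumption about typical usage, not the script's actual Main):

parser = BuildOptions()
options, args = parser.parse_args()  # args carries the positional test names
if options.quickcheck:
  # Every --flag is exposed as an attribute named after its dest.
  options.slow_tests = "skip"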
Example #5
    def _execute(self, args, options, suites):
      print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                             self.mode_name))
      # Populate context object.

      # Simulators are slow, therefore allow a longer timeout.
      if self.build_config.arch in SLOW_ARCHS:
        options.timeout *= 2

      options.timeout *= self.mode_options.timeout_scalefactor

      if self.build_config.predictable:
        # Predictable mode is slower.
        options.timeout *= 2

      ctx = context.Context(self.build_config.arch,
                            self.mode_options.execution_mode,
                            self.outdir,
                            self.mode_options.flags,
                            options.verbose,
                            options.timeout,
                            options.isolates,
                            options.command_prefix,
                            options.extra_flags,
                            self.build_config.no_i18n,
                            options.random_seed,
                            options.no_sorting,
                            options.rerun_failures_count,
                            options.rerun_failures_max,
                            options.no_harness,
                            use_perf_data=not options.swarming,
                            sancov_dir=self.sancov_dir,
                            infra_staging=options.infra_staging)

      # TODO(all): Combine "simulator" and "simulator_run".
      # TODO(machenbach): In GN we can derive simulator run from
      # target_arch != v8_target_arch in the dumped build config.
      simulator_run = (
        not options.dont_skip_simulator_slow_tests and
        self.build_config.arch in [
          'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
          'ppc64', 's390', 's390x'] and
        bool(base_runner.ARCH_GUESS) and
        self.build_config.arch != base_runner.ARCH_GUESS)
      # Find available test suites and read test cases from them.
      variables = {
        "arch": self.build_config.arch,
        "asan": self.build_config.asan,
        "byteorder": sys.byteorder,
        "dcheck_always_on": self.build_config.dcheck_always_on,
        "deopt_fuzzer": False,
        "gc_fuzzer": False,
        "gc_stress": options.gc_stress or options.random_gc_stress,
        "gcov_coverage": self.build_config.gcov_coverage,
        "isolates": options.isolates,
        "mode": self.mode_options.status_mode,
        "msan": self.build_config.msan,
        "no_harness": options.no_harness,
        "no_i18n": self.build_config.no_i18n,
        "no_snap": self.build_config.no_snap,
        "novfp3": options.novfp3,
        "predictable": self.build_config.predictable,
        "simulator": utils.UseSimulator(self.build_config.arch),
        "simulator_run": simulator_run,
        "system": utils.GuessOS(),
        "tsan": self.build_config.tsan,
        "ubsan_vptr": self.build_config.ubsan_vptr,
      }

      progress_indicator = progress.IndicatorNotifier()
      progress_indicator.Register(
        progress.PROGRESS_INDICATORS[options.progress]())
      if options.junitout:  # pragma: no cover
        progress_indicator.Register(progress.JUnitTestProgressIndicator(
            options.junitout, options.junittestsuite))
      if options.json_test_results:
        progress_indicator.Register(progress.JsonTestProgressIndicator(
          options.json_test_results,
          self.build_config.arch,
          self.mode_options.execution_mode,
          ctx.random_seed))
      if options.flakiness_results:  # pragma: no cover
        progress_indicator.Register(progress.FlakinessTestProgressIndicator(
            options.flakiness_results))

      if options.infra_staging:
        for s in suites:
          s.ReadStatusFile(variables)
          s.ReadTestCases(ctx)

        return self._run_test_procs(suites, args, options, progress_indicator,
                                    ctx)

      all_tests = []
      num_tests = 0
      for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
          s.FilterTestCasesByArgs(args)
        all_tests += s.tests

        # First filtering by status applying the generic rules (tests without
        # variants)
        if options.warn_unused:
          tests = [(t.name, t.variant) for t in s.tests]
          s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
        s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)

        if options.cat:
          verbose.PrintTestSource(s.tests)
          continue
        variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
        variant_tests = [ t.create_variant(v, flags)
                          for t in s.tests
                          for v in variant_gen.FilterVariantsByTest(t)
                          for flags in variant_gen.GetFlagSets(t, v) ]

        if options.random_seed_stress_count > 1:
          # Duplicate test for random seed stress mode.
          def iter_seed_flags():
            for _ in range(0, options.random_seed_stress_count):
              # Use given random seed for all runs (set by default in
              # execution.py) or a new random seed if none is specified.
              if options.random_seed:
                yield []
              else:
                yield ["--random-seed=%d" % self._random_seed()]
          s.tests = [
            t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
            for t in variant_tests
            for n, flags in enumerate(iter_seed_flags())
          ]
        else:
          s.tests = variant_tests

        # Second filtering by status applying also the variant-dependent rules.
        if options.warn_unused:
          tests = [(t.name, t.variant) for t in s.tests]
          s.statusfile.warn_unused_rules(tests, check_variant_rules=True)

        s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
        s.tests = self._shard_tests(s.tests, options)

        for t in s.tests:
          t.cmd = t.get_command(ctx)

        num_tests += len(s.tests)

      if options.cat:
        return 0  # We're done here.

      if options.report:
        verbose.PrintReport(all_tests)

      # Run the tests.
      start_time = time.time()

      if self.build_config.predictable:
        outproc_factory = predictable.get_outproc
      else:
        outproc_factory = None

      runner = execution.Runner(suites, progress_indicator, ctx,
                                outproc_factory)
      exit_code = runner.Run(options.j)
      overall_duration = time.time() - start_time

      if options.time:
        verbose.PrintTestDurations(suites, runner.outputs, overall_duration)

      if num_tests == 0:
        exit_code = 3
        print("Warning: no tests were run!")

      if exit_code == 1 and options.json_test_results:
        print("Force exit code 0 after failures. Json test results file "
              "generated with failure information.")
        exit_code = 0

      if self.sancov_dir:
        # If tests ran with sanitizer coverage, merge coverage files in the end.
        try:
          print "Merging sancov files."
          subprocess.check_call([
            sys.executable,
            join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
            "--coverage-dir=%s" % self.sancov_dir])
        except:
          print >> sys.stderr, "Error: Merging sancov files failed."
          exit_code = 1

      return exit_code
Example #6
def Execute(arch, mode, args, options, suites, workspace):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            # TODO(machenbach): Get rid of different output folder location on
            # buildbot. Currently this is capitalized Release and Debug.
            shell_dir = os.path.join(workspace, options.outdir, mode)
            mode = mode.lower()
        else:
            shell_dir = os.path.join(
                workspace,
                options.outdir,
                "%s.%s" % (arch, MODES[mode]["output_folder"]),
            )
    shell_dir = os.path.relpath(shell_dir)

    # Populate context object.
    mode_flags = MODES[mode]["flags"]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= MODES[mode]["timeout_scalefactor"]

    if options.predictable:
        # Predictable mode is slower.
        timeout *= 2

    # TODO(machenbach): Remove temporary verbose output on windows after
    # debugging driver-hung-up on XP.
    verbose_output = (options.verbose
                      or utils.IsWindows() and options.progress == "verbose")
    ctx = context.Context(
        arch, MODES[mode]["execution_mode"], shell_dir, mode_flags,
        verbose_output, timeout, options.isolates, options.command_prefix,
        options.extra_flags, options.no_i18n, options.random_seed,
        options.no_sorting, options.rerun_failures_count,
        options.rerun_failures_max, options.predictable, options.no_harness)

    # TODO(all): Combine "simulator" and "simulator_run".
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
                 'ppc', 'ppc64'] and \
        ARCH_GUESS and arch != ARCH_GUESS
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "isolates": options.isolates,
        "mode": MODES[mode]["status_mode"],
        "no_i18n": options.no_i18n,
        "no_snap": options.no_snap,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
        "tsan": options.tsan,
        "msan": options.msan,
        "dcheck_always_on": options.dcheck_always_on,
        "byteorder": sys.byteorder,
    }
    all_tests = []
    num_tests = 0
    test_id = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                                  options.slow_tests, options.pass_fail_tests)
        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
        s.tests = [
            t.CopyAddingFlags(v) for t in s.tests
            for v in s.VariantFlags(t, variant_flags)
        ]
        s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    if num_tests == 0:
        print "No tests to run."
        return 0

    # Run the tests, either locally or distributed on the network.
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    if options.junitout:
        progress_indicator = progress.JUnitTestProgressIndicator(
            progress_indicator, options.junitout, options.junittestsuite)
    if options.json_test_results:
        progress_indicator = progress.JsonTestProgressIndicator(
            progress_indicator, options.json_test_results, arch,
            MODES[mode]["execution_mode"])

    run_networked = not options.no_network
    if not run_networked:
        print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
        print("Network distribution is only supported on Linux, sorry!")
        run_networked = False
    peers = []
    if run_networked:
        peers = network_execution.GetPeers()
        if not peers:
            print(
                "No connection to distribution server; running tests locally.")
            run_networked = False
        elif len(peers) == 1:
            print("No other peers on the network; running tests locally.")
            run_networked = False
        elif num_tests <= 100:
            print("Less than 100 tests, running them locally.")
            run_networked = False

    if run_networked:
        runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                   ctx, peers, workspace)
    else:
        runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    overall_duration = time.time() - start_time

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)
    return exit_code
Example #7
def BuildOptions():
    result = optparse.OptionParser()
    result.add_option("--arch",
                      help=("The architecture to run tests for, "
                            "'auto' or 'native' for auto-detect"),
                      default="ia32,x64,arm")
    result.add_option("--arch-and-mode",
                      help="Architecture and mode in the format 'arch.mode'",
                      default=None)
    result.add_option("--asan",
                      help="Regard test expectations for ASAN",
                      default=False,
                      action="store_true")
    result.add_option("--buildbot",
                      help="Adapt to path structure used on buildbots",
                      default=False,
                      action="store_true")
    result.add_option(
        "--dcheck-always-on",
        help="Indicates that V8 was compiled with DCHECKs enabled",
        default=False,
        action="store_true")
    result.add_option("--cat",
                      help="Print the source of the tests",
                      default=False,
                      action="store_true")
    result.add_option("--flaky-tests",
                      help="Regard tests marked as flaky (run|skip|dontcare)",
                      default="dontcare")
    result.add_option("--slow-tests",
                      help="Regard slow tests (run|skip|dontcare)",
                      default="dontcare")
    result.add_option("--pass-fail-tests",
                      help="Regard pass|fail tests (run|skip|dontcare)",
                      default="dontcare")
    result.add_option("--gc-stress",
                      help="Switch on GC stress mode",
                      default=False,
                      action="store_true")
    result.add_option(
        "--command-prefix",
        help="Prepended to each shell command used to run a test",
        default="")
    result.add_option("--download-data",
                      help="Download missing test suite data",
                      default=False,
                      action="store_true")
    result.add_option("--download-data-only",
                      help="Download missing test suite data and exit",
                      default=False,
                      action="store_true")
    result.add_option("--extra-flags",
                      help="Additional flags to pass to each test command",
                      default="")
    result.add_option("--isolates",
                      help="Whether to test isolates",
                      default=False,
                      action="store_true")
    result.add_option("-j",
                      help="The number of parallel tasks to run",
                      default=0,
                      type="int")
    result.add_option("-m",
                      "--mode",
                      help="The test modes in which to run (comma-separated)",
                      default="release,debug")
    result.add_option("--no-harness",
                      "--noharness",
                      help="Run without test harness of a given suite",
                      default=False,
                      action="store_true")
    result.add_option("--no-i18n",
                      "--noi18n",
                      help="Skip internationalization tests",
                      default=False,
                      action="store_true")
    result.add_option("--no-network",
                      "--nonetwork",
                      help="Don't distribute tests on the network",
                      default=(utils.GuessOS() != "linux"),
                      dest="no_network",
                      action="store_true")
    result.add_option("--no-presubmit",
                      "--nopresubmit",
                      help='Skip presubmit checks',
                      default=False,
                      dest="no_presubmit",
                      action="store_true")
    result.add_option("--no-snap",
                      "--nosnap",
                      help='Test a build compiled without snapshot.',
                      default=False,
                      dest="no_snap",
                      action="store_true")
    result.add_option(
        "--no-sorting",
        "--nosorting",
        help="Don't sort tests according to duration of last run.",
        default=False,
        dest="no_sorting",
        action="store_true")
    result.add_option(
        "--no-stress",
        "--nostress",
        help="Don't run crankshaft --always-opt --stress-op test",
        default=False,
        dest="no_stress",
        action="store_true")
    result.add_option("--no-variants",
                      "--novariants",
                      help="Don't run any testing variants",
                      default=False,
                      dest="no_variants",
                      action="store_true")
    result.add_option("--variants",
                      help="Comma-separated list of testing variants")
    result.add_option("--outdir",
                      help="Base directory with compile output",
                      default="out")
    result.add_option("--predictable",
                      help="Compare output of several reruns of each test",
                      default=False,
                      action="store_true")
    result.add_option("-p",
                      "--progress",
                      help=("The style of progress indicator"
                            " (verbose, dots, color, mono)"),
                      choices=progress.PROGRESS_INDICATORS.keys(),
                      default="mono")
    result.add_option("--quickcheck",
                      default=False,
                      action="store_true",
                      help=("Quick check mode (skip slow/flaky tests)"))
    result.add_option("--report",
                      help="Print a summary of the tests to be run",
                      default=False,
                      action="store_true")
    result.add_option("--json-test-results",
                      help="Path to a file for storing json results.")
    result.add_option("--rerun-failures-count",
                      help=("Number of times to rerun each failing test case. "
                            "Very slow tests will be rerun only once."),
                      default=0,
                      type="int")
    result.add_option("--rerun-failures-max",
                      help="Maximum number of failing test cases to rerun.",
                      default=100,
                      type="int")
    result.add_option("--shard-count",
                      help="Split testsuites into this number of shards",
                      default=1,
                      type="int")
    result.add_option("--shard-run",
                      help="Run this shard from the split up tests.",
                      default=1,
                      type="int")
    result.add_option("--shell",
                      help="DEPRECATED! use --shell-dir",
                      default="")
    result.add_option("--shell-dir",
                      help="Directory containing executables",
                      default="")
    result.add_option(
        "--dont-skip-slow-simulator-tests",
        help="Don't skip more slow tests when using a simulator.",
        default=False,
        action="store_true",
        dest="dont_skip_simulator_slow_tests")
    result.add_option("--stress-only",
                      help="Only run tests with --always-opt --stress-opt",
                      default=False,
                      action="store_true")
    result.add_option("--time",
                      help="Print timing information after running",
                      default=False,
                      action="store_true")
    result.add_option("-t",
                      "--timeout",
                      help="Timeout in seconds",
                      default=-1,
                      type="int")
    result.add_option("--tsan",
                      help="Regard test expectations for TSAN",
                      default=False,
                      action="store_true")
    result.add_option("-v",
                      "--verbose",
                      help="Verbose output",
                      default=False,
                      action="store_true")
    result.add_option("--valgrind",
                      help="Run tests through valgrind",
                      default=False,
                      action="store_true")
    result.add_option("--warn-unused",
                      help="Report unused rules",
                      default=False,
                      action="store_true")
    result.add_option("--junitout", help="File name of the JUnit output")
    result.add_option("--junittestsuite",
                      help="The testsuite name in the JUnit output file",
                      default="v8tests")
    result.add_option("--random-seed",
                      default=0,
                      dest="random_seed",
                      help="Default seed for initializing random generator")
    result.add_option("--msan",
                      help="Regard test expectations for MSAN",
                      default=False,
                      action="store_true")
    return result
Example #8
def Execute(arch, mode, args, options, suites, workspace):
    print(">>> Running tests for %s.%s" % (arch, mode))

    dist = Distribution(options)

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            shell_dir = os.path.join(workspace, options.outdir, mode)
            mode = mode.lower()
        else:
            shell_dir = os.path.join(workspace, options.outdir,
                                     "%s.%s" % (arch, mode))
    shell_dir = os.path.relpath(shell_dir)

    # Populate context object.
    mode_flags = MODE_FLAGS[mode]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= TIMEOUT_SCALEFACTOR[mode]
    ctx = context.Context(arch, mode, shell_dir, mode_flags, options.verbose,
                          timeout, options.isolates, options.command_prefix,
                          options.extra_flags, False)

    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": True,
        "gc_stress": False,
        "isolates": options.isolates,
        "mode": mode,
        "no_i18n": False,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
    }
    all_tests = []
    num_tests = 0
    test_id = 0

    # Remember test case prototypes for the fuzzing phase.
    test_backup = dict((s, []) for s in suites)

    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(False)
        test_backup[s] = s.tests
        analysis_flags = [
            "--deopt-every-n-times",
            "%d" % MAX_DEOPT, "--print-deopt-stress"
        ]
        s.tests = [t.CopyAddingFlags(analysis_flags) for t in s.tests]
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if num_tests == 0:
        print "No tests to run."
        return 0

    try:
        print(">>> Collection phase")
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        exit_code = runner.Run(options.j)
        if runner.terminate:
            return exit_code

    except KeyboardInterrupt:
        return 1

    print(">>> Analysis phase")
    num_tests = 0
    test_id = 0
    for s in suites:
        test_results = {}
        for t in s.tests:
            for line in t.output.stdout.splitlines():
                if line.startswith("=== Stress deopt counter: "):
                    test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
        for t in s.tests:
            if t.path not in test_results:
                print "Missing results for %s" % t.path
        if options.dump_results_file:
            results_dict = dict(
                (t.path, n) for (t, n) in test_results.iteritems())
            with file("%s.%d.txt" % (options.dump_results_file, time.time()),
                      "w") as f:
                f.write(json.dumps(results_dict))

        # Reset tests and redistribute the prototypes from the collection phase.
        s.tests = []
        if options.verbose:
            print "Test distributions:"
        for t in test_backup[s]:
            max_deopt = test_results.get(t.path, 0)
            if max_deopt == 0:
                continue
            n_deopt = CalculateNTests(max_deopt, options)
            distribution = dist.Distribute(n_deopt, max_deopt)
            if options.verbose:
                print "%s %s" % (t.path, distribution)
            for i in distribution:
                fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
                s.tests.append(t.CopyAddingFlags(fuzzing_flags))
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if num_tests == 0:
        print "No tests to run."
        return 0

    try:
        print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        exit_code = runner.Run(options.j)
        if runner.terminate:
            return exit_code

    except KeyboardInterrupt:
        return 1

    return exit_code
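In the analysis phase above, the counter printed by --print-deopt-stress is parsed by taking the last token of the line; a small worked illustration (the MAX_DEOPT value is assumed for the example, the real constant is defined elsewhere in the script):

MAX_DEOPT = 400  # assumed for illustration
line = "=== Stress deopt counter: 389"
remaining = int(line.split(" ")[-1])        # 389
deopt_points_used = MAX_DEOPT - remaining   # 11 -> fuzzing range for this test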
Example #9
def BuildOptions():
  result = optparse.OptionParser()
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated)",
                    default="release,debug")
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-stress", "--nostress",
                    help="Don't run crankshaft --always-opt --stress-op test",
                    default=False, dest="no_stress", action="store_true")
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--stress-only",
                    help="Only run tests with --always-opt --stress-opt",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default=-1, type="int")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  return result
Example #10
def Execute(arch, mode, args, options, suites, workspace):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(workspace, options.outdir,
                               "%s.%s" % (arch, mode))
  shell_dir = os.path.relpath(shell_dir)

  # Populate context object.
  mode_flags = MODE_FLAGS[mode]
  options.timeout *= TIMEOUT_SCALEFACTOR[mode]
  ctx = context.Context(arch, mode, shell_dir,
                        mode_flags, options.verbose,
                        options.timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags)

  # Find available test suites and read test cases from them.
  variables = {
    "mode": mode,
    "arch": arch,
    "system": utils.GuessOS(),
    "isolates": options.isolates
  }
  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    all_tests += s.tests
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    s.FilterTestCasesByStatus(options.warn_unused)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_flags = s.VariantFlags() or VARIANT_FLAGS
    s.tests = [ t.CopyAddingFlags(v) for t in s.tests for v in variant_flags ]
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  if num_tests == 0:
    print "No tests to run."
    return 0

  # Run the tests, either locally or distributed on the network.
  try:
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()

    run_networked = not options.no_network
    if not run_networked:
      print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
      print("Network distribution is only supported on Linux, sorry!")
      run_networked = False
    peers = []
    if run_networked:
      peers = network_execution.GetPeers()
      if not peers:
        print("No connection to distribution server; running tests locally.")
        run_networked = False
      elif len(peers) == 1:
        print("No other peers on the network; running tests locally.")
        run_networked = False
      elif num_tests <= 100:
        print("Less than 100 tests, running them locally.")
        run_networked = False

    if run_networked:
      runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                 ctx, peers, workspace)
    else:
      runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    if runner.terminate:
      return exit_code
    overall_duration = time.time() - start_time
  except KeyboardInterrupt:
    return 1

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code
"""

# for py2/py3 compatibility
from __future__ import absolute_import
from __future__ import print_function

import sys

from testrunner.local import command
from testrunner.local import utils

MAX_TRIES = 3
TIMEOUT = 120

# Predictable mode works only when run on the host os.
command.setup(utils.GuessOS(), None)


def maybe_decode(message):
    if not isinstance(message, str):
        return message.decode()
    return message


def main(args):
    def allocation_str(stdout):
        for line in reversed((stdout or '').splitlines()):
            if maybe_decode(line).startswith('### Allocations = '):
                return line
        return None
Example #12
    def _setup_env(self, options):
        """Setup additional environment variables."""

        # Many tests assume an English interface.
        os.environ['LANG'] = 'en_US.UTF-8'

        external_symbolizer_path = os.path.join(
            base_runner.BASE_DIR,
            'third_party',
            'llvm-build',
            'Release+Asserts',
            'bin',
            'llvm-symbolizer',
        )
        if utils.IsWindows():
            # Quote, because sanitizers might confuse colon as option separator.
            external_symbolizer_path = '"%s.exe"' % external_symbolizer_path
        symbolizer = 'external_symbolizer_path=%s' % external_symbolizer_path

        if self.build_config.asan:
            asan_options = [symbolizer, "allow_user_segv_handler=1"]
            if not utils.GuessOS() in ['macos', 'windows']:
                # LSAN is not available on mac and windows.
                asan_options.append('detect_leaks=1')
            os.environ['ASAN_OPTIONS'] = ":".join(asan_options)

        if options.sancov_dir:
            assert os.path.exists(options.sancov_dir)
            os.environ['ASAN_OPTIONS'] = ":".join([
                'coverage=1',
                'coverage_dir=%s' % options.sancov_dir,
                symbolizer,
                "allow_user_segv_handler=1",
            ])

        if self.build_config.cfi_vptr:
            os.environ['UBSAN_OPTIONS'] = ":".join([
                'print_stacktrace=1',
                'print_summary=1',
                'symbolize=1',
                symbolizer,
            ])

        if self.build_config.ubsan_vptr:
            os.environ['UBSAN_OPTIONS'] = ":".join([
                'print_stacktrace=1',
                symbolizer,
            ])

        if self.build_config.msan:
            os.environ['MSAN_OPTIONS'] = symbolizer

        if self.build_config.tsan:
            suppressions_file = os.path.join(base_runner.BASE_DIR, 'tools',
                                             'sanitizers',
                                             'tsan_suppressions.txt')
            os.environ['TSAN_OPTIONS'] = " ".join([
                symbolizer,
                'suppressions=%s' % suppressions_file,
                'exit_code=0',
                'report_thread_leaks=0',
                'history_size=7',
                'report_destroy_locked=0',
            ])
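All of the sanitizer settings above end up as a single environment variable per tool, colon-joined for ASAN and UBSAN and space-joined for TSAN. A small self-contained sketch of the resulting ASAN value (the symbolizer path is a placeholder):

import os

symbolizer = 'external_symbolizer_path=/path/to/llvm-symbolizer'  # placeholder path
asan_options = [symbolizer, 'allow_user_segv_handler=1', 'detect_leaks=1']
os.environ['ASAN_OPTIONS'] = ':'.join(asan_options)
# ASAN_OPTIONS=external_symbolizer_path=/path/to/llvm-symbolizer:allow_user_segv_handler=1:detect_leaks=1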
Example #13
    def _execute(self, args, options, suites):
        print(">>> Running tests for %s.%s" %
              (self.build_config.arch, self.mode_name))

        dist = self._distribution(options)

        # Populate context object.
        timeout = options.timeout
        if timeout == -1:
            # Simulators are slow, therefore allow a longer default timeout.
            if self.build_config.arch in SLOW_ARCHS:
                timeout = 2 * TIMEOUT_DEFAULT
            else:
                timeout = TIMEOUT_DEFAULT

        timeout *= self.mode_options.timeout_scalefactor
        ctx = context.Context(
            self.build_config.arch,
            self.mode_options.execution_mode,
            self.outdir,
            self.mode_options.flags,
            options.verbose,
            timeout,
            options.isolates,
            options.command_prefix,
            options.extra_flags,
            False,  # Keep i18n on by default.
            options.random_seed,
            True,  # No sorting of test cases.
            0,  # Don't rerun failing tests.
            0,  # No use of a rerun-failing-tests maximum.
            False,  # No predictable mode.
            False,  # No no_harness mode.
            False,  # Don't use perf data.
            False)  # Coverage not supported.

        # Find available test suites and read test cases from them.
        variables = {
            "arch": self.build_config.arch,
            "asan": self.build_config.asan,
            "byteorder": sys.byteorder,
            "dcheck_always_on": self.build_config.dcheck_always_on,
            "deopt_fuzzer": True,
            "gc_fuzzer": False,
            "gc_stress": False,
            "gcov_coverage": self.build_config.gcov_coverage,
            "isolates": options.isolates,
            "mode": self.mode_options.status_mode,
            "msan": self.build_config.msan,
            "no_harness": False,
            "no_i18n": self.build_config.no_i18n,
            "no_snap": self.build_config.no_snap,
            "novfp3": False,
            "predictable": self.build_config.predictable,
            "simulator": utils.UseSimulator(self.build_config.arch),
            "simulator_run": False,
            "system": utils.GuessOS(),
            "tsan": self.build_config.tsan,
            "ubsan_vptr": self.build_config.ubsan_vptr,
        }
        num_tests = 0
        test_id = 0

        # Remember test case prototypes for the fuzzing phase.
        test_backup = dict((s, []) for s in suites)

        for s in suites:
            s.ReadStatusFile(variables)
            s.ReadTestCases(ctx)
            if len(args) > 0:
                s.FilterTestCasesByArgs(args)
            s.FilterTestCasesByStatus(False)
            test_backup[s] = s.tests
            analysis_flags = [
                "--deopt-every-n-times",
                "%d" % MAX_DEOPT, "--print-deopt-stress"
            ]
            s.tests = [
                t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests
            ]
            num_tests += len(s.tests)
            for t in s.tests:
                t.id = test_id
                test_id += 1

        if num_tests == 0:
            print "No tests to run."
            return 0

        print(">>> Collection phase")
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        exit_code = runner.Run(options.j)

        print(">>> Analysis phase")
        num_tests = 0
        test_id = 0
        for s in suites:
            test_results = {}
            for t in s.tests:
                for line in t.output.stdout.splitlines():
                    if line.startswith("=== Stress deopt counter: "):
                        test_results[t.path] = MAX_DEOPT - int(
                            line.split(" ")[-1])
            for t in s.tests:
                if t.path not in test_results:
                    print "Missing results for %s" % t.path
            if options.dump_results_file:
                results_dict = dict(
                    (t.path, n) for (t, n) in test_results.items())
                with open(
                        "%s.%d.txt" % (options.dump_results_file, time.time()),
                        "w") as f:
                    f.write(json.dumps(results_dict))

            # Reset tests and redistribute the prototypes from the collection phase.
            s.tests = []
            if options.verbose:
                print "Test distributions:"
            for t in test_backup[s]:
                max_deopt = test_results.get(t.path, 0)
                if max_deopt == 0:
                    continue
                n_deopt = self._calculate_n_tests(max_deopt, options)
                distribution = dist.Distribute(n_deopt, max_deopt)
                if options.verbose:
                    print "%s %s" % (t.path, distribution)
                for i in distribution:
                    fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
                    s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
            num_tests += len(s.tests)
            for t in s.tests:
                t.id = test_id
                test_id += 1

        if num_tests == 0:
            print "No tests to run."
            return 0

        print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        code = runner.Run(options.j)
        return exit_code or code
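
# Illustrative sketch (not part of the fuzzer above): the analysis phase keys
# off the "=== Stress deopt counter: N" line that the collection run prints
# under --print-deopt-stress; the remaining deopt budget per test is
# MAX_DEOPT minus that counter. A standalone parser with the same logic,
# taking max_deopt as a parameter instead of the module constant:
def remaining_deopt_budget(stdout, max_deopt):
    """Return max_deopt minus the reported stress deopt counter, or None."""
    for line in stdout.splitlines():
        if line.startswith("=== Stress deopt counter: "):
            return max_deopt - int(line.split(" ")[-1])
    return None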
Пример #14
0
def Execute(arch, mode, args, options, suites, workspace):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            shell_dir = os.path.join(workspace, options.outdir, mode)
            mode = mode.lower()
        else:
            shell_dir = os.path.join(workspace, options.outdir,
                                     "%s.%s" % (arch, mode))
    shell_dir = os.path.relpath(shell_dir)

    if mode == "optdebug":
        mode = "debug"  # "optdebug" is just an alias.

    # Populate context object.
    mode_flags = MODE_FLAGS[mode]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= TIMEOUT_SCALEFACTOR[mode]
    ctx = context.Context(arch, mode, shell_dir, mode_flags, options.verbose,
                          timeout, options.isolates, options.command_prefix,
                          options.extra_flags, options.no_i18n)

    # TODO(all): Combine "simulator" and "simulator_run".
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['a64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
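    # Illustrative example (the host architecture is an assumption): on an x64
    # host ARCH_GUESS is "x64", so arch == "arm" gives simulator_run == True
    # unless --dont-skip-simulator-slow-tests is passed, while arch == "x64"
    # gives simulator_run == False.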
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "isolates": options.isolates,
        "mode": mode,
        "no_i18n": options.no_i18n,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
    }
    all_tests = []
    num_tests = 0
    test_id = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                                  options.slow_tests, options.pass_fail_tests)
        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
        s.tests = [
            t.CopyAddingFlags(v) for t in s.tests
            for v in s.VariantFlags(t, variant_flags)
        ]
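        # Illustrative expansion (variant names are an assumption): with three
        # entries in VARIANTS, e.g. "default", "stress" and "turbofan", every
        # filtered test is copied once per variant flag set returned by
        # s.VariantFlags(), so 100 tests become up to 300 test cases before
        # sharding.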
        s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    if num_tests == 0:
        print "No tests to run."
        return 0

    # Run the tests, either locally or distributed on the network.
    try:
        start_time = time.time()
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        if options.junitout:
            progress_indicator = progress.JUnitTestProgressIndicator(
                progress_indicator, options.junitout, options.junittestsuite)

        run_networked = not options.no_network
        if not run_networked:
            print("Network distribution disabled, running tests locally.")
        elif utils.GuessOS() != "linux":
            print("Network distribution is only supported on Linux, sorry!")
            run_networked = False
        peers = []
        if run_networked:
            peers = network_execution.GetPeers()
            if not peers:
                print(
                    "No connection to distribution server; running tests locally."
                )
                run_networked = False
            elif len(peers) == 1:
                print("No other peers on the network; running tests locally.")
                run_networked = False
            elif num_tests <= 100:
                print("Less than 100 tests, running them locally.")
                run_networked = False

        if run_networked:
            runner = network_execution.NetworkedRunner(suites,
                                                       progress_indicator, ctx,
                                                       peers, workspace)
        else:
            runner = execution.Runner(suites, progress_indicator, ctx)

        exit_code = runner.Run(options.j)
        if runner.terminate:
            return exit_code
        overall_duration = time.time() - start_time
    except KeyboardInterrupt:
        raise

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)
    return exit_code
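
# Illustrative sketch (ShardTests' real implementation is defined elsewhere in
# the runner): distributing tests across shard_count shards is typically a
# simple modulo filter over the test index, roughly:
def shard_tests_sketch(tests, shard_count, shard_run):
    """Keep every shard_count-th test, starting at 1-based shard_run."""
    if shard_count < 2:
        return tests
    return [t for i, t in enumerate(tests) if i % shard_count == shard_run - 1]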
Пример #15
0
    def _do_execute(self, options, args):
      # Use the v8 root as cwd as some test cases use "load" with relative
      # paths.
      os.chdir(base_runner.BASE_DIR)

      self._setup_env(options)

      if options.swarming:
        # Swarming doesn't print how isolated commands are called. Lets make
        # this less cryptic by printing it ourselves.
        print(' '.join(sys.argv))

        if utils.GuessOS() == "macos":
          # TODO(machenbach): Temporary output for investigating hanging test
          # driver on mac.
          print "V8 related processes running on this host:"
          try:
            print(subprocess.check_output(
              "ps -e | egrep 'd8|cctest|unittests'", shell=True))
          except Exception:
            pass

      exit_code = 0

      suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))

      # Use default tests if no test configuration was provided at the cmd line.
      if len(args) == 0:
        args = ["default"]

      # Expand arguments with grouped tests. The args should reflect the list
      # of suites as otherwise filters would break.
      def ExpandTestGroups(name):
        if name in base_runner.TEST_MAP:
          return [suite for suite in base_runner.TEST_MAP[name]]
        else:
          return [name]
      args = [expanded_arg for arg in args
              for expanded_arg in ExpandTestGroups(arg)]
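      # Illustrative expansion (the TEST_MAP contents are a hypothetical
      # example): with TEST_MAP = {"default": ["mjsunit", "cctest",
      # "unittests"]}, the arguments ["default", "webkit"] expand to
      # ["mjsunit", "cctest", "unittests", "webkit"].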

      args_suites = OrderedDict() # Used as set
      for arg in args:
        args_suites[arg.split('/')[0]] = True
      suite_paths = [ s for s in args_suites if s in suite_paths ]

      suites = []
      for root in suite_paths:
        suite = testsuite.TestSuite.LoadTestSuite(
            os.path.join(base_runner.BASE_DIR, "test", root))
        if suite:
          suites.append(suite)

      for s in suites:
        s.PrepareSources()

      for (arch, mode) in self.arch_and_mode:
        try:
          code = self._execute(arch, mode, args, options, suites)
        except KeyboardInterrupt:
          return 2
        exit_code = exit_code or code
      return exit_code
Пример #16
0
"""

# for py2/py3 compatibility
from __future__ import print_function
from __future__ import absolute_import

import sys

from testrunner.local import command
from testrunner.local import utils

MAX_TRIES = 3
TIMEOUT = 120

# Predictable mode works only when run on the host os.
command.setup(utils.GuessOS())


def main(args):
    def allocation_str(stdout):
        for line in reversed((stdout or '').splitlines()):
            if line.startswith('### Allocations = '):
                return line
        return None

    cmd = command.Command(args[0], args[1:], timeout=TIMEOUT)

    previous_allocations = None
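    # Illustrative note (an interpretation, not stated explicitly above): the
    # loop below presumably compares each run's '### Allocations = ' line with
    # previous_allocations; identical lines across runs indicate predictable
    # allocation behaviour.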
    for run in range(1, MAX_TRIES + 1):
        print('### Predictable run #%d' % run)
        output = cmd.execute()