Example #1
  def _create_context(self, options):
    # Populate context object.
    timeout = options.timeout
    if timeout == -1:
      # Simulators are slow, therefore allow a longer default timeout.
      if self.build_config.arch in SLOW_ARCHS:
        timeout = 2 * TIMEOUT_DEFAULT
      else:
        timeout = TIMEOUT_DEFAULT

    timeout *= self.mode_options.timeout_scalefactor
    ctx = context.Context(self.build_config.arch,
                          self.mode_options.execution_mode,
                          self.outdir,
                          self.mode_options.flags, options.verbose,
                          timeout, options.isolates,
                          options.command_prefix,
                          options.extra_flags,
                          False,  # Keep i18n on by default.
                          options.random_seed,
                          True,  # No sorting of test cases.
                          options.rerun_failures_count,
                          options.rerun_failures_max,
                          False,  # No no_harness mode.
                          False,  # Don't use perf data.
                          False)  # Coverage not supported.
    return ctx
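
The timeout handling in this example follows a pattern repeated throughout these snippets: a requested timeout of -1 means "use the default", the default is doubled for simulated (slow) architectures, and the result is scaled by a per-mode factor. Below is a minimal standalone sketch of that logic; the values of TIMEOUT_DEFAULT and SLOW_ARCHS are assumptions for illustration, not the runner's actual constants.

# Hypothetical stand-ins for the runner's constants.
TIMEOUT_DEFAULT = 60  # seconds (assumed value)
SLOW_ARCHS = {"arm", "arm64", "mipsel", "mips64el"}  # assumed subset

def resolve_timeout(requested, arch, scalefactor):
    """Resolve a timeout: -1 requests the default, doubled on slow archs."""
    timeout = requested
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        timeout = 2 * TIMEOUT_DEFAULT if arch in SLOW_ARCHS else TIMEOUT_DEFAULT
    return timeout * scalefactor

print(resolve_timeout(-1, "arm64", 2))  # 240 with the assumed default
print(resolve_timeout(30, "x64", 1))    # explicit timeouts only get scaled: 30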
Example #2
def Execute(arch, mode, args, options, suites):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.auto_detect:
      # If an output dir with a build was passed, test directly in that
      # directory.
      shell_dir = os.path.join(BASE_DIR, options.outdir)
    elif options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
      mode = BuildbotToV8Mode(mode)
    else:
      shell_dir = os.path.join(
          BASE_DIR,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  if not os.path.exists(shell_dir):
    raise Exception('Could not find shell_dir: "%s"' % shell_dir)

  # Populate context object.
  mode_flags = MODES[mode]["flags"]

  # Simulators are slow, therefore allow a longer timeout.
  if arch in SLOW_ARCHS:
    options.timeout *= 2

  options.timeout *= MODES[mode]["timeout_scalefactor"]

  if options.predictable:
    # Predictable mode is slower.
    options.timeout *= 2

  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
                        mode_flags, options.verbose,
                        options.timeout,
                        options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness,
                        use_perf_data=not options.swarming,
                        sancov_dir=options.sancov_dir)

  # TODO(all): Combine "simulator" and "simulator_run".
  # TODO(machenbach): In GN we can derive simulator run from
  # target_arch != v8_target_arch in the dumped build config.
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
               'ppc', 'ppc64'] and \
      ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "gcov_coverage": options.gcov_coverage,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": options.novfp3,
    "predictable": options.predictable,
    "byteorder": sys.byteorder,
  }
  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests

    # First filtering by status applying the generic rules (independent of
    # variants).
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests)

    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_gen = s.CreateVariantGenerator(VARIANTS)
    variant_tests = [ t.CopyAddingFlags(v, flags)
                      for t in s.tests
                      for v in variant_gen.FilterVariantsByTest(t)
                      for flags in variant_gen.GetFlagSets(t, v) ]

    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        for i in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in execution.py)
          # or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % RandomSeed()]
      s.tests = [
        t.CopyAddingFlags(t.variant, flags)
        for t in variant_tests
        for flags in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests

    # Second filtering by status applying the variant-dependent rules.
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests, variants=True)

    s.tests = ShardTests(s.tests, options)
    num_tests += len(s.tests)

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  # Run the tests, either locally or distributed on the network.
  start_time = time.time()
  progress_indicator = progress.IndicatorNotifier()
  progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]())
  if options.junitout:
    progress_indicator.Register(progress.JUnitTestProgressIndicator(
        options.junitout, options.junittestsuite))
  if options.json_test_results:
    progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results, arch, MODES[mode]["execution_mode"],
        ctx.random_seed))
  if options.flakiness_results:
    progress_indicator.Register(progress.FlakinessTestProgressIndicator(
        options.flakiness_results))

  run_networked = not options.no_network
  if not run_networked:
    if options.verbose:
      print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, BASE_DIR)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)

  if num_tests == 0:
    print("Warning: no tests were run!")

  if exit_code == 1 and options.json_test_results:
    print("Force exit code 0 after failures. Json test results file generated "
          "with failure information.")
    exit_code = 0

  if options.sancov_dir:
    # If tests ran with sanitizer coverage, merge coverage files in the end.
    try:
      print "Merging sancov files."
      subprocess.check_call([
        sys.executable,
        join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
        "--coverage-dir=%s" % options.sancov_dir])
    except:
      print >> sys.stderr, "Error: Merging sancov files failed."
      exit_code = 1

  return exit_code
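
The variant expansion inside the suite loop above is a three-level cross product: each test is paired with every variant the generator allows for it, and with every flag set that variant produces. The toy sketch below reproduces the shape of that comprehension with made-up tests and variants instead of the real variant generator API.

# Toy data; in the runner these come from the suite's variant generator.
tests = ["mjsunit/foo", "mjsunit/bar"]
variants = {
    "default": [[]],
    "stress": [["--stress-opt", "--always-opt"]],
}

expanded = [(t, v, flags)
            for t in tests
            for v in variants            # FilterVariantsByTest(t) in the original
            for flags in variants[v]]    # GetFlagSets(t, v) in the original

for t, v, flags in expanded:
    print(t, v, flags)
# Each test appears once per (variant, flag set) pair: 2 tests x 2 variants = 4 cases.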
Example #3
File: gc_fuzzer.py (project: YYKLIT/v8)
    def _execute(self, args, options, suites):
        print(">>> Running tests for %s.%s" %
              (self.build_config.arch, self.mode_name))

        # Populate context object.
        timeout = options.timeout
        if timeout == -1:
            # Simulators are slow, therefore allow a longer default timeout.
            if self.build_config.arch in SLOW_ARCHS:
                timeout = 2 * TIMEOUT_DEFAULT
            else:
                timeout = TIMEOUT_DEFAULT

        timeout *= self.mode_options.timeout_scalefactor
        ctx = context.Context(
            self.build_config.arch,
            self.mode_options.execution_mode,
            self.outdir,
            self.mode_options.flags,
            options.verbose,
            timeout,
            options.isolates,
            options.command_prefix,
            options.extra_flags,
            False,  # Keep i18n on by default.
            options.random_seed,
            True,  # No sorting of test cases.
            0,  # Don't rerun failing tests.
            0,  # No use of a rerun-failing-tests maximum.
            False,  # No predictable mode.
            False,  # No no_harness mode.
            False,  # Don't use perf data.
            False)  # Coverage not supported.

        num_tests = self._load_tests(args, options, suites, ctx)
        if num_tests == 0:
            print "No tests to run."
            return 0

        test_backup = dict(map(lambda s: (s, s.tests), suites))

        print('>>> Collection phase')
        for s in suites:
            analysis_flags = [
                # > 100% to not influence default incremental marking, but we need this
                # flag to print reached incremental marking limit.
                '--stress_marking',
                '1000',
                '--trace_incremental_marking',
            ]
            s.tests = map(
                lambda t: t.CopyAddingFlags(t.variant, analysis_flags),
                s.tests)
            for t in s.tests:
                t.cmd = s.GetCommand(t, ctx)

        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)
        exit_code = runner.Run(options.j)

        print('>>> Analysis phase')
        test_results = dict()
        for s in suites:
            for t in s.tests:
                # Skip failed tests.
                if s.HasUnexpectedOutput(t):
                    print '%s failed, skipping' % t.path
                    continue
                max_limit = self._get_max_limit_reached(t)
                if max_limit:
                    test_results[t.path] = max_limit

        if options.dump_results_file:
            with file("%s.%d.txt" % (options.dump_results_file, time.time()),
                      "w") as f:
                f.write(json.dumps(test_results))

        num_tests = 0
        for s in suites:
            s.tests = []
            for t in test_backup[s]:
                max_percent = test_results.get(t.path, 0)
                if not max_percent or max_percent < 1.0:
                    continue
                max_percent = int(max_percent)

                subtests_count = self._calculate_n_tests(max_percent, options)

                if options.verbose:
                    print('%s [x%d] (max marking limit=%.02f)' %
                          (t.path, subtests_count, max_percent))
                for _ in xrange(0, subtests_count):
                    fuzzer_seed = self._next_fuzzer_seed()
                    fuzzing_flags = [
                        '--stress_marking',
                        str(max_percent),
                        '--fuzzer_random_seed',
                        str(fuzzer_seed),
                    ]
                    if options.stress_compaction:
                        fuzzing_flags.append('--stress_compaction_random')
                    s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))

            for t in s.tests:
                t.cmd = s.GetCommand(t, ctx)
            num_tests += len(s.tests)

        if num_tests == 0:
            print "No tests to run."
            return exit_code

        print(">>> Fuzzing phase (%d test cases)" % num_tests)
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        return runner.Run(options.j) or exit_code
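
The final line, runner.Run(options.j) or exit_code, leans on 0 being falsy in Python: if the fuzzing phase exits cleanly, any non-zero exit code recorded during the collection phase is still propagated. A tiny illustration of that convention:

def combine_exit_codes(fuzz_code, collection_code):
    # 0 is falsy, so a clean fuzzing run still surfaces an earlier failure.
    return fuzz_code or collection_code

print(combine_exit_codes(0, 0))  # 0: both phases passed
print(combine_exit_codes(0, 1))  # 1: the collection phase had failures
print(combine_exit_codes(2, 1))  # 2: a fuzzing failure takes precedence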
Example #4
    def _execute(self, args, options, suites):
      print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                             self.mode_name))
      # Populate context object.

      # Simulators are slow, therefore allow a longer timeout.
      if self.build_config.arch in SLOW_ARCHS:
        options.timeout *= 2

      options.timeout *= self.mode_options.timeout_scalefactor

      if self.build_config.predictable:
        # Predictable mode is slower.
        options.timeout *= 2

      ctx = context.Context(self.build_config.arch,
                            self.mode_options.execution_mode,
                            self.outdir,
                            self.mode_options.flags,
                            options.verbose,
                            options.timeout,
                            options.isolates,
                            options.command_prefix,
                            options.extra_flags,
                            self.build_config.no_i18n,
                            options.random_seed,
                            options.no_sorting,
                            options.rerun_failures_count,
                            options.rerun_failures_max,
                            options.no_harness,
                            use_perf_data=not options.swarming,
                            sancov_dir=self.sancov_dir,
                            infra_staging=options.infra_staging)

      # TODO(all): Combine "simulator" and "simulator_run".
      # TODO(machenbach): In GN we can derive simulator run from
      # target_arch != v8_target_arch in the dumped build config.
      simulator_run = (
        not options.dont_skip_simulator_slow_tests and
        self.build_config.arch in [
          'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
          'ppc64', 's390', 's390x'] and
        bool(base_runner.ARCH_GUESS) and
        self.build_config.arch != base_runner.ARCH_GUESS)
      # Find available test suites and read test cases from them.
      variables = {
        "arch": self.build_config.arch,
        "asan": self.build_config.asan,
        "byteorder": sys.byteorder,
        "dcheck_always_on": self.build_config.dcheck_always_on,
        "deopt_fuzzer": False,
        "gc_fuzzer": False,
        "gc_stress": options.gc_stress or options.random_gc_stress,
        "gcov_coverage": self.build_config.gcov_coverage,
        "isolates": options.isolates,
        "mode": self.mode_options.status_mode,
        "msan": self.build_config.msan,
        "no_harness": options.no_harness,
        "no_i18n": self.build_config.no_i18n,
        "no_snap": self.build_config.no_snap,
        "novfp3": options.novfp3,
        "predictable": self.build_config.predictable,
        "simulator": utils.UseSimulator(self.build_config.arch),
        "simulator_run": simulator_run,
        "system": utils.GuessOS(),
        "tsan": self.build_config.tsan,
        "ubsan_vptr": self.build_config.ubsan_vptr,
      }

      progress_indicator = progress.IndicatorNotifier()
      progress_indicator.Register(
        progress.PROGRESS_INDICATORS[options.progress]())
      if options.junitout:  # pragma: no cover
        progress_indicator.Register(progress.JUnitTestProgressIndicator(
            options.junitout, options.junittestsuite))
      if options.json_test_results:
        progress_indicator.Register(progress.JsonTestProgressIndicator(
          options.json_test_results,
          self.build_config.arch,
          self.mode_options.execution_mode,
          ctx.random_seed))
      if options.flakiness_results:  # pragma: no cover
        progress_indicator.Register(progress.FlakinessTestProgressIndicator(
            options.flakiness_results))

      if options.infra_staging:
        for s in suites:
          s.ReadStatusFile(variables)
          s.ReadTestCases(ctx)

        return self._run_test_procs(suites, args, options, progress_indicator,
                                    ctx)

      all_tests = []
      num_tests = 0
      for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
          s.FilterTestCasesByArgs(args)
        all_tests += s.tests

        # First filtering by status applying the generic rules (tests without
        # variants)
        if options.warn_unused:
          tests = [(t.name, t.variant) for t in s.tests]
          s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
        s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)

        if options.cat:
          verbose.PrintTestSource(s.tests)
          continue
        variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
        variant_tests = [ t.create_variant(v, flags)
                          for t in s.tests
                          for v in variant_gen.FilterVariantsByTest(t)
                          for flags in variant_gen.GetFlagSets(t, v) ]

        if options.random_seed_stress_count > 1:
          # Duplicate test for random seed stress mode.
          def iter_seed_flags():
            for _ in range(0, options.random_seed_stress_count):
              # Use given random seed for all runs (set by default in
              # execution.py) or a new random seed if none is specified.
              if options.random_seed:
                yield []
              else:
                yield ["--random-seed=%d" % self._random_seed()]
          s.tests = [
            t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
            for t in variant_tests
            for n, flags in enumerate(iter_seed_flags())
          ]
        else:
          s.tests = variant_tests

        # Second filtering by status applying also the variant-dependent rules.
        if options.warn_unused:
          tests = [(t.name, t.variant) for t in s.tests]
          s.statusfile.warn_unused_rules(tests, check_variant_rules=True)

        s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
        s.tests = self._shard_tests(s.tests, options)

        for t in s.tests:
          t.cmd = t.get_command(ctx)

        num_tests += len(s.tests)

      if options.cat:
        return 0  # We're done here.

      if options.report:
        verbose.PrintReport(all_tests)

      # Run the tests.
      start_time = time.time()

      if self.build_config.predictable:
        outproc_factory = predictable.get_outproc
      else:
        outproc_factory = None

      runner = execution.Runner(suites, progress_indicator, ctx,
                                outproc_factory)
      exit_code = runner.Run(options.j)
      overall_duration = time.time() - start_time

      if options.time:
        verbose.PrintTestDurations(suites, runner.outputs, overall_duration)

      if num_tests == 0:
        exit_code = 3
        print("Warning: no tests were run!")

      if exit_code == 1 and options.json_test_results:
        print("Force exit code 0 after failures. Json test results file "
              "generated with failure information.")
        exit_code = 0

      if self.sancov_dir:
        # If tests ran with sanitizer coverage, merge coverage files in the end.
        try:
          print "Merging sancov files."
          subprocess.check_call([
            sys.executable,
            join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
            "--coverage-dir=%s" % self.sancov_dir])
        except:
          print >> sys.stderr, "Error: Merging sancov files failed."
          exit_code = 1

      return exit_code
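
The random-seed stress branch in this example copies every expanded test once per stress run, names each copy "seed-stress-<n>", and hands it a fresh --random-seed unless one was pinned on the command line. The sketch below imitates that duplication with a stand-in seed helper; the helper and its bounds are assumptions, not the runner's _random_seed().

import random

def iter_seed_flags(count, fixed_seed=None):
    """Yield one flag list per stress run, mirroring iter_seed_flags above."""
    for _ in range(count):
        if fixed_seed:
            yield []  # a pinned seed is already carried by the context
        else:
            yield ["--random-seed=%d" % random.randint(1, 2**31 - 1)]

tests = ["mjsunit/foo", "mjsunit/bar"]
stressed = [(t, "seed-stress-%d" % n, flags)
            for t in tests
            for n, flags in enumerate(iter_seed_flags(3))]
for entry in stressed:
    print(entry)  # each test shows up 3 times, each with its own seed flag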
Example #5
def Execute(arch, mode, args, options, suites, workspace):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            # TODO(machenbach): Get rid of different output folder location on
            # buildbot. Currently this is capitalized Release and Debug.
            shell_dir = os.path.join(workspace, options.outdir, mode)
            mode = mode.lower()
        else:
            shell_dir = os.path.join(
                workspace,
                options.outdir,
                "%s.%s" % (arch, MODES[mode]["output_folder"]),
            )
    shell_dir = os.path.relpath(shell_dir)

    # Populate context object.
    mode_flags = MODES[mode]["flags"]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= MODES[mode]["timeout_scalefactor"]

    if options.predictable:
        # Predictable mode is slower.
        timeout *= 2

    # TODO(machenbach): Remove temporary verbose output on windows after
    # debugging driver-hung-up on XP.
    verbose_output = (options.verbose
                      or utils.IsWindows() and options.progress == "verbose")
    ctx = context.Context(
        arch, MODES[mode]["execution_mode"], shell_dir, mode_flags,
        verbose_output, timeout, options.isolates, options.command_prefix,
        options.extra_flags, options.no_i18n, options.random_seed,
        options.no_sorting, options.rerun_failures_count,
        options.rerun_failures_max, options.predictable, options.no_harness)

    # TODO(all): Combine "simulator" and "simulator_run".
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
                 'ppc', 'ppc64'] and \
        ARCH_GUESS and arch != ARCH_GUESS
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "isolates": options.isolates,
        "mode": MODES[mode]["status_mode"],
        "no_i18n": options.no_i18n,
        "no_snap": options.no_snap,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
        "tsan": options.tsan,
        "msan": options.msan,
        "dcheck_always_on": options.dcheck_always_on,
        "byteorder": sys.byteorder,
    }
    all_tests = []
    num_tests = 0
    test_id = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                                  options.slow_tests, options.pass_fail_tests)
        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
        s.tests = [
            t.CopyAddingFlags(v) for t in s.tests
            for v in s.VariantFlags(t, variant_flags)
        ]
        s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    if num_tests == 0:
        print "No tests to run."
        return 0

    # Run the tests, either locally or distributed on the network.
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    if options.junitout:
        progress_indicator = progress.JUnitTestProgressIndicator(
            progress_indicator, options.junitout, options.junittestsuite)
    if options.json_test_results:
        progress_indicator = progress.JsonTestProgressIndicator(
            progress_indicator, options.json_test_results, arch,
            MODES[mode]["execution_mode"])

    run_networked = not options.no_network
    if not run_networked:
        print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
        print("Network distribution is only supported on Linux, sorry!")
        run_networked = False
    peers = []
    if run_networked:
        peers = network_execution.GetPeers()
        if not peers:
            print(
                "No connection to distribution server; running tests locally.")
            run_networked = False
        elif len(peers) == 1:
            print("No other peers on the network; running tests locally.")
            run_networked = False
        elif num_tests <= 100:
            print("Less than 100 tests, running them locally.")
            run_networked = False

    if run_networked:
        runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                   ctx, peers, workspace)
    else:
        runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    overall_duration = time.time() - start_time

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)
    return exit_code
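
Across these examples the shell directory defaults to "<outdir>/<arch>.<mode folder>" under the workspace, while buildbot builds keep capitalized Release/Debug folders directly under the output directory. The sketch below is a deliberately simplified version of that lookup: it treats the output folder as just the lowercased mode name and ignores MODES[...]["output_folder"]; the paths shown are POSIX and hypothetical.

import os

def resolve_shell_dir(workspace, outdir, arch, mode, buildbot=False):
    # Buildbot keeps capitalized Release/Debug folders directly under outdir.
    if buildbot:
        return os.path.join(workspace, outdir, mode)
    # Simplification: lowercased mode stands in for the real output_folder.
    return os.path.join(workspace, outdir, "%s.%s" % (arch, mode.lower()))

print(resolve_shell_dir("/src/v8", "out", "x64", "Release", buildbot=True))
# /src/v8/out/Release
print(resolve_shell_dir("/src/v8", "out", "x64", "Release"))
# /src/v8/out/x64.release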
Example #6
def Execute(arch, mode, args, options, suites, workspace):
    print(">>> Running tests for %s.%s" % (arch, mode))

    dist = Distribution(options)

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            shell_dir = os.path.join(workspace, options.outdir, mode)
            mode = mode.lower()
        else:
            shell_dir = os.path.join(workspace, options.outdir,
                                     "%s.%s" % (arch, mode))
    shell_dir = os.path.relpath(shell_dir)

    # Populate context object.
    mode_flags = MODE_FLAGS[mode]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= TIMEOUT_SCALEFACTOR[mode]
    ctx = context.Context(arch, mode, shell_dir, mode_flags, options.verbose,
                          timeout, options.isolates, options.command_prefix,
                          options.extra_flags, False)

    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": True,
        "gc_stress": False,
        "isolates": options.isolates,
        "mode": mode,
        "no_i18n": False,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
    }
    all_tests = []
    num_tests = 0
    test_id = 0

    # Remember test case prototypes for the fuzzing phase.
    test_backup = dict((s, []) for s in suites)

    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(False)
        test_backup[s] = s.tests
        analysis_flags = [
            "--deopt-every-n-times",
            "%d" % MAX_DEOPT, "--print-deopt-stress"
        ]
        s.tests = [t.CopyAddingFlags(analysis_flags) for t in s.tests]
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if num_tests == 0:
        print "No tests to run."
        return 0

    try:
        print(">>> Collection phase")
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        exit_code = runner.Run(options.j)
        if runner.terminate:
            return exit_code

    except KeyboardInterrupt:
        return 1

    print(">>> Analysis phase")
    num_tests = 0
    test_id = 0
    for s in suites:
        test_results = {}
        for t in s.tests:
            for line in t.output.stdout.splitlines():
                if line.startswith("=== Stress deopt counter: "):
                    test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
        for t in s.tests:
            if t.path not in test_results:
                print "Missing results for %s" % t.path
        if options.dump_results_file:
            results_dict = dict(
                (t.path, n) for (t, n) in test_results.iteritems())
            with file("%s.%d.txt" % (options.dump_results_file, time.time()),
                      "w") as f:
                f.write(json.dumps(results_dict))

        # Reset tests and redistribute the prototypes from the collection phase.
        s.tests = []
        if options.verbose:
            print "Test distributions:"
        for t in test_backup[s]:
            max_deopt = test_results.get(t.path, 0)
            if max_deopt == 0:
                continue
            n_deopt = CalculateNTests(max_deopt, options)
            distribution = dist.Distribute(n_deopt, max_deopt)
            if options.verbose:
                print "%s %s" % (t.path, distribution)
            for i in distribution:
                fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
                s.tests.append(t.CopyAddingFlags(fuzzing_flags))
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if num_tests == 0:
        print "No tests to run."
        return 0

    try:
        print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        exit_code = runner.Run(options.j)
        if runner.terminate:
            return exit_code

    except KeyboardInterrupt:
        return 1

    return exit_code
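
The analysis phase works by scraping each test's stdout for the "=== Stress deopt counter: " line and recording MAX_DEOPT minus the leftover counter, i.e. how many deopt points the test actually hit. A standalone sketch of just that parsing step, with an assumed MAX_DEOPT and a fabricated stdout blob:

MAX_DEOPT = 1000  # assumed value of the runner's constant

def deopts_reached(stdout):
    """Return MAX_DEOPT minus the remaining counter, or None if it never printed."""
    for line in stdout.splitlines():
        if line.startswith("=== Stress deopt counter: "):
            return MAX_DEOPT - int(line.split(" ")[-1])
    return None

fake_stdout = "some test output\n=== Stress deopt counter: 973\n"
print(deopts_reached(fake_stdout))   # 27 deopt points were exercised
print(deopts_reached("no counter"))  # None, i.e. results missing for this test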
Example #7
def Execute(arch, mode, args, options, suites, workspace):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(workspace, options.outdir,
                               "%s.%s" % (arch, mode))
  shell_dir = os.path.relpath(shell_dir)

  # Populate context object.
  mode_flags = MODE_FLAGS[mode]
  options.timeout *= TIMEOUT_SCALEFACTOR[mode]
  ctx = context.Context(arch, mode, shell_dir,
                        mode_flags, options.verbose,
                        options.timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags)

  # Find available test suites and read test cases from them.
  variables = {
    "mode": mode,
    "arch": arch,
    "system": utils.GuessOS(),
    "isolates": options.isolates
  }
  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    all_tests += s.tests
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    s.FilterTestCasesByStatus(options.warn_unused)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_flags = s.VariantFlags() or VARIANT_FLAGS
    s.tests = [ t.CopyAddingFlags(v) for t in s.tests for v in variant_flags ]
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  if num_tests == 0:
    print "No tests to run."
    return 0

  # Run the tests, either locally or distributed on the network.
  try:
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()

    run_networked = not options.no_network
    if not run_networked:
      print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
      print("Network distribution is only supported on Linux, sorry!")
      run_networked = False
    peers = []
    if run_networked:
      peers = network_execution.GetPeers()
      if not peers:
        print("No connection to distribution server; running tests locally.")
        run_networked = False
      elif len(peers) == 1:
        print("No other peers on the network; running tests locally.")
        run_networked = False
      elif num_tests <= 100:
        print("Less than 100 tests, running them locally.")
        run_networked = False

    if run_networked:
      runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                 ctx, peers, workspace)
    else:
      runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    if runner.terminate:
      return exit_code
    overall_duration = time.time() - start_time
  except KeyboardInterrupt:
    return 1

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code
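
The choice between a local and a networked run is a cascade of early opt-outs: the --no-network flag, the host OS, peer availability, and a minimum test count. The predicate below restates that cascade with the peer lookup stubbed out as a plain list; it is a sketch of the decision logic, not the NetworkedRunner API.

def should_run_networked(no_network, os_name, peers, num_tests):
    # Each check falls back to a local run, mirroring the cascade above.
    if no_network:
        return False, "Network distribution disabled, running tests locally."
    if os_name != "linux":
        return False, "Network distribution is only supported on Linux, sorry!"
    if not peers:
        return False, "No connection to distribution server; running tests locally."
    if len(peers) == 1:
        return False, "No other peers on the network; running tests locally."
    if num_tests <= 100:
        return False, "Less than 100 tests, running them locally."
    return True, None

print(should_run_networked(False, "linux", ["peer-a", "peer-b"], 500))  # (True, None)
print(should_run_networked(False, "darwin", ["peer-a", "peer-b"], 500)) # falls back to local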
Example #8
File: deopt_fuzzer.py (project: nuragic/v8)
    def _execute(self, args, options, suites):
        print(">>> Running tests for %s.%s" %
              (self.build_config.arch, self.mode_name))

        dist = self._distribution(options)

        # Populate context object.
        timeout = options.timeout
        if timeout == -1:
            # Simulators are slow, therefore allow a longer default timeout.
            if self.build_config.arch in SLOW_ARCHS:
                timeout = 2 * TIMEOUT_DEFAULT
            else:
                timeout = TIMEOUT_DEFAULT

        timeout *= self.mode_options.timeout_scalefactor
        ctx = context.Context(
            self.build_config.arch,
            self.mode_options.execution_mode,
            self.outdir,
            self.mode_options.flags,
            options.verbose,
            timeout,
            options.isolates,
            options.command_prefix,
            options.extra_flags,
            False,  # Keep i18n on by default.
            options.random_seed,
            True,  # No sorting of test cases.
            0,  # Don't rerun failing tests.
            0,  # No use of a rerun-failing-tests maximum.
            False,  # No predictable mode.
            False,  # No no_harness mode.
            False,  # Don't use perf data.
            False)  # Coverage not supported.

        # Find available test suites and read test cases from them.
        variables = {
            "arch": self.build_config.arch,
            "asan": self.build_config.asan,
            "byteorder": sys.byteorder,
            "dcheck_always_on": self.build_config.dcheck_always_on,
            "deopt_fuzzer": True,
            "gc_fuzzer": False,
            "gc_stress": False,
            "gcov_coverage": self.build_config.gcov_coverage,
            "isolates": options.isolates,
            "mode": self.mode_options.status_mode,
            "msan": self.build_config.msan,
            "no_harness": False,
            "no_i18n": self.build_config.no_i18n,
            "no_snap": self.build_config.no_snap,
            "novfp3": False,
            "predictable": self.build_config.predictable,
            "simulator": utils.UseSimulator(self.build_config.arch),
            "simulator_run": False,
            "system": utils.GuessOS(),
            "tsan": self.build_config.tsan,
            "ubsan_vptr": self.build_config.ubsan_vptr,
        }
        num_tests = 0
        test_id = 0

        # Remember test case prototypes for the fuzzing phase.
        test_backup = dict((s, []) for s in suites)

        for s in suites:
            s.ReadStatusFile(variables)
            s.ReadTestCases(ctx)
            if len(args) > 0:
                s.FilterTestCasesByArgs(args)
            s.FilterTestCasesByStatus(False)
            test_backup[s] = s.tests
            analysis_flags = [
                "--deopt-every-n-times",
                "%d" % MAX_DEOPT, "--print-deopt-stress"
            ]
            s.tests = [
                t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests
            ]
            num_tests += len(s.tests)
            for t in s.tests:
                t.id = test_id
                test_id += 1

        if num_tests == 0:
            print "No tests to run."
            return 0

        print(">>> Collection phase")
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        exit_code = runner.Run(options.j)

        print(">>> Analysis phase")
        num_tests = 0
        test_id = 0
        for s in suites:
            test_results = {}
            for t in s.tests:
                for line in t.output.stdout.splitlines():
                    if line.startswith("=== Stress deopt counter: "):
                        test_results[t.path] = MAX_DEOPT - int(
                            line.split(" ")[-1])
            for t in s.tests:
                if t.path not in test_results:
                    print "Missing results for %s" % t.path
            if options.dump_results_file:
                results_dict = dict(
                    (t.path, n) for (t, n) in test_results.iteritems())
                with file(
                        "%s.%d.txt" % (options.dump_results_file, time.time()),
                        "w") as f:
                    f.write(json.dumps(results_dict))

            # Reset tests and redistribute the prototypes from the collection phase.
            s.tests = []
            if options.verbose:
                print "Test distributions:"
            for t in test_backup[s]:
                max_deopt = test_results.get(t.path, 0)
                if max_deopt == 0:
                    continue
                n_deopt = self._calculate_n_tests(max_deopt, options)
                distribution = dist.Distribute(n_deopt, max_deopt)
                if options.verbose:
                    print "%s %s" % (t.path, distribution)
                for i in distribution:
                    fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
                    s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
            num_tests += len(s.tests)
            for t in s.tests:
                t.id = test_id
                test_id += 1

        if num_tests == 0:
            print "No tests to run."
            return 0

        print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        code = runner.Run(options.j)
        return exit_code or code
Example #9
File: run-tests.py (project: adzhou/oragle)
def Execute(arch, mode, args, options, suites, workspace):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            shell_dir = os.path.join(workspace, options.outdir, mode)
            mode = mode.lower()
        else:
            shell_dir = os.path.join(workspace, options.outdir,
                                     "%s.%s" % (arch, mode))
    shell_dir = os.path.relpath(shell_dir)

    if mode == "optdebug":
        mode = "debug"  # "optdebug" is just an alias.

    # Populate context object.
    mode_flags = MODE_FLAGS[mode]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= TIMEOUT_SCALEFACTOR[mode]
    ctx = context.Context(arch, mode, shell_dir, mode_flags, options.verbose,
                          timeout, options.isolates, options.command_prefix,
                          options.extra_flags, options.no_i18n)

    # TODO(all): Combine "simulator" and "simulator_run".
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['a64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "isolates": options.isolates,
        "mode": mode,
        "no_i18n": options.no_i18n,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
    }
    all_tests = []
    num_tests = 0
    test_id = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                                  options.slow_tests, options.pass_fail_tests)
        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
        s.tests = [
            t.CopyAddingFlags(v) for t in s.tests
            for v in s.VariantFlags(t, variant_flags)
        ]
        s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    if num_tests == 0:
        print "No tests to run."
        return 0

    # Run the tests, either locally or distributed on the network.
    try:
        start_time = time.time()
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        if options.junitout:
            progress_indicator = progress.JUnitTestProgressIndicator(
                progress_indicator, options.junitout, options.junittestsuite)

        run_networked = not options.no_network
        if not run_networked:
            print("Network distribution disabled, running tests locally.")
        elif utils.GuessOS() != "linux":
            print("Network distribution is only supported on Linux, sorry!")
            run_networked = False
        peers = []
        if run_networked:
            peers = network_execution.GetPeers()
            if not peers:
                print(
                    "No connection to distribution server; running tests locally."
                )
                run_networked = False
            elif len(peers) == 1:
                print("No other peers on the network; running tests locally.")
                run_networked = False
            elif num_tests <= 100:
                print("Less than 100 tests, running them locally.")
                run_networked = False

        if run_networked:
            runner = network_execution.NetworkedRunner(suites,
                                                       progress_indicator, ctx,
                                                       peers, workspace)
        else:
            runner = execution.Runner(suites, progress_indicator, ctx)

        exit_code = runner.Run(options.j)
        if runner.terminate:
            return exit_code
        overall_duration = time.time() - start_time
    except KeyboardInterrupt:
        raise

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)
    return exit_code
Example #10
File: gc_fuzzer.py (project: swkhnb/v8)
    def _execute(self, args, options, suites):
        print(">>> Running tests for %s.%s" %
              (self.build_config.arch, self.mode_name))

        dist = self._distribution(options)

        # Populate context object.
        timeout = options.timeout
        if timeout == -1:
            # Simulators are slow, therefore allow a longer default timeout.
            if self.build_config.arch in SLOW_ARCHS:
                timeout = 2 * TIMEOUT_DEFAULT
            else:
                timeout = TIMEOUT_DEFAULT

        timeout *= self.mode_options.timeout_scalefactor
        ctx = context.Context(
            self.build_config.arch,
            self.mode_options.execution_mode,
            self.outdir,
            self.mode_options.flags,
            options.verbose,
            timeout,
            options.isolates,
            options.command_prefix,
            options.extra_flags,
            False,  # Keep i18n on by default.
            options.random_seed,
            True,  # No sorting of test cases.
            0,  # Don't rerun failing tests.
            0,  # No use of a rerun-failing-tests maximum.
            False,  # No predictable mode.
            False,  # No no_harness mode.
            False,  # Don't use perf data.
            False)  # Coverage not supported.

        num_tests = self._load_tests(args, options, suites, ctx)
        if num_tests == 0:
            print "No tests to run."
            return 0

        test_backup = dict(map(lambda s: (s, s.tests), suites))

        print('>>> Collection phase')
        for s in suites:
            analysis_flags = [
                # > 100% to not influence default incremental marking, but we need this
                # flag to print reached incremental marking limit.
                '--stress_incremental_marking_percentage',
                '200',
                '--trace_incremental_marking',
            ]
            s.tests = map(
                lambda t: t.CopyAddingFlags(t.variant, analysis_flags),
                s.tests)

        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)
        exit_code = runner.Run(options.j)

        print('>>> Analysis phase')
        test_results = dict()
        for s in suites:
            for t in s.tests:
                max_limit = self._get_max_limit_reached(t)
                if max_limit:
                    test_results[t.path] = max_limit

        if options.dump_results_file:
            with file("%s.%d.txt" % (options.dump_results_file, time.time()),
                      "w") as f:
                f.write(json.dumps(test_results))

        num_tests = 0
        for s in suites:
            s.tests = []
            for t in test_backup[s]:
                max_percent = test_results.get(t.path, 0)
                if not max_percent or max_percent < 1.0:
                    continue
                max_percent = int(max_percent)

                # Calculate distribution.
                im_count = self._calculate_n_tests(max_percent, options)
                im_distribution = dist.Distribute(im_count, max_percent)
                if options.stress_compaction:
                    compaction_count = self._calculate_n_tests(100, options)
                    compaction_distribution = dist.Distribute(
                        compaction_count, 100)
                    distribution = itertools.product(im_distribution,
                                                     compaction_distribution)
                else:
                    # 0 disables the second flag.
                    distribution = itertools.product(im_distribution, [0])

                if options.verbose:
                    distribution = list(distribution)
                    print "%s %s (max=%.02f)" % (t.path, distribution,
                                                 max_percent)
                for im, compaction in distribution:
                    fuzzing_flags = [
                        "--stress_incremental_marking_percentage",
                        str(im),
                        "--stress_compaction_percentage",
                        str(compaction),
                    ]
                    if options.random_seed:
                        fuzzing_flags += [
                            '--fuzzer_random_seed',
                            str(options.fuzzer_random_seed)
                        ]
                    s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
            num_tests += len(s.tests)

        if num_tests == 0:
            print "No tests to run."
            return 0

        print(">>> Fuzzing phase (%d test cases)" % num_tests)
        progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
        runner = execution.Runner(suites, progress_indicator, ctx)

        return runner.Run(options.j) or exit_code
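
This last example builds its fuzzing matrix with itertools.product: every incremental-marking percentage is paired with every compaction percentage, or with the single value 0 when compaction stress is disabled. The toy run below shows how that cross product turns into flag sets; the percentage lists are made up for illustration.

import itertools

im_distribution = [20, 60, 90]      # hypothetical marking percentages
compaction_distribution = [10, 50]  # hypothetical compaction percentages

for im, compaction in itertools.product(im_distribution, compaction_distribution):
    print(["--stress_incremental_marking_percentage", str(im),
           "--stress_compaction_percentage", str(compaction)])
# 3 x 2 = 6 flag sets; substituting [0] for the second list disables that axis.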