Example #1
def Execute(arch, mode, args, options, suites):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.auto_detect:
            # If an output dir with a build was passed, test directly in that
            # directory.
            shell_dir = os.path.join(BASE_DIR, options.outdir)
        elif options.buildbot:
            # TODO(machenbach): Get rid of different output folder location on
            # buildbot. Currently this is capitalized Release and Debug.
            shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
            mode = BuildbotToV8Mode(mode)
        else:
            shell_dir = os.path.join(
                BASE_DIR,
                options.outdir,
                "%s.%s" % (arch, MODES[mode]["output_folder"]),
            )
    if not os.path.exists(shell_dir):
        raise Exception('Could not find shell_dir: "%s"' % shell_dir)

    # Populate context object.
    mode_flags = MODES[mode]["flags"]

    # Simulators are slow, therefore allow a longer timeout.
    if arch in SLOW_ARCHS:
        options.timeout *= 2

    options.timeout *= MODES[mode]["timeout_scalefactor"]

    if options.predictable:
        # Predictable mode is slower.
        options.timeout *= 2

    ctx = context.Context(arch,
                          MODES[mode]["execution_mode"],
                          shell_dir,
                          mode_flags,
                          options.verbose,
                          options.timeout,
                          options.isolates,
                          options.command_prefix,
                          options.extra_flags,
                          options.no_i18n,
                          options.random_seed,
                          options.no_sorting,
                          options.rerun_failures_count,
                          options.rerun_failures_max,
                          options.predictable,
                          options.no_harness,
                          use_perf_data=not options.swarming,
                          sancov_dir=options.sancov_dir)

    # TODO(all): Combine "simulator" and "simulator_run".
    # TODO(machenbach): In GN we can derive simulator run from
    # target_arch != v8_target_arch in the dumped build config.
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
                 'ppc', 'ppc64', 's390', 's390x'] and \
        bool(ARCH_GUESS) and arch != ARCH_GUESS
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "gcov_coverage": options.gcov_coverage,
        "isolates": options.isolates,
        "mode": MODES[mode]["status_mode"],
        "no_i18n": options.no_i18n,
        "no_snap": options.no_snap,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
        "tsan": options.tsan,
        "msan": options.msan,
        "dcheck_always_on": options.dcheck_always_on,
        "novfp3": options.novfp3,
        "predictable": options.predictable,
        "byteorder": sys.byteorder,
        "no_harness": options.no_harness,
        "ubsan_vptr": options.ubsan_vptr,
    }
    all_tests = []
    num_tests = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests

        # First filtering by status applying the generic rules (independent of
        # variants).
        s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                                  options.pass_fail_tests)

        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_gen = s.CreateVariantGenerator(VARIANTS)
        variant_tests = [
            t.CopyAddingFlags(v, flags) for t in s.tests
            for v in variant_gen.FilterVariantsByTest(t)
            for flags in variant_gen.GetFlagSets(t, v)
        ]

        if options.random_seed_stress_count > 1:
            # Duplicate test for random seed stress mode.
            def iter_seed_flags():
                for i in range(0, options.random_seed_stress_count):
                    # Use given random seed for all runs (set by default in execution.py)
                    # or a new random seed if none is specified.
                    if options.random_seed:
                        yield []
                    else:
                        yield ["--random-seed=%d" % RandomSeed()]

            s.tests = [
                t.CopyAddingFlags(t.variant, flags) for t in variant_tests
                for flags in iter_seed_flags()
            ]
        else:
            s.tests = variant_tests

        # Second filtering by status applying the variant-dependent rules.
        s.FilterTestCasesByStatus(options.warn_unused,
                                  options.slow_tests,
                                  options.pass_fail_tests,
                                  variants=True)

        s.tests = ShardTests(s.tests, options)
        num_tests += len(s.tests)

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    # Run the tests, either locally or distributed on the network.
    start_time = time.time()
    progress_indicator = progress.IndicatorNotifier()
    progress_indicator.Register(
        progress.PROGRESS_INDICATORS[options.progress]())
    if options.junitout:
        progress_indicator.Register(
            progress.JUnitTestProgressIndicator(options.junitout,
                                                options.junittestsuite))
    if options.json_test_results:
        progress_indicator.Register(
            progress.JsonTestProgressIndicator(options.json_test_results, arch,
                                               MODES[mode]["execution_mode"],
                                               ctx.random_seed))
    if options.flakiness_results:
        progress_indicator.Register(
            progress.FlakinessTestProgressIndicator(options.flakiness_results))

    run_networked = not options.no_network
    if not run_networked:
        if options.verbose:
            print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
        print("Network distribution is only supported on Linux, sorry!")
        run_networked = False
    peers = []
    if run_networked:
        peers = network_execution.GetPeers()
        if not peers:
            print(
                "No connection to distribution server; running tests locally.")
            run_networked = False
        elif len(peers) == 1:
            print("No other peers on the network; running tests locally.")
            run_networked = False
        elif num_tests <= 100:
            print("Less than 100 tests, running them locally.")
            run_networked = False

    if run_networked:
        runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                   ctx, peers, BASE_DIR)
    else:
        runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    overall_duration = time.time() - start_time

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)

    if num_tests == 0:
        print("Warning: no tests were run!")

    if exit_code == 1 and options.json_test_results:
        print(
            "Force exit code 0 after failures. Json test results file generated "
            "with failure information.")
        exit_code = 0

    if options.sancov_dir:
        # If tests ran with sanitizer coverage, merge coverage files in the end.
        try:
            print "Merging sancov files."
            subprocess.check_call([
                sys.executable,
                join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
                "--coverage-dir=%s" % options.sancov_dir
            ])
        except Exception:
            print("Error: Merging sancov files failed.", file=sys.stderr)
            exit_code = 1

    return exit_code
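
A minimal standalone sketch of the variant expansion and random-seed stress duplication used in Example #1, with simplified stand-in objects (Test, VARIANT_FLAGS and random_seed_stress_count here are illustrative placeholders, not the real test-runner classes):

import random

# Illustrative stand-in for a test case; the real runner uses richer objects.
class Test(object):
    def __init__(self, name, variant=None, flags=None):
        self.name, self.variant, self.flags = name, variant, flags or []

    def CopyAddingFlags(self, variant, flags):
        return Test(self.name, variant, self.flags + list(flags))

# Hypothetical variant -> flag-set mapping (placeholder values).
VARIANT_FLAGS = {"default": [[]], "stress": [["--stress-opt"]]}

tests = [Test("mjsunit/foo"), Test("mjsunit/bar")]

# Step 1: one copy per (variant, flag set), analogous to variant_tests above.
variant_tests = [
    t.CopyAddingFlags(v, flags)
    for t in tests
    for v in VARIANT_FLAGS
    for flags in VARIANT_FLAGS[v]
]

# Step 2: duplicate each copy per stress run; here a fresh seed is always
# added (the real runner reuses a fixed --random-seed instead, if one was set).
random_seed_stress_count = 2  # placeholder for options.random_seed_stress_count
stress_tests = [
    t.CopyAddingFlags(t.variant, ["--random-seed=%d" % random.randint(1, 2**31)])
    for t in variant_tests
    for _ in range(random_seed_stress_count)
]

print(len(variant_tests), len(stress_tests))  # 4 8
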
Example #2
def Execute(arch, mode, args, options, suites):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            # TODO(machenbach): Get rid of different output folder location on
            # buildbot. Currently this is capitalized Release and Debug.
            shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
            mode = BuildbotToV8Mode(mode)
        else:
            shell_dir = os.path.join(
                BASE_DIR,
                options.outdir,
                "%s.%s" % (arch, MODES[mode]["output_folder"]),
            )
    if not os.path.exists(shell_dir):
        raise Exception('Could not find shell_dir: "%s"' % shell_dir)

    # Populate context object.
    mode_flags = MODES[mode]["flags"]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= MODES[mode]["timeout_scalefactor"]

    if options.predictable:
        # Predictable mode is slower.
        timeout *= 2

    # TODO(machenbach): Remove temporary verbose output on windows after
    # debugging driver-hung-up on XP.
    verbose_output = (options.verbose
                      or utils.IsWindows() and options.progress == "verbose")
    ctx = context.Context(arch,
                          MODES[mode]["execution_mode"],
                          shell_dir,
                          mode_flags,
                          verbose_output,
                          timeout,
                          options.isolates,
                          options.command_prefix,
                          options.extra_flags,
                          options.no_i18n,
                          options.random_seed,
                          options.no_sorting,
                          options.rerun_failures_count,
                          options.rerun_failures_max,
                          options.predictable,
                          options.no_harness,
                          use_perf_data=not options.swarming)

    # TODO(all): Combine "simulator" and "simulator_run".
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
                 'ppc', 'ppc64'] and \
        ARCH_GUESS and arch != ARCH_GUESS
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "ignition": options.ignition,
        "isolates": options.isolates,
        "mode": MODES[mode]["status_mode"],
        "no_i18n": options.no_i18n,
        "no_snap": options.no_snap,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
        "tsan": options.tsan,
        "msan": options.msan,
        "dcheck_always_on": options.dcheck_always_on,
        "novfp3": options.novfp3,
        "predictable": options.predictable,
        "byteorder": sys.byteorder,
    }
    all_tests = []
    num_tests = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                                  options.slow_tests, options.pass_fail_tests)
        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_gen = s.CreateVariantGenerator(VARIANTS)
        variant_tests = [
            t.CopyAddingFlags(v, flags) for t in s.tests
            for v in variant_gen.FilterVariantsByTest(t)
            for flags in variant_gen.GetFlagSets(t, v)
        ]

        if options.random_seed_stress_count > 1:
            # Duplicate test for random seed stress mode.
            def iter_seed_flags():
                for i in range(0, options.random_seed_stress_count):
                    # Use given random seed for all runs (set by default in execution.py)
                    # or a new random seed if none is specified.
                    if options.random_seed:
                        yield []
                    else:
                        yield ["--random-seed=%d" % RandomSeed()]

            s.tests = [
                t.CopyAddingFlags(t.variant, flags) for t in variant_tests
                for flags in iter_seed_flags()
            ]
        else:
            s.tests = variant_tests

        s.tests = ShardTests(s.tests, options)
        num_tests += len(s.tests)

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    # Run the tests, either locally or distributed on the network.
    start_time = time.time()
    progress_indicator = progress.IndicatorNotifier()
    progress_indicator.Register(
        progress.PROGRESS_INDICATORS[options.progress]())
    if options.junitout:
        progress_indicator.Register(
            progress.JUnitTestProgressIndicator(options.junitout,
                                                options.junittestsuite))
    if options.json_test_results:
        progress_indicator.Register(
            progress.JsonTestProgressIndicator(options.json_test_results, arch,
                                               MODES[mode]["execution_mode"],
                                               ctx.random_seed))

    run_networked = not options.no_network
    if not run_networked:
        if verbose_output:
            print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
        print("Network distribution is only supported on Linux, sorry!")
        run_networked = False
    peers = []
    if run_networked:
        peers = network_execution.GetPeers()
        if not peers:
            print(
                "No connection to distribution server; running tests locally.")
            run_networked = False
        elif len(peers) == 1:
            print("No other peers on the network; running tests locally.")
            run_networked = False
        elif num_tests <= 100:
            print("Less than 100 tests, running them locally.")
            run_networked = False

    if run_networked:
        runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                   ctx, peers, BASE_DIR)
    else:
        runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    overall_duration = time.time() - start_time

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)

    if num_tests == 0:
        print("Warning: no tests were run!")

    if exit_code == 1 and options.json_test_results:
        print(
            "Force exit code 0 after failures. Json test results file generated "
            "with failure information.")
        exit_code = 0

    return exit_code
Example #3
def Execute(arch, mode, args, options, suites, workspace):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            shell_dir = os.path.join(workspace, options.outdir, mode)
            mode = mode.lower()
        else:
            shell_dir = os.path.join(workspace, options.outdir,
                                     "%s.%s" % (arch, mode))
    shell_dir = os.path.relpath(shell_dir)

    if mode == "optdebug":
        mode = "debug"  # "optdebug" is just an alias.

    # Populate context object.
    mode_flags = MODE_FLAGS[mode]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= TIMEOUT_SCALEFACTOR[mode]

    if options.predictable:
        # Predictable mode is slower.
        timeout *= 2

    ctx = context.Context(arch, mode, shell_dir, mode_flags, options.verbose,
                          timeout, options.isolates, options.command_prefix,
                          options.extra_flags, options.no_i18n,
                          options.random_seed, options.no_sorting,
                          options.rerun_failures_count,
                          options.rerun_failures_max, options.predictable)

    # TODO(all): Combine "simulator" and "simulator_run".
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "isolates": options.isolates,
        "mode": mode,
        "no_i18n": options.no_i18n,
        "no_snap": options.no_snap,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
        "tsan": options.tsan,
        "msan": options.msan,
    }
    all_tests = []
    num_tests = 0
    test_id = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                                  options.slow_tests, options.pass_fail_tests)
        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
        s.tests = [
            t.CopyAddingFlags(v) for t in s.tests
            for v in s.VariantFlags(t, variant_flags)
        ]
        s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    if num_tests == 0:
        print "No tests to run."
        return 0

    # Run the tests, either locally or distributed on the network.
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    if options.junitout:
        progress_indicator = progress.JUnitTestProgressIndicator(
            progress_indicator, options.junitout, options.junittestsuite)
    if options.json_test_results:
        progress_indicator = progress.JsonTestProgressIndicator(
            progress_indicator, options.json_test_results, arch, mode)

    run_networked = not options.no_network
    if not run_networked:
        print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
        print("Network distribution is only supported on Linux, sorry!")
        run_networked = False
    peers = []
    if run_networked:
        peers = network_execution.GetPeers()
        if not peers:
            print(
                "No connection to distribution server; running tests locally.")
            run_networked = False
        elif len(peers) == 1:
            print("No other peers on the network; running tests locally.")
            run_networked = False
        elif num_tests <= 100:
            print("Less than 100 tests, running them locally.")
            run_networked = False

    if run_networked:
        runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                   ctx, peers, workspace)
    else:
        runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    overall_duration = time.time() - start_time

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)
    return exit_code
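
Examples #2 and #3 above share the same timeout arithmetic: a doubled default on slow (simulated) architectures, a per-mode scale factor, and another doubling for predictable mode. A minimal sketch of that calculation with placeholder constants (the real TIMEOUT_DEFAULT, SLOW_ARCHS and scale factors live in the test runner):

# Placeholder values, chosen only for illustration.
TIMEOUT_DEFAULT = 60
SLOW_ARCHS = ["arm", "arm64", "mipsel", "mips64el"]
TIMEOUT_SCALEFACTOR = {"release": 1, "debug": 4}

def effective_timeout(arch, mode, predictable=False, timeout=-1):
    if timeout == -1:
        # Simulators are slow, so the default is doubled for them.
        timeout = 2 * TIMEOUT_DEFAULT if arch in SLOW_ARCHS else TIMEOUT_DEFAULT
    timeout *= TIMEOUT_SCALEFACTOR[mode]
    if predictable:
        # Predictable mode is slower, so double again.
        timeout *= 2
    return timeout

print(effective_timeout("arm64", "debug"))               # 60 * 2 * 4 = 480
print(effective_timeout("x64", "release", timeout=30))   # explicit timeout wins: 30
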
Example #4
def Execute(arch, mode, args, options, suites, workspace):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(workspace, options.outdir,
                               "%s.%s" % (arch, mode))
  shell_dir = os.path.relpath(shell_dir)

  # Populate context object.
  mode_flags = MODE_FLAGS[mode]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT
    else:
      timeout = TIMEOUT_DEFAULT

  timeout *= TIMEOUT_SCALEFACTOR[mode]
  ctx = context.Context(arch, mode, shell_dir,
                        mode_flags, options.verbose,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags)

  # Find available test suites and read test cases from them.
  variables = {
    "mode": mode,
    "arch": arch,
    "system": utils.GuessOS(),
    "isolates": options.isolates,
    "deopt_fuzzer": False,
  }
  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_flags = s.VariantFlags() or VARIANT_FLAGS
    s.tests = [ t.CopyAddingFlags(v) for t in s.tests for v in variant_flags ]
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  if num_tests == 0:
    print "No tests to run."
    return 0

  # Run the tests, either locally or distributed on the network.
  try:
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    if options.junitout:
      progress_indicator = progress.JUnitTestProgressIndicator(
          progress_indicator, options.junitout, options.junittestsuite)

    run_networked = not options.no_network
    if not run_networked:
      print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
      print("Network distribution is only supported on Linux, sorry!")
      run_networked = False
    peers = []
    if run_networked:
      peers = network_execution.GetPeers()
      if not peers:
        print("No connection to distribution server; running tests locally.")
        run_networked = False
      elif len(peers) == 1:
        print("No other peers on the network; running tests locally.")
        run_networked = False
      elif num_tests <= 100:
        print("Less than 100 tests, running them locally.")
        run_networked = False

    if run_networked:
      runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                 ctx, peers, workspace)
    else:
      runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    if runner.terminate:
      return exit_code
    overall_duration = time.time() - start_time
  except KeyboardInterrupt:
    return 1

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code
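
All four examples split the per-suite test list with ShardTests before counting it. Assuming the usual round-robin semantics of shard_count and shard_run (an assumption; the real helper may differ in details), a minimal sketch:

def shard_tests(tests, shard_count, shard_run):
    # shard_run is 1-based: shard 1 keeps indices 0, shard_count, 2*shard_count, ...
    if shard_count < 2:
        return tests
    return [t for i, t in enumerate(tests) if i % shard_count == shard_run - 1]

print(shard_tests(list(range(10)), 3, 1))  # [0, 3, 6, 9]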