Example 1
    def ListTests(self, context):
        shell = os.path.abspath(os.path.join(context.shell_dir, self.name))
        if utils.IsWindows():
            shell += ".exe"

        output = None
        for i in xrange(3):  # Try 3 times in case of errors.
            cmd = command.Command(cmd_prefix=context.command_prefix,
                                  shell=shell,
                                  args=['--gtest_list_tests'] +
                                  context.extra_flags)
            output = cmd.execute()
            if output.exit_code == 0:
                break
            print "Test executable failed to list the tests (try %d).\n\nCmd:" % i
            print cmd
            print "\nStdout:"
            print output.stdout
            print "\nStderr:"
            print output.stderr
            print "\nExit code: %d" % output.exit_code
        else:
            raise Exception("Test executable failed to list the tests.")

        tests = []
        test_case = ''
        for line in output.stdout.splitlines():
            test_desc = line.strip().split()[0]
            if test_desc.endswith('.'):
                test_case = test_desc
            elif test_case and test_desc:
                test_path = test_case + test_desc
                tests.append(self._create_test(test_path))
        tests.sort(key=lambda t: t.path)
        return tests
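
The parsing loop above relies on the layout of --gtest_list_tests output: gtest prints each suite name terminated by '.' on a line of its own, with the individual test names indented beneath it. A minimal, self-contained sketch of that parse, using made-up suite and test names (and a blank-line guard the original omits):

# Hypothetical --gtest_list_tests output; real suite and test names differ.
sample = (
    "HeapTest.\n"
    "  AllocateBasic\n"
    "  AllocateLarge\n"
    "ParserTest.\n"
    "  ParseEmpty\n"
)

tests = []
test_case = ''
for line in sample.splitlines():
    parts = line.strip().split()
    if not parts:
        continue                       # guard: skip blank lines
    test_desc = parts[0]
    if test_desc.endswith('.'):
        test_case = test_desc          # suite line, e.g. 'HeapTest.'
    elif test_case and test_desc:
        tests.append(test_case + test_desc)

print(sorted(tests))
# ['HeapTest.AllocateBasic', 'HeapTest.AllocateLarge', 'ParserTest.ParseEmpty']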
Example 2
 def ListTests(self, context):
     shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
     if utils.IsWindows():
         shell += ".exe"
     output = commands.Execute(context.command_prefix + [
         shell, "--allow-natives-syntax", "-e",
         "try { var natives = %ListNatives();"
         "  for (var n in natives) { print(natives[n]); }"
         "} catch(e) {}"
     ] + context.extra_flags)
     if output.exit_code != 0:
         print output.stdout
         print output.stderr
         assert False, "Failed to get natives list."
     tests = []
     for line in output.stdout.strip().split():
         try:
             (name, argc) = line.split(",")
             flags = [
                 "--allow-natives-syntax", "-e",
                 "var NAME = '%s', ARGC = %s;" % (name, argc)
             ]
             test = testcase.TestCase(self, name, flags)
             tests.append(test)
         except:
             # Work-around: If parsing didn't work, it might have been due to output
             # caused by other d8 flags.
             pass
     return tests
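
The bare except above tolerates stray d8 output interleaved with the natives list: any token that does not split into exactly one "name,argc" pair is silently skipped. A small standalone sketch of that tolerant parse, on hypothetical tokens (catching ValueError specifically, since that is what the tuple unpacking actually raises):

tokens = ["ArrayPush,2", "stray-banner-text", "StringCharAt,3"]
parsed = []
for token in tokens:
    try:
        name, argc = token.split(",")  # ValueError unless exactly one comma
        parsed.append((name, argc))
    except ValueError:
        pass                           # not a name,argc pair; ignore it
print(parsed)  # [('ArrayPush', '2'), ('StringCharAt', '3')]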
Example 3
 def ListTests(self, context):
     shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
     if utils.IsWindows():
         shell += ".exe"
     output = commands.Execute(context.command_prefix + [shell, "--list"] +
                               context.extra_flags)
     if output.exit_code != 0:
         print output.stdout
         print output.stderr
         return []
     tests = []
     for test_desc in output.stdout.strip().split():
         if test_desc.find('<') < 0:
             # Native Client output can contain a few non-test arguments
             # before the tests. Skip these.
             continue
         raw_test, dependency = test_desc.split('<')
         if dependency != '':
             dependency = raw_test.split('/')[0] + '/' + dependency
         else:
             dependency = None
         test = testcase.TestCase(self, raw_test, dependency=dependency)
         tests.append(test)
     tests.sort()
     return tests
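
Each token from --list in this example has the form test<dependency: an empty dependency part means the test stands alone, while a non-empty one names a dependency, resolved relative to the token's top-level path component. A standalone sketch with hypothetical tokens:

for test_desc in ["mandel/run<", "barebones/run<setup"]:
    raw_test, dependency = test_desc.split('<')
    if dependency != '':
        # Prefix with the test's top-level directory.
        dependency = raw_test.split('/')[0] + '/' + dependency
    else:
        dependency = None
    print(raw_test, dependency)
# mandel/run None
# barebones/run barebones/setup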
Example 4
 def __init__(self, name, root):
     super(CcTestSuite, self).__init__(name, root)
     if utils.IsWindows():
         build_dir = "build"
     else:
         build_dir = "out"
     self.serdes_dir = os.path.normpath(
         os.path.join(root, "..", "..", build_dir, ".serdes"))
Example 5
 def get_command(self):
   params = self._get_cmd_params()
   env = self._get_cmd_env()
   shell = self.get_shell()
   if utils.IsWindows():
     shell += '.exe'
   shell_flags = self._get_shell_flags()
   timeout = self._get_timeout(params)
   return self._create_cmd(shell, shell_flags + params, env, timeout)
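
Every example in this collection appends '.exe' to the shell path on Windows; utils.IsWindows() is the test runner's own helper. A minimal stand-in using only the standard library might look like this (executable_name is a hypothetical helper, not part of the runner):

import sys

def executable_name(path):
    # sys.platform is 'win32' on Windows, including 64-bit builds.
    if sys.platform == 'win32':
        return path + '.exe'
    return path

print(executable_name('out/Release/d8'))  # unchanged on POSIX systems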
Example 6
 def __init__(self, name, root):
     super(CcTestSuite, self).__init__(name, root)
     if utils.IsWindows():
         build_dir = "build"
     else:
         build_dir = "out"
     self.serdes_dir = os.path.normpath(
         os.path.join(root, "..", "..", build_dir, ".serdes"))
     if os.path.exists(self.serdes_dir):
         shutil.rmtree(self.serdes_dir, True)  # True == ignore_errors
     os.makedirs(self.serdes_dir)
Example 7
    def _get_external_symbolizer_option(self):
        external_symbolizer_path = os.path.join(
            self.basedir,
            'third_party',
            'llvm-build',
            'Release+Asserts',
            'bin',
            'llvm-symbolizer',
        )

        if utils.IsWindows():
            # Quote, because sanitizers might confuse colon as option separator.
            external_symbolizer_path = '"%s.exe"' % external_symbolizer_path

        return 'external_symbolizer_path=%s' % external_symbolizer_path
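
The quoting above matters because sanitizer option strings are colon-separated (as the comment notes), so a bare 'C:\...' path would be split at the drive-letter colon. A tiny illustration of the joined form, with a hypothetical path:

path = r'C:\src\llvm-symbolizer'
options = ['external_symbolizer_path="%s.exe"' % path, 'symbolize=1']
print(':'.join(options))
# external_symbolizer_path="C:\src\llvm-symbolizer.exe":symbolize=1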
Example 8
 def ListTests(self, context):
     shell = os.path.abspath(os.path.join(context.shell_dir, SHELL))
     if utils.IsWindows():
         shell += ".exe"
     cmd = command.Command(cmd_prefix=context.command_prefix,
                           shell=shell,
                           args=["--list"] + context.extra_flags)
     output = cmd.execute()
     if output.exit_code != 0:
         print cmd
         print output.stdout
         print output.stderr
         return []
     tests = map(self._create_test, output.stdout.strip().split())
     tests.sort(key=lambda t: t.path)
     return tests
Example 9
 def ListTests(self, context):
     shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
     if utils.IsWindows():
         shell += ".exe"
     output = commands.Execute(context.command_prefix + [shell, "--list"] +
                               context.extra_flags)
     if output.exit_code != 0:
         print output.stdout
         print output.stderr
         return []
     tests = []
     for test_desc in output.stdout.strip().split():
         test = testcase.TestCase(self, test_desc)
         tests.append(test)
     tests.sort()
     return tests
Example 10
    def _list_test_filenames(self):
        shell = os.path.abspath(os.path.join(self.test_config.shell_dir,
                                             SHELL))
        if utils.IsWindows():
            shell += ".exe"
        cmd = command.Command(cmd_prefix=self.test_config.command_prefix,
                              shell=shell,
                              args=["--list"] + self.test_config.extra_flags)
        output = cmd.execute()
        # TODO make errors visible (see duplicated code in 'unittests')
        if output.exit_code != 0:
            print(cmd)
            print(output.stdout)
            print(output.stderr)
            return []

        return sorted(output.stdout.strip().split())
Example 11
 def ListTests(self, context):
     shell = os.path.abspath(os.path.join(context.shell_dir, self.SHELL))
     if utils.IsWindows():
         shell += ".exe"
     cmd = command.Command(cmd_prefix=context.command_prefix,
                           shell=shell,
                           args=["--list"] + context.extra_flags)
     output = cmd.execute()
     if output.exit_code != 0:
         print cmd
         print output.stdout
         print output.stderr
         return []
     tests = []
     for test_desc in output.stdout.strip().split():
         test = testcase.TestCase(self, test_desc)
         tests.append(test)
     tests.sort(key=lambda t: t.path)
     return tests
Example 12
 def ListTests(self, context):
     shell = join(context.shell_dir, self.shell())
     if utils.IsWindows():
         shell += '.exe'
     output = commands.Execute([shell, '--list'])
     if output.exit_code != 0:
         print output.stdout
         print output.stderr
         return []
     tests = []
     for test_desc in output.stdout.strip().split():
         raw_test, dependency = test_desc.split('<')
         if dependency != '':
             dependency = raw_test.split('/')[0] + '/' + dependency
         else:
             dependency = None
         test = testcase.TestCase(self, raw_test, dependency=dependency)
         tests.append(test)
     tests.sort()
     return tests
Example 13
  def _list_test_filenames(self):
    shell = os.path.abspath(
      os.path.join(self.test_config.shell_dir, SHELL))
    if utils.IsWindows():
      shell += ".exe"
    cmd = command.Command(
      cmd_prefix=self.test_config.command_prefix,
      shell=shell,
      args=['--list'] + self.test_config.extra_flags)
    output = cmd.execute()

    if output.exit_code != 0:
      print("Test executable failed to list the tests.\n\nCmd:")
      print(cmd)
      print("\nStdout:")
      print(output.stdout)
      print("\nStderr:")
      print(output.stderr)
      print("\nExit code: %d" % output.exit_code)

    return sorted(output.stdout.strip().split())
Example 14
    def _list_test_filenames(self):
        shell = os.path.abspath(
            os.path.join(self.test_config.shell_dir, "wasm_api_tests"))
        if utils.IsWindows():
            shell += ".exe"

        output = None
        for i in range(3):  # Try 3 times in case of errors.
            cmd = command.Command(cmd_prefix=self.test_config.command_prefix,
                                  shell=shell,
                                  args=['--gtest_list_tests'] +
                                  self.test_config.extra_flags)
            output = cmd.execute()
            if output.exit_code == 0:
                break

            print(
                "Test executable failed to list the tests (try %d).\n\nCmd:" %
                i)
            print(cmd)
            print("\nStdout:")
            print(output.stdout)
            print("\nStderr:")
            print(output.stderr)
            print("\nExit code: %d" % output.exit_code)
        else:
            raise Exception("Test executable failed to list the tests.")

        # TODO create an ExecutableTestLoader for refactoring this similar to
        # JSTestLoader.
        test_names = []
        test_case = ''
        for line in output.stdout.splitlines():
            test_desc = line.strip().split()[0]
            if test_desc.endswith('.'):
                test_case = test_desc
            elif test_case and test_desc:
                test_names.append(test_case + test_desc)

        return sorted(test_names)
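
Example 14 is the Python 3 counterpart of Example 1: the same three-attempt listing loop and suite-prefix parse, but it returns sorted test names instead of constructing test objects.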
Example 15
 def __init__(self, name, root):
   super(CcTestSuite, self).__init__(name, root)
   if utils.IsWindows():
     build_dir = "build"
   else:
     build_dir = "out"
Example 16
    def _setup_env(self, options):
        """Setup additional environment variables."""

        # Many tests assume an English interface.
        os.environ['LANG'] = 'en_US.UTF-8'

        external_symbolizer_path = os.path.join(
            base_runner.BASE_DIR,
            'third_party',
            'llvm-build',
            'Release+Asserts',
            'bin',
            'llvm-symbolizer',
        )
        if utils.IsWindows():
            # Quote, because sanitizers might confuse colon as option separator.
            external_symbolizer_path = '"%s.exe"' % external_symbolizer_path
        symbolizer = 'external_symbolizer_path=%s' % external_symbolizer_path

        if self.build_config.asan:
            asan_options = [symbolizer, "allow_user_segv_handler=1"]
            if not utils.GuessOS() in ['macos', 'windows']:
                # LSAN is not available on mac and windows.
                asan_options.append('detect_leaks=1')
            os.environ['ASAN_OPTIONS'] = ":".join(asan_options)

        if options.sancov_dir:
            assert os.path.exists(options.sancov_dir)
            os.environ['ASAN_OPTIONS'] = ":".join([
                'coverage=1',
                'coverage_dir=%s' % options.sancov_dir,
                symbolizer,
                "allow_user_segv_handler=1",
            ])

        if self.build_config.cfi_vptr:
            os.environ['UBSAN_OPTIONS'] = ":".join([
                'print_stacktrace=1',
                'print_summary=1',
                'symbolize=1',
                symbolizer,
            ])

        if self.build_config.ubsan_vptr:
            os.environ['UBSAN_OPTIONS'] = ":".join([
                'print_stacktrace=1',
                symbolizer,
            ])

        if self.build_config.msan:
            os.environ['MSAN_OPTIONS'] = symbolizer

        if self.build_config.tsan:
            suppressions_file = os.path.join(base_runner.BASE_DIR, 'tools',
                                             'sanitizers',
                                             'tsan_suppressions.txt')
            os.environ['TSAN_OPTIONS'] = " ".join([
                symbolizer,
                'suppressions=%s' % suppressions_file,
                'exit_code=0',
                'report_thread_leaks=0',
                'history_size=7',
                'report_destroy_locked=0',
            ])
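
Note that the option strings are joined differently per sanitizer above: the ASAN, UBSAN, and coverage options are colon-separated, while TSAN_OPTIONS is space-separated.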
Example 17
def Execute(arch, mode, args, options, suites):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            # TODO(machenbach): Get rid of different output folder location on
            # buildbot. Currently this is capitalized Release and Debug.
            shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
            mode = BuildbotToV8Mode(mode)
        else:
            shell_dir = os.path.join(
                BASE_DIR,
                options.outdir,
                "%s.%s" % (arch, MODES[mode]["output_folder"]),
            )
    if not os.path.exists(shell_dir):
        raise Exception('Could not find shell_dir: "%s"' % shell_dir)

    # Populate context object.
    mode_flags = MODES[mode]["flags"]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= MODES[mode]["timeout_scalefactor"]

    if options.predictable:
        # Predictable mode is slower.
        timeout *= 2

    # TODO(machenbach): Remove temporary verbose output on windows after
    # debugging driver-hung-up on XP.
    verbose_output = (options.verbose
                      or utils.IsWindows() and options.progress == "verbose")
    ctx = context.Context(arch,
                          MODES[mode]["execution_mode"],
                          shell_dir,
                          mode_flags,
                          verbose_output,
                          timeout,
                          options.isolates,
                          options.command_prefix,
                          options.extra_flags,
                          options.no_i18n,
                          options.random_seed,
                          options.no_sorting,
                          options.rerun_failures_count,
                          options.rerun_failures_max,
                          options.predictable,
                          options.no_harness,
                          use_perf_data=not options.swarming)

    # TODO(all): Combine "simulator" and "simulator_run".
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
                 'ppc', 'ppc64'] and \
        ARCH_GUESS and arch != ARCH_GUESS
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "gcov_coverage": options.gcov_coverage,
        "ignition": options.ignition,
        "isolates": options.isolates,
        "mode": MODES[mode]["status_mode"],
        "no_i18n": options.no_i18n,
        "no_snap": options.no_snap,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
        "tsan": options.tsan,
        "msan": options.msan,
        "dcheck_always_on": options.dcheck_always_on,
        "novfp3": options.novfp3,
        "predictable": options.predictable,
        "byteorder": sys.byteorder,
    }
    all_tests = []
    num_tests = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                                  options.slow_tests, options.pass_fail_tests)
        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_gen = s.CreateVariantGenerator(VARIANTS)
        variant_tests = [
            t.CopyAddingFlags(v, flags) for t in s.tests
            for v in variant_gen.FilterVariantsByTest(t)
            for flags in variant_gen.GetFlagSets(t, v)
        ]

        if options.random_seed_stress_count > 1:
            # Duplicate test for random seed stress mode.
            def iter_seed_flags():
                for i in range(0, options.random_seed_stress_count):
                    # Use given random seed for all runs (set by default in execution.py)
                    # or a new random seed if none is specified.
                    if options.random_seed:
                        yield []
                    else:
                        yield ["--random-seed=%d" % RandomSeed()]

            s.tests = [
                t.CopyAddingFlags(t.variant, flags) for t in variant_tests
                for flags in iter_seed_flags()
            ]
        else:
            s.tests = variant_tests

        s.tests = ShardTests(s.tests, options)
        num_tests += len(s.tests)

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    # Run the tests, either locally or distributed on the network.
    start_time = time.time()
    progress_indicator = progress.IndicatorNotifier()
    progress_indicator.Register(
        progress.PROGRESS_INDICATORS[options.progress]())
    if options.junitout:
        progress_indicator.Register(
            progress.JUnitTestProgressIndicator(options.junitout,
                                                options.junittestsuite))
    if options.json_test_results:
        progress_indicator.Register(
            progress.JsonTestProgressIndicator(options.json_test_results, arch,
                                               MODES[mode]["execution_mode"],
                                               ctx.random_seed))

    run_networked = not options.no_network
    if not run_networked:
        if verbose_output:
            print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
        print("Network distribution is only supported on Linux, sorry!")
        run_networked = False
    peers = []
    if run_networked:
        peers = network_execution.GetPeers()
        if not peers:
            print(
                "No connection to distribution server; running tests locally.")
            run_networked = False
        elif len(peers) == 1:
            print("No other peers on the network; running tests locally.")
            run_networked = False
        elif num_tests <= 100:
            print("Less than 100 tests, running them locally.")
            run_networked = False

    if run_networked:
        runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                   ctx, peers, BASE_DIR)
    else:
        runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    overall_duration = time.time() - start_time

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)

    if num_tests == 0:
        print("Warning: no tests were run!")

    if exit_code == 1 and options.json_test_results:
        print(
            "Force exit code 0 after failures. Json test results file generated "
            "with failure information.")
        exit_code = 0

    return exit_code
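
The timeout logic above compounds several multipliers. A worked example with hypothetical numbers (TIMEOUT_DEFAULT and the per-mode scale factor are placeholders; the real values live in the runner's constants):

TIMEOUT_DEFAULT = 60           # hypothetical default, in seconds
timeout = 2 * TIMEOUT_DEFAULT  # slow (simulated) architecture: 120
timeout *= 6                   # e.g. a debug mode's timeout_scalefactor: 720
timeout *= 2                   # --predictable doubles it again: 1440
print(timeout)                 # 1440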
Example 18
def Execute(arch, mode, args, options, suites, workspace):
    print(">>> Running tests for %s.%s" % (arch, mode))

    shell_dir = options.shell_dir
    if not shell_dir:
        if options.buildbot:
            # TODO(machenbach): Get rid of different output folder location on
            # buildbot. Currently this is capitalized Release and Debug.
            shell_dir = os.path.join(workspace, options.outdir, mode)
            mode = mode.lower()
        else:
            shell_dir = os.path.join(
                workspace,
                options.outdir,
                "%s.%s" % (arch, MODES[mode]["output_folder"]),
            )
    shell_dir = os.path.relpath(shell_dir)

    # Populate context object.
    mode_flags = MODES[mode]["flags"]
    timeout = options.timeout
    if timeout == -1:
        # Simulators are slow, therefore allow a longer default timeout.
        if arch in SLOW_ARCHS:
            timeout = 2 * TIMEOUT_DEFAULT
        else:
            timeout = TIMEOUT_DEFAULT

    timeout *= MODES[mode]["timeout_scalefactor"]

    if options.predictable:
        # Predictable mode is slower.
        timeout *= 2

    # TODO(machenbach): Remove temporary verbose output on windows after
    # debugging driver-hung-up on XP.
    verbose_output = (options.verbose
                      or utils.IsWindows() and options.progress == "verbose")
    ctx = context.Context(
        arch, MODES[mode]["execution_mode"], shell_dir, mode_flags,
        verbose_output, timeout, options.isolates, options.command_prefix,
        options.extra_flags, options.no_i18n, options.random_seed,
        options.no_sorting, options.rerun_failures_count,
        options.rerun_failures_max, options.predictable, options.no_harness)

    # TODO(all): Combine "simulator" and "simulator_run".
    simulator_run = not options.dont_skip_simulator_slow_tests and \
        arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
                 'ppc', 'ppc64'] and \
        ARCH_GUESS and arch != ARCH_GUESS
    # Find available test suites and read test cases from them.
    variables = {
        "arch": arch,
        "asan": options.asan,
        "deopt_fuzzer": False,
        "gc_stress": options.gc_stress,
        "isolates": options.isolates,
        "mode": MODES[mode]["status_mode"],
        "no_i18n": options.no_i18n,
        "no_snap": options.no_snap,
        "simulator_run": simulator_run,
        "simulator": utils.UseSimulator(arch),
        "system": utils.GuessOS(),
        "tsan": options.tsan,
        "msan": options.msan,
        "dcheck_always_on": options.dcheck_always_on,
        "byteorder": sys.byteorder,
    }
    all_tests = []
    num_tests = 0
    test_id = 0
    for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)
        if len(args) > 0:
            s.FilterTestCasesByArgs(args)
        all_tests += s.tests
        s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                                  options.slow_tests, options.pass_fail_tests)
        if options.cat:
            verbose.PrintTestSource(s.tests)
            continue
        variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
        s.tests = [
            t.CopyAddingFlags(v) for t in s.tests
            for v in s.VariantFlags(t, variant_flags)
        ]
        s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
        num_tests += len(s.tests)
        for t in s.tests:
            t.id = test_id
            test_id += 1

    if options.cat:
        return 0  # We're done here.

    if options.report:
        verbose.PrintReport(all_tests)

    if num_tests == 0:
        print "No tests to run."
        return 0

    # Run the tests, either locally or distributed on the network.
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    if options.junitout:
        progress_indicator = progress.JUnitTestProgressIndicator(
            progress_indicator, options.junitout, options.junittestsuite)
    if options.json_test_results:
        progress_indicator = progress.JsonTestProgressIndicator(
            progress_indicator, options.json_test_results, arch,
            MODES[mode]["execution_mode"])

    run_networked = not options.no_network
    if not run_networked:
        print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
        print("Network distribution is only supported on Linux, sorry!")
        run_networked = False
    peers = []
    if run_networked:
        peers = network_execution.GetPeers()
        if not peers:
            print(
                "No connection to distribution server; running tests locally.")
            run_networked = False
        elif len(peers) == 1:
            print("No other peers on the network; running tests locally.")
            run_networked = False
        elif num_tests <= 100:
            print("Less than 100 tests, running them locally.")
            run_networked = False

    if run_networked:
        runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                   ctx, peers, workspace)
    else:
        runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    overall_duration = time.time() - start_time

    if options.time:
        verbose.PrintTestDurations(suites, overall_duration)
    return exit_code