Example 1
def start_runner(runner_command_queue, runner_result_queue,
                 executor_cls, executor_kwargs,
                 executor_browser_cls, executor_browser_kwargs,
                 capture_stdio, stop_flag):
    """Launch a TestRunner in a new process"""

    def send_message(command, *args):
        runner_result_queue.put((command, args))

    def handle_error(e):
        logger.critical(traceback.format_exc())
        stop_flag.set()

    # Ensure that the global lock in the logging module is released when
    # this starts in a new process
    release_mozlog_lock()

    logger = MessageLogger(send_message)

    with capture.CaptureIO(logger, capture_stdio):
        try:
            browser = executor_browser_cls(**executor_browser_kwargs)
            executor = executor_cls(browser, **executor_kwargs)
            with TestRunner(logger, runner_command_queue, runner_result_queue, executor) as runner:
                try:
                    runner.run()
                except KeyboardInterrupt:
                    stop_flag.set()
                except Exception as e:
                    handle_error(e)
        except Exception as e:
            handle_error(e)
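
start_runner is meant to be the target of a child process. A minimal sketch of how a parent might wire it up with standard multiprocessing primitives follows; launch_runner and the queue/flag names are illustrative and not part of the original code.

import multiprocessing


def launch_runner(executor_cls, executor_kwargs,
                  executor_browser_cls, executor_browser_kwargs):
    # Illustrative parent-side wiring only; the real harness builds these
    # objects inside its manager code.
    command_queue = multiprocessing.Queue()  # parent -> runner commands
    result_queue = multiprocessing.Queue()   # runner -> parent results and log messages
    stop_flag = multiprocessing.Event()      # set by the runner on fatal errors

    proc = multiprocessing.Process(
        target=start_runner,
        args=(command_queue, result_queue,
              executor_cls, executor_kwargs,
              executor_browser_cls, executor_browser_kwargs,
              True,        # capture_stdio
              stop_flag))
    proc.start()
    return proc, command_queue, result_queue, stop_flag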
Example 2
def start_runner(runner_command_queue, runner_result_queue,
                 executor_cls, executor_kwargs,
                 executor_browser_cls, executor_browser_kwargs,
                 capture_stdio, stop_flag, recording):
    """Launch a TestRunner in a new process"""

    def send_message(command, *args):
        runner_result_queue.put((command, args))

    def handle_error(e):
        logger.critical(traceback.format_exc())
        stop_flag.set()

    # Ensure that the global lock in the logging module is released when
    # this starts in a new process
    release_mozlog_lock()

    proc_name = mpcontext.get_context().current_process().name
    logger = structuredlog.StructuredLogger(proc_name)
    logger.add_handler(LogMessageHandler(send_message))

    with capture.CaptureIO(logger, capture_stdio):
        try:
            browser = executor_browser_cls(**executor_browser_kwargs)
            executor = executor_cls(logger, browser, **executor_kwargs)
            with TestRunner(logger, runner_command_queue, runner_result_queue, executor, recording) as runner:
                try:
                    runner.run()
                except KeyboardInterrupt:
                    stop_flag.set()
                except Exception as e:
                    handle_error(e)
        except Exception as e:
            handle_error(e)
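
This version forwards log output through a mozlog handler instead of a custom logger class. mozlog handlers are callables that receive each structured log entry as a dict, so LogMessageHandler can plausibly be as small as the hedged sketch below; the exact upstream implementation may differ.

class LogMessageHandler:
    """Hedged sketch of a handler that relays structured log entries to the
    parent process; not copied from the original source."""

    def __init__(self, send_message):
        self.send_message = send_message

    def __call__(self, data):
        # Each mozlog entry arrives as a dict; wrap it in a "log" command so
        # the parent can replay it on its own logger.
        self.send_message("log", data)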
Example 3
def test_captureio_log(self):
    """
    CaptureIO takes two arguments here. The second argument must be
    truthy for stdout/stderr capture to be enabled, hence the string
    "capture_stdio" is passed in this test case.
    """
    with capture.CaptureIO(self.logger, "capture_stdio"):
        print("message 1")
        sys.stdout.write("message 2")
        sys.stderr.write("message 3")
        sys.stdout.write("\xff")
    log = self.handler.items
    messages = [item["message"] for item in log]
    self.assertIn("STDOUT: message 1", messages)
    self.assertIn("STDOUT: message 2", messages)
    self.assertIn("STDERR: message 3", messages)
    self.assertIn(u"STDOUT: \xff", messages)
Example 4
def run_tests(config, test_paths, product, **kwargs):
    """Set up the test environment, load the list of tests to be executed, and
    invoke the remainder of the code to execute tests"""
    mp = mpcontext.get_context()
    if kwargs["instrument_to_file"] is None:
        recorder = instruments.NullInstrument()
    else:
        recorder = instruments.Instrument(kwargs["instrument_to_file"])
    with recorder as recording, capture.CaptureIO(logger,
                                                  not kwargs["no_capture_stdio"],
                                                  mp_context=mp):
        recording.set(["startup"])
        env.do_delayed_imports(logger, test_paths)

        product = products.load_product(config, product, load_cls=True)

        env_extras = product.get_env_extras(**kwargs)

        product.check_args(**kwargs)

        if kwargs["install_fonts"]:
            env_extras.append(FontInstaller(
                logger,
                font_dir=kwargs["font_dir"],
                ahem=os.path.join(test_paths["/"]["tests_path"], "fonts/Ahem.ttf")
            ))

        recording.set(["startup", "load_tests"])

        test_groups = (testloader.TestGroupsFile(logger, kwargs["test_groups_file"])
                       if kwargs["test_groups_file"] else None)

        (test_source_cls,
         test_source_kwargs,
         chunker_kwargs) = testloader.get_test_src(logger=logger,
                                                   test_groups=test_groups,
                                                   **kwargs)
        run_info, test_loader = get_loader(test_paths,
                                           product.name,
                                           run_info_extras=product.run_info_extras(**kwargs),
                                           chunker_kwargs=chunker_kwargs,
                                           test_groups=test_groups,
                                           **kwargs)

        logger.info("Using %i client processes" % kwargs["processes"])

        skipped_tests = 0
        test_total = 0
        unexpected_total = 0

        if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
            logger.critical("Unable to find any tests at the path(s):")
            for path in kwargs["test_list"]:
                logger.critical("  %s" % path)
            logger.critical("Please check spelling and make sure there are tests in the specified path(s).")
            return False
        kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)

        ssl_config = {"type": kwargs["ssl_type"],
                      "openssl": {"openssl_binary": kwargs["openssl_binary"]},
                      "pregenerated": {"host_key_path": kwargs["host_key_path"],
                                       "host_cert_path": kwargs["host_cert_path"],
                                       "ca_cert_path": kwargs["ca_cert_path"]}}

        testharness_timeout_multipler = product.get_timeout_multiplier("testharness",
                                                                       run_info,
                                                                       **kwargs)

        mojojs_path = kwargs["mojojs_path"] if kwargs["enable_mojojs"] else None

        recording.set(["startup", "start_environment"])
        with env.TestEnvironment(test_paths,
                                 testharness_timeout_multipler,
                                 kwargs["pause_after_test"],
                                 kwargs["debug_test"],
                                 kwargs["debug_info"],
                                 product.env_options,
                                 ssl_config,
                                 env_extras,
                                 kwargs["enable_quic"],
                                 mojojs_path) as test_environment:
            recording.set(["startup", "ensure_environment"])
            try:
                test_environment.ensure_started()
            except env.TestEnvironmentError as e:
                logger.critical("Error starting test environment: %s" % e.message)
                raise

            recording.set(["startup"])

            repeat = kwargs["repeat"]
            repeat_count = 0
            repeat_until_unexpected = kwargs["repeat_until_unexpected"]

            while repeat_count < repeat or repeat_until_unexpected:
                repeat_count += 1
                if repeat_until_unexpected:
                    logger.info("Repetition %i" % (repeat_count))
                elif repeat > 1:
                    logger.info("Repetition %i / %i" % (repeat_count, repeat))

                test_count = 0
                unexpected_count = 0

                tests = []
                for test_type in test_loader.test_types:
                    tests.extend(test_loader.tests[test_type])

                try:
                    test_groups = test_source_cls.tests_by_group(tests, **test_source_kwargs)
                except Exception:
                    logger.critical("Loading tests failed")
                    return False

                logger.suite_start(test_groups,
                                   name='web-platform-test',
                                   run_info=run_info,
                                   extra={"run_by_dir": kwargs["run_by_dir"]})
                for test_type in kwargs["test_types"]:
                    logger.info("Running %s tests" % test_type)

                    # WebDriver tests may create and destroy multiple browser
                    # processes as part of their expected behavior. These
                    # processes are managed by a WebDriver server binary. This
                    # obviates the need for wptrunner to provide a browser, so
                    # the NullBrowser is used in place of the "target" browser
                    if test_type == "wdspec":
                        browser_cls = NullBrowser
                    else:
                        browser_cls = product.browser_cls

                    browser_kwargs = product.get_browser_kwargs(logger,
                                                                test_type,
                                                                run_info,
                                                                config=test_environment.config,
                                                                num_test_groups=len(test_groups),
                                                                **kwargs)

                    executor_cls = product.executor_classes.get(test_type)
                    executor_kwargs = product.get_executor_kwargs(logger,
                                                                  test_type,
                                                                  test_environment.config,
                                                                  test_environment.cache_manager,
                                                                  run_info,
                                                                  **kwargs)

                    if executor_cls is None:
                        logger.error("Unsupported test type %s for product %s" %
                                     (test_type, product.name))
                        continue

                    for test in test_loader.disabled_tests[test_type]:
                        logger.test_start(test.id)
                        logger.test_end(test.id, status="SKIP")
                        skipped_tests += 1

                    if test_type == "testharness":
                        run_tests = {"testharness": []}
                        for test in test_loader.tests["testharness"]:
                            if ((test.testdriver and not executor_cls.supports_testdriver) or
                                (test.jsshell and not executor_cls.supports_jsshell)):
                                logger.test_start(test.id)
                                logger.test_end(test.id, status="SKIP")
                                skipped_tests += 1
                            else:
                                run_tests["testharness"].append(test)
                    else:
                        run_tests = test_loader.tests

                    recording.pause()
                    with ManagerGroup("web-platform-tests",
                                      kwargs["processes"],
                                      test_source_cls,
                                      test_source_kwargs,
                                      browser_cls,
                                      browser_kwargs,
                                      executor_cls,
                                      executor_kwargs,
                                      kwargs["rerun"],
                                      kwargs["pause_after_test"],
                                      kwargs["pause_on_unexpected"],
                                      kwargs["restart_on_unexpected"],
                                      kwargs["debug_info"],
                                      not kwargs["no_capture_stdio"],
                                      recording=recording) as manager_group:
                        try:
                            manager_group.run(test_type, run_tests)
                        except KeyboardInterrupt:
                            logger.critical("Main thread got signal")
                            manager_group.stop()
                            raise
                        test_count += manager_group.test_count()
                        unexpected_count += manager_group.unexpected_count()
                recording.set(["after-end"])
                test_total += test_count
                unexpected_total += unexpected_count
                logger.info("Got %i unexpected results" % unexpected_count)
                logger.suite_end()
                if repeat_until_unexpected and unexpected_total > 0:
                    break
                if repeat_count == 1 and len(test_loader.test_ids) == skipped_tests:
                    break

    if test_total == 0:
        if skipped_tests > 0:
            logger.warning("All requested tests were skipped")
        else:
            if kwargs["default_exclude"]:
                logger.info("No tests ran")
                return True
            else:
                logger.critical("No tests ran")
                return False

    if unexpected_total and not kwargs["fail_on_unexpected"]:
        logger.info("Tolerating %s unexpected results" % unexpected_total)
        return True

    return unexpected_total == 0
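
This version reports success as a single boolean. A minimal sketch (illustrative, not from the source) of how a caller might map that return value to a process exit status:

import sys


def main(config, test_paths, product, **kwargs):
    # run_tests returns True on an acceptable run: no unexpected results, or
    # unexpected results tolerated because fail_on_unexpected is false. Map
    # that to the conventional 0/1 exit codes.
    passed = run_tests(config, test_paths, product, **kwargs)
    sys.exit(0 if passed else 1)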
Example 5
def run_tests(config, test_paths, product, **kwargs):
    """Set up the test environment, load the list of tests to be executed, and
    invoke the remainder of the code to execute tests"""
    mp = mpcontext.get_context()
    if kwargs["instrument_to_file"] is None:
        recorder = instruments.NullInstrument()
    else:
        recorder = instruments.Instrument(kwargs["instrument_to_file"])
    with recorder as recording, capture.CaptureIO(
            logger, not kwargs["no_capture_stdio"], mp_context=mp):
        recording.set(["startup"])
        env.do_delayed_imports(logger, test_paths)

        product = products.Product(config, product)

        env_extras = product.get_env_extras(**kwargs)

        product.check_args(**kwargs)

        if kwargs["install_fonts"]:
            env_extras.append(
                FontInstaller(logger,
                              font_dir=kwargs["font_dir"],
                              ahem=os.path.join(test_paths["/"]["tests_path"],
                                                "fonts/Ahem.ttf")))

        recording.set(["startup", "load_tests"])

        test_groups = (testloader.TestGroupsFile(logger,
                                                 kwargs["test_groups_file"])
                       if kwargs["test_groups_file"] else None)

        (test_source_cls, test_source_kwargs,
         chunker_kwargs) = testloader.get_test_src(logger=logger,
                                                   test_groups=test_groups,
                                                   **kwargs)
        run_info, test_loader = get_loader(
            test_paths,
            product.name,
            run_info_extras=product.run_info_extras(**kwargs),
            chunker_kwargs=chunker_kwargs,
            test_groups=test_groups,
            **kwargs)

        logger.info("Using %i client processes" % kwargs["processes"])

        test_status = TestStatus()
        repeat = kwargs["repeat"]
        test_status.expected_repeat = repeat

        if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
            logger.critical("Unable to find any tests at the path(s):")
            for path in kwargs["test_list"]:
                logger.critical("  %s" % path)
            logger.critical(
                "Please check spelling and make sure there are tests in the specified path(s)."
            )
            return False, test_status
        kwargs["pause_after_test"] = get_pause_after_test(
            test_loader, **kwargs)

        ssl_config = {
            "type": kwargs["ssl_type"],
            "openssl": {
                "openssl_binary": kwargs["openssl_binary"]
            },
            "pregenerated": {
                "host_key_path": kwargs["host_key_path"],
                "host_cert_path": kwargs["host_cert_path"],
                "ca_cert_path": kwargs["ca_cert_path"]
            }
        }

        testharness_timeout_multipler = product.get_timeout_multiplier(
            "testharness", run_info, **kwargs)

        mojojs_path = kwargs["mojojs_path"] if kwargs["enable_mojojs"] else None

        recording.set(["startup", "start_environment"])
        with env.TestEnvironment(test_paths, testharness_timeout_multipler,
                                 kwargs["pause_after_test"],
                                 kwargs["debug_test"], kwargs["debug_info"],
                                 product.env_options, ssl_config, env_extras,
                                 kwargs["enable_webtransport_h3"],
                                 mojojs_path) as test_environment:
            recording.set(["startup", "ensure_environment"])
            try:
                test_environment.ensure_started()
                start_time = datetime.now()
            except env.TestEnvironmentError as e:
                logger.critical("Error starting test environment: %s" % e)
                raise

            recording.set(["startup"])

            max_time = None
            if "repeat_max_time" in kwargs:
                max_time = timedelta(minutes=kwargs["repeat_max_time"])

            repeat_until_unexpected = kwargs["repeat_until_unexpected"]

            # Keep track of the longest time taken to complete a test suite iteration
            # so that the runs can be stopped early to avoid a possible TC timeout.
            longest_iteration_time = timedelta()

            while test_status.repeated_runs < repeat or repeat_until_unexpected:
                # if the next repeat run could cause the TC timeout to be reached,
                # stop now and use the test results we have.
                # Pad the total time by 10% to ensure ample time for the next iteration(s).
                estimate = (datetime.now() + timedelta(
                    seconds=(longest_iteration_time.total_seconds() * 1.1)))
                if not repeat_until_unexpected and max_time and estimate >= start_time + max_time:
                    logger.info(
                        f"Ran {test_status.repeated_runs} of {repeat} iterations."
                    )
                    break

                # begin tracking runtime of the test suite
                iteration_start = datetime.now()
                test_status.repeated_runs += 1
                if repeat_until_unexpected:
                    logger.info(f"Repetition {test_status.repeated_runs}")
                elif repeat > 1:
                    logger.info(
                        f"Repetition {test_status.repeated_runs} / {repeat}")

                iter_success = run_test_iteration(test_status, test_loader,
                                                  test_source_kwargs,
                                                  test_source_cls, run_info,
                                                  recording, test_environment,
                                                  product, kwargs)
                # If there were issues with the suite run (tests not loaded, etc.), return early
                if not iter_success:
                    return False, test_status
                recording.set(["after-end"])
                logger.info(
                    f"Got {test_status.unexpected} unexpected results, "
                    f"with {test_status.unexpected_pass} unexpected passes")
                logger.suite_end()

                # Note this iteration's runtime
                iteration_runtime = datetime.now() - iteration_start
                # determine the longest test suite runtime seen.
                longest_iteration_time = max(longest_iteration_time,
                                             iteration_runtime)

                if repeat_until_unexpected and test_status.unexpected > 0:
                    break
                if test_status.repeated_runs == 1 and len(
                        test_loader.test_ids) == test_status.skipped:
                    test_status.all_skipped = True
                    break

    # Return the evaluation of the runs and the number of repeated iterations that were run.
    return evaluate_runs(test_status, kwargs), test_status
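
Compared to the previous version, this one returns a (bool, TestStatus) pair. A hedged reconstruction of the TestStatus fields the function touches directly, inferred from the attribute accesses above (the real class likely defines more state, updated inside run_test_iteration):

class TestStatus:
    """Illustrative only: fields inferred from usage in run_tests above."""

    def __init__(self):
        self.expected_repeat = 0    # planned number of repeat iterations
        self.repeated_runs = 0      # iterations actually started
        self.unexpected = 0         # results that did not match expectations
        self.unexpected_pass = 0    # unexpected results that were passes
        self.skipped = 0            # tests skipped across the run
        self.all_skipped = False    # True if the first iteration skipped every test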