Example #1
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._test_is_slow)
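For orientation, here is a minimal sketch (not from any of the quoted projects) showing how a Manager like the one above can be constructed against webkitpy's mock host, mirroring the test helper in Example #7 further down. The StubPrinter class and the exact import paths are assumptions.

# Sketch only: build a Manager against webkitpy's mock host.  Import paths
# follow the webkitpy layout these snippets come from and may differ between
# the projects quoted on this page.
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.controllers.manager import Manager


class StubPrinter(object):
    """Minimal stand-in for webkitpy's Printer (assumed interface)."""

    def write_update(self, message):
        pass

    def print_found(self, *args):
        pass

    def print_results(self, *args):
        pass


options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
host = MockHost()
port = host.port_factory.get(options.platform, options=options)
manager = Manager(port, options, StubPrinter())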
Example #2
File: manager.py  Project: DrovioHQ/Qt
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join([
            'virtual', 'stable', 'http'])
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self.ARCHIVED_RESULTS_LIMIT = 25
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
Example #3
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = "http" + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = "perf"
        self.WEBSOCKET_SUBDIR = "websocket" + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = "LayoutTests"
        self._http_server_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(
            self._options, self._port, self._printer, self._results_directory, self._test_is_slow
        )
Example #4
File: manager.py  Project: kseo/webkit
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._paths = set()
        self._test_names = None
        self._retrying = False
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._expectations, self._test_is_slow)
Example #5
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: An object implementing platform-specific functionality.
          options: An options argument which contains command line options.
          printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self.ARCHIVED_RESULTS_LIMIT = 25
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._test_is_slow)
Example #6
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: An object implementing platform-specific functionality.
          options: An options argument which contains command line options.
          printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self.ARCHIVED_RESULTS_LIMIT = 25
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
Example #7
    def _runner(self, port=None):
        # FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
        options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
        options.child_processes = '1'

        host = MockHost()
        port = port or host.port_factory.get(options.platform, options=options)
        return LayoutTestRunner(options, port, FakePrinter(), port.results_directory())
Example #8
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
Example #9
    def __init__(self, port, options, printer):
        """Initializes test runner data structures.

        Args:
            port: An object implementing platform-specific functionality.
            options: An options argument which contains command line options.
            printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer

        self._expectations = None
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._path_finder = PathFinder(port.host.filesystem)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
Example #10
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = "http" + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = "perf"
        self.WEBSOCKET_SUBDIR = "websocket" + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = "LayoutTests"
        self._http_server_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(
            self._options, self._port, self._printer, self._results_directory, self._test_is_slow
        )

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == "natural":
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == "random":
            random.shuffle(tests_to_run)
        elif self._options.order == "random-seeded":
            rnd = random.Random()
            rnd.seed(4)  # http://xkcd.com/221/
            rnd.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(
            test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file),
            should_add_missing_baselines=(
                self._options.new_test_results and not self._test_is_expected_missing(test_file)
            ),
        )

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because heavy load caused by running other tests in parallel
        might cause some of them to timeout."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_expected_missing(self, test_file):
        expectations = self._expectations.model().get_expectations(test_file)
        return (
            test_expectations.MISSING in expectations
            or test_expectations.NEEDS_REBASELINE in expectations
            or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
        )

    def _test_is_slow(self, test_file):
        return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)

    def needs_servers(self, test_names):
        return any(self._test_requires_lock(test_name) for test_name in test_names)

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if self._options.build:
            exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
            if exit_code:
                _log.error("Build check failed")
                return exit_code

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
            if exit_code:
                self._port.stop_helper()
                return exit_code

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return test_run_results.OK_EXIT_STATUS

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(
            len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations
        )

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical("No tests to run.")
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            initial_results = self._run_tests(
                tests_to_run,
                tests_to_skip,
                self._options.repeat_each,
                self._options.iterations,
                self._port.num_workers(int(self._options.child_processes)),
                retrying=False,
            )

            # Don't retry failures when interrupted by user or failures limit exception.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted or initial_results.keyboard_interrupted
            )

            tests_to_retry = self._tests_to_retry(initial_results)
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info("")
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                _log.info("")
                retry_results = self._run_tests(
                    tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, num_workers=1, retrying=True
                )

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry
        )
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True,
        )

        exit_code = summarized_failing_results["num_regressions"]
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (
                    exit_code or (self._options.full_results_html and initial_results.total_failures)
                ):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
        return test_run_results.RunDetails(
            exit_code,
            summarized_full_results,
            summarized_failing_results,
            initial_results,
            retry_results,
            enabled_pixel_tests_in_retry,
        )

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)

    def _start_servers(self, tests_to_run):
        if self._port.requires_http_server() or any(self._is_http_test(test) for test in tests_to_run):
            self._printer.write_update("Starting HTTP server ...")
            self._port.start_http_server(number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True

        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update("Starting WebSocket server ...")
            self._port.start_websocket_server()
            self._websockets_server_started = True

    def _stop_servers(self):
        if self._http_server_started:
            self._printer.write_update("Stopping HTTP server ...")
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update("Stopping WebSocket server ...")
            self._websockets_server_started = False
            self._port.stop_websocket_server()

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()

        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if result.type != test_expectations.CRASH:
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" % self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

        # Port specific clean-up.
        self._port.clobber_old_port_specific_results()

    def _tests_to_retry(self, run_results):
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if result.type != test_expectations.PASS
        ]

    def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)

        full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
        # We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(
            self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS"
        )

        _log.debug("Finished writing JSON files.")

    def _upload_json_files(self):
        if not self._options.test_results_server:
            return

        if not self._options.master_name:
            _log.error("--test-results-server was set, but --master-name was not.  Not uploading JSON files.")
            return

        _log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
        attrs = [
            ("builder", self._options.builder_name),
            ("testtype", "layout-tests"),
            ("master", self._options.master_name),
        ]

        files = [
            (file, self._filesystem.join(self._results_directory, file))
            for file in ["failing_results.json", "full_results.json", "times_ms.json"]
        ]

        url = "http://%s/testfile/upload" % self._options.test_results_server
        # Set uploading timeout in case appengine server is having problems.
        # 120 seconds are more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug("JSON uploaded.")
                else:
                    _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
            else:
                _log.error("JSON upload failed; no response returned")
        except Exception, err:
            _log.error("Upload failed: %s" % err)
Example #11
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._paths = set()
        self._test_names = None
        self._retrying = False
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._expectations, self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self):
        return set(test for test in self._test_names
                   if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (
            self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self):
        tests_to_skip = self._finder.skip_tests(self._paths, self._test_names,
                                                self._expectations,
                                                self._http_tests())
        self._test_names = [
            test for test in self._test_names if test not in tests_to_skip
        ]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            self._test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(self._test_names)

        self._test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            self._test_names)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        if self._options.repeat_each > 1:
            list_with_repetitions = []
            for test in self._test_names:
                list_with_repetitions += ([test] * self._options.repeat_each)
            self._test_names = list_with_repetitions

        if self._options.iterations > 1:
            self._test_names = self._test_names * self._options.iterations

        iterations = self._options.repeat_each * self._options.iterations
        return ResultSummary(self._expectations, set(self._test_names),
                             iterations, tests_to_skip)

    def _test_input_for_file(self, test_file):
        return TestInput(
            test_file, self._options.slow_time_out_ms
            if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because heavy load caused by running other tests in parallel
        might cause some of them to timeout."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_slow(self, test_file):
        return self._expectations.has_modifier(test_file,
                                               test_expectations.SLOW)

    def needs_servers(self):
        return any(
            self._test_requires_lock(test_name)
            for test_name in self._test_names) and self._options.http

    def _set_up_run(self):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers()):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers()):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run all our tests on all our test files and return the number of unexpected results (0 == success)."""
        self._printer.write_update("Collecting tests ...")
        try:
            self._paths, self._test_names = self._collect_tests(args)
        except IOError as exception:
            # This is raised if --test-list doesn't exist
            return -1

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, self._test_names)

        num_all_test_files_found = len(self._test_names)
        result_summary = self._prepare_lists()

        # Check to make sure we're not skipping every test.
        if not self._test_names:
            _log.critical('No tests to run.')
            return -1

        self._printer.print_found(num_all_test_files_found,
                                  len(self._test_names),
                                  self._options.repeat_each,
                                  self._options.iterations)
        self._printer.print_expected(
            result_summary, self._expectations.get_tests_with_result_type)

        if not self._set_up_run():
            return -1

        start_time = time.time()

        interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = \
            self._run_tests(self._test_names, result_summary, int(self._options.child_processes))

        # We exclude the crashes from the list of results to retry, because
        # we want to treat even a potentially flaky crash as an error.

        failures = self._get_failures(
            result_summary,
            include_crashes=self._port.should_retry_crashes(),
            include_missing=False)
        retry_summary = result_summary
        while (len(failures) and self._options.retry_failures
               and not self._retrying and not interrupted
               and not keyboard_interrupted):
            _log.info('')
            _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
            _log.info('')
            self._retrying = True
            retry_summary = ResultSummary(self._expectations, failures.keys(),
                                          1, set())
            # Note that we intentionally ignore the return value here.
            self._run_tests(failures.keys(), retry_summary, 1)
            failures = self._get_failures(retry_summary,
                                          include_crashes=True,
                                          include_missing=True)

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._look_for_new_crash_logs(result_summary, start_time)
        self._look_for_new_crash_logs(retry_summary, start_time)
        self._clean_up_run()

        unexpected_results = summarize_results(self._port,
                                               self._expectations,
                                               result_summary,
                                               retry_summary,
                                               individual_test_timings,
                                               only_unexpected=True,
                                               interrupted=interrupted)

        self._printer.print_results(end_time - start_time, thread_timings,
                                    test_timings, individual_test_timings,
                                    result_summary, unexpected_results)

        # Re-raise a KeyboardInterrupt if necessary so the caller can handle it.
        if keyboard_interrupted:
            raise KeyboardInterrupt

        # FIXME: remove record_results. It's just used for testing. There's no need
        # for it to be a commandline argument.
        if (self._options.record_results and not self._options.dry_run
                and not keyboard_interrupted):
            self._port.print_leaks_summary()
            # Write the same data to log files and upload generated JSON files to appengine server.
            summarized_results = summarize_results(self._port,
                                                   self._expectations,
                                                   result_summary,
                                                   retry_summary,
                                                   individual_test_timings,
                                                   only_unexpected=False,
                                                   interrupted=interrupted)
            self._upload_json_files(summarized_results, result_summary,
                                    individual_test_timings)

        # Write the summary to disk (results.html) and display it if requested.
        if not self._options.dry_run:
            self._copy_results_html_file()
            if self._options.show_results:
                self._show_results_html_file(result_summary)

        return self._port.exit_code_from_summarized_results(unexpected_results)

    def _run_tests(self, tests, result_summary, num_workers):
        test_inputs = [self._test_input_for_file(test) for test in tests]
        needs_http = self._port.requires_http_server() or any(
            self._is_http_test(test) for test in tests)
        needs_websockets = any(self._is_websocket_test(test) for test in tests)
        return self._runner.run_tests(test_inputs, self._expectations,
                                      result_summary, num_workers, needs_http,
                                      needs_websockets, self._retrying)

    def _clean_up_run(self):
        """Restores the system after we're done running tests."""
        _log.debug("flushing stdout")
        sys.stdout.flush()
        _log.debug("flushing stderr")
        sys.stderr.flush()
        _log.debug("stopping helper")
        self._port.stop_helper()
        _log.debug("cleaning up port")
        self._port.clean_up_test_run()

    def _look_for_new_crash_logs(self, result_summary, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           result_summary: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in result_summary.unexpected_results.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(
                    self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(
                    self._filesystem.join(self._results_directory, dirname))

    def _get_failures(self, result_summary, include_crashes, include_missing):
        """Filters a dict of results and returns only the failures.

        Args:
          result_summary: the results of the test run
          include_crashes: whether crashes are included in the output.
            We use False when finding the list of failures to retry
            to see if the results were flaky. Although the crashes may also be
            flaky, we treat them as if they aren't so that they're not ignored.
        Returns:
          a dict of files -> results
        """
        failed_results = {}
        for test, result in result_summary.unexpected_results.iteritems():
            if (result.type == test_expectations.PASS
                    or (result.type == test_expectations.CRASH
                        and not include_crashes)
                    or (result.type == test_expectations.MISSING
                        and not include_missing)):
                continue
            failed_results[test] = result.type

        return failed_results

    def _char_for_result(self, result):
        result = result.lower()
        if result in TestExpectations.EXPECTATIONS:
            result_enum_value = TestExpectations.EXPECTATIONS[result]
        else:
            result_enum_value = TestExpectations.MODIFIERS[result]
        return json_layout_results_generator.JSONLayoutResultsGenerator.FAILURE_TO_CHAR[
            result_enum_value]

    def _upload_json_files(self, summarized_results, result_summary,
                           individual_test_timings):
        """Writes the results of the test run as JSON files into the results
        dir and upload the files to the appengine server.

        Args:
          unexpected_results: dict of unexpected results
          summarized_results: dict of results
          result_summary: full summary object
          individual_test_timings: list of test times (used by the flakiness
            dashboard).
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        times_trie = json_results_generator.test_timings_trie(
            self._port, individual_test_timings)
        times_json_path = self._filesystem.join(self._results_directory,
                                                "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem,
                                          summarized_results,
                                          full_results_path,
                                          callback="ADD_RESULTS")

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            BUILDER_BASE_URL, individual_test_timings, self._expectations,
            result_summary, self._test_names,
            self._options.test_results_server, "layout-tests",
            self._options.master_name)

        _log.debug("Finished writing JSON files.")

        json_files = [
            "incremental_results.json", "full_results.json", "times_ms.json"
        ]

        generator.upload_json_files(json_files)

        incremental_results_path = self._filesystem.join(
            self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)

    def _num_digits(self, num):
        """Returns the number of digits needed to represent the length of a
        sequence."""
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def _copy_results_html_file(self):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast',
                                                    'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # FIXME: What should we do if this doesn't exist (e.g., in unit tests)?
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(
                results_file,
                self._filesystem.join(self._results_directory, "results.html"))

    def _show_results_html_file(self, result_summary):
        """Shows the results.html page."""
        if self._options.full_results_html:
            test_files = result_summary.failures.keys()
        else:
            unexpected_failures = self._get_failures(result_summary,
                                                     include_crashes=True,
                                                     include_missing=True)
            test_files = unexpected_failures.keys()

        if not len(test_files):
            return

        results_filename = self._filesystem.join(self._results_directory,
                                                 "results.html")
        self._port.show_results_html_file(results_filename)
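For contrast with Example #10, a hedged sketch of driving this older variant: here run() returns a plain integer (0 == success, -1 when test collection fails or nothing is left to run) instead of a RunDetails object, so the caller can pass the value straight to sys.exit(). The setup objects are again assumed.

import sys

manager = Manager(port, options, printer)  # setup objects assumed, see above
exit_code = manager.run(args)              # int: 0 == success, -1 == nothing ran
sys.exit(exit_code)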
Example #12
class Manager(object):
    """A class for managing running a series of layout tests."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: An object implementing platform-specific functionality.
          options: An options argument which contains command line options.
          printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self.ARCHIVED_RESULTS_LIMIT = 25
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        running_all_tests = False
        try:
            paths, test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            num_workers = self._port.num_workers(int(self._options.child_processes))

            initial_results = self._run_tests(
                tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                num_workers)

            # Don't retry failures when interrupted by user or failures limit exception.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            all_retry_results = []
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                for retry_attempt in xrange(1, self._options.num_retries + 1):
                    if not tests_to_retry:
                        break

                    _log.info('')
                    _log.info('Retrying %s, attempt %d of %d...',
                              grammar.pluralize('unexpected failure', len(tests_to_retry)),
                              retry_attempt, self._options.num_retries)

                    retry_results = self._run_tests(tests_to_retry,
                                                    tests_to_skip=set(),
                                                    repeat_each=1,
                                                    iterations=1,
                                                    num_workers=num_workers,
                                                    retry_attempt=retry_attempt)
                    all_retry_results.append(retry_results)

                    tests_to_retry = self._tests_to_retry(retry_results)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry, only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)

            if self._options.write_full_results_to:
                self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
                                          self._options.write_full_results_to)

            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = test_run_results.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)

        return test_run_results.RunDetails(
            exit_code, summarized_full_results, summarized_failing_results,
            initial_results, all_retry_results, enabled_pixel_tests_in_retry)

    def _collect_tests(self, args):
        return self._finder.find_tests(args, test_list=self._options.test_list,
                                       fastest_percentile=self._options.fastest)

    def _is_http_test(self, test):
        return (
            test.startswith(self.HTTP_SUBDIR) or
            self._is_websocket_test(test) or
            self._port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR in test
        )

    def _is_inspector_test(self, test):
        return self.INSPECTOR_SUBDIR in test

    def _is_websocket_test(self, test):
        if self._port.should_use_wptserve(test):
            return False

        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        if not tests_to_run:
            return tests_to_run, tests_to_skip

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            tests_to_run.sort()
            random.Random(self._options.seed).shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(test_file,
                         self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
                         self._test_requires_lock(test_file),
                         should_add_missing_baselines=(self._options.new_test_results and
                                                       not self._test_is_expected_missing(test_file)))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when running multiple
        instances of this test runner.

        Perf tests are locked because heavy load caused by running other
        tests in parallel might cause some of them to time out.
        """
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_expected_missing(self, test_file):
        expectations = self._expectations.model().get_expectations(test_file)
        return (test_expectations.MISSING in expectations or
                test_expectations.NEEDS_REBASELINE in expectations or
                test_expectations.NEEDS_MANUAL_REBASELINE in expectations)

    def _test_is_slow(self, test_file):
        return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)

    def _needs_servers(self, test_names):
        return any(self._test_requires_lock(test_name) for test_name in test_names)

    def _rename_results_folder(self):
        try:
            timestamp = time.strftime(
                "%Y-%m-%d-%H-%M-%S", time.localtime(
                    self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
        except (IOError, OSError) as e:
            # results.html may not have been generated in the previous run because the
            # test run was interrupted even before testing started. In that case, don't
            # archive the folder; simply overwrite the current contents with new results.
            import errno
            if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
                self._printer.write_update("No results.html file found in previous run, skipping it.")
            return None
        archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
        archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
        self._filesystem.move(self._results_directory, archived_path)

    def _delete_dirs(self, dir_list):
        for dir_path in dir_list:
            self._filesystem.rmtree(dir_path)

    def _limit_archived_results_count(self):
        results_directory_path = self._filesystem.dirname(self._results_directory)
        file_list = self._filesystem.listdir(results_directory_path)
        results_directories = []
        for name in file_list:
            file_path = self._filesystem.join(results_directory_path, name)
            if self._filesystem.isdir(file_path) and self._results_directory in file_path:
                results_directories.append(file_path)
        results_directories.sort(key=lambda x: self._filesystem.mtime(x))
        self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
        self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if self._options.build:
            exit_code = self._port.check_build(self._needs_servers(test_names), self._printer)
            if exit_code:
                _log.error("Build check failed")
                return exit_code

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            exit_code = self._port.check_sys_deps(self._needs_servers(test_names))
            if exit_code:
                return exit_code

        if self._options.clobber_old_results:
            self._clobber_old_results()
        elif self._filesystem.exists(self._results_directory):
            self._limit_archived_results_count()
            # Rename the existing results folder for archiving.
            self._rename_results_folder()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return test_run_results.OK_EXIT_STATUS

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
                   num_workers, retry_attempt=0):

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs,
                                      tests_to_skip, num_workers, retry_attempt)

    def _start_servers(self, tests_to_run):
        if self._port.is_wptserve_enabled() and any(self._port.is_wptserve_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WPTServe ...')
            self._port.start_wptserve()
            self._wptserve_started = True

        if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test))
                                                    for test in tests_to_run):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True

        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True

    def _stop_servers(self):
        if self._wptserve_started:
            self._printer.write_update('Stopping WPTServe ...')
            self._wptserve_started = False
            self._port.stop_wptserve()
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False
        self._options.pixel_tests = True
        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Looks for and writes new crash logs, at the end of the test run.

        Since crash logs can take a long time to be written out if the system is
        under stress, do a second pass at the end of the test run.

        Args:
          run_results: The results of the test run.
          start_time: Time the tests started at. We're looking for crash
              logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if result.type != test_expectations.CRASH:
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                if failure.has_log:
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        dir_above_results_path = self._filesystem.dirname(self._results_directory)
        self._printer.write_update("Clobbering old results in %s" % dir_above_results_path)
        if not self._filesystem.exists(dir_above_results_path):
            return
        file_list = self._filesystem.listdir(dir_above_results_path)
        results_directories = []
        for name in file_list:
            file_path = self._filesystem.join(dir_above_results_path, name)
            if self._filesystem.isdir(file_path) and self._results_directory in file_path:
                results_directories.append(file_path)
        self._delete_dirs(results_directories)

        # Port specific clean-up.
        self._port.clobber_old_port_specific_results()

    def _tests_to_retry(self, run_results):
        # TODO(ojan): This should also check that result.type != test_expectations.MISSING
        # since retrying missing expectations is silly. But that's a bit tricky since we
        # only consider the last retry attempt for the count of unexpected regressions.
        return [result.test_name
                for result in run_results.unexpected_results_by_name.values()
                if result.type != test_expectations.PASS]

    def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
        _log.debug("Writing JSON files in %s.", self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        # Save out the times data so we can use it for --fastest in the future.
        if running_all_tests:
            bot_test_times_path = self._port.bot_test_times_path()
            self._filesystem.maybe_make_directory(self._filesystem.dirname(bot_test_times_path))
            json_results_generator.write_json(self._filesystem, times_trie, bot_test_times_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)

        full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
        # We write failing_results.json out as jsonp because we need to load it
        # from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
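        # With a callback, write_json wraps the JSON in a jsonp call, so the
        # file's contents look roughly like: ADD_RESULTS({...});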

        if self._options.json_test_results:
            json_results_generator.write_json(self._filesystem, summarized_failing_results, self._options.json_test_results)

        _log.debug("Finished writing JSON files.")

    def _upload_json_files(self):
        if not self._options.test_results_server:
            return

        if not self._options.master_name:
            _log.error("--test-results-server was set, but --master-name was not.  Not uploading JSON files.")
            return

        _log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
        attrs = [("builder", self._options.builder_name),
                 ("testtype", self._options.step_name),
                 ("master", self._options.master_name)]

        files = [(file, self._filesystem.join(self._results_directory, file))
                 for file in ["failing_results.json", "full_results.json", "times_ms.json"]]

        url = "http://%s/testfile/upload" % self._options.test_results_server
        # Set an upload timeout in case the appengine server is having problems.
        # 120 seconds is more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug("JSON uploaded.")
                else:
                    _log.debug("JSON upload failed, %d: '%s'", response.code, response.read())
            else:
                _log.error("JSON upload failed; no response returned")
        except Exception as err:
            _log.error("Upload failed: %s", err)

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (
                    _worker_number(result.worker_name), result.test_number, result.pid,
                    int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
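
A minimal, self-contained sketch of the path-trie idea behind _stats_trie and
json_results_generator.test_timings_trie above. add_path_to_trie_sketch is an
illustrative stand-in written for this note, not the real json_results_generator
implementation, and the sample test names and tuples are invented.

def add_path_to_trie_sketch(path, value, trie):
    """Inserts value into a nested-dict trie keyed by the '/'-separated path."""
    directory, _, rest = path.partition('/')
    if not rest:
        trie[directory] = value
        return
    trie.setdefault(directory, {})
    add_path_to_trie_sketch(rest, value, trie[directory])

trie = {}
add_path_to_trie_sketch('fast/css/test.html', {'results': (0, 1, 1234, 12, 15)}, trie)
add_path_to_trie_sketch('fast/dom/other.html', {'results': (1, 2, 1235, 7, 9)}, trie)
# trie == {'fast': {'css': {'test.html': {...}}, 'dom': {'other.html': {...}}}}
print(trie)
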
Example #13
class Manager(object):
    """A class for managing running a series of layout tests."""

    HTTP_SUBDIR = 'http'
    PERF_SUBDIR = 'perf'
    WEBSOCKET_SUBDIR = 'websocket'
    ARCHIVED_RESULTS_LIMIT = 25

    def __init__(self, port, options, printer):
        """Initializes test runner data structures.

        Args:
            port: An object implementing platform-specific functionality.
            options: An options argument which contains command line options.
            printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer

        self._expectations = None
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._path_finder = PathFinder(port.host.filesystem)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._test_is_slow)

    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        if not args or any('external' in path for path in args):
            self._printer.write_update(
                'Generating MANIFEST.json for web-platform-tests ...')
            WPTManifest.ensure_manifest(self._port.host)
            self._printer.write_update('Completed generating manifest.')

        self._printer.write_update('Collecting tests ...')
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            all_test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            all_test_names.sort()
            random.Random(self._options.seed).shuffle(all_test_names)

        test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            all_test_names)

        self._printer.write_update('Parsing expectations ...')
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests_from_expectations(
            tests_in_other_chunks)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
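            # Heuristic: if each path given on the command line expanded to
            # exactly one test, the user listed tests explicitly, so default
            # to not retrying failures.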
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._results_directory,
                                         'results.html')
            self._copy_results_html_file(self._results_directory,
                                         'legacy-results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or initial_results.total_failures):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._results_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)

    def _run_test_loop(self, tests_to_run, tests_to_skip):
        # Don't show results in a new browser window because we're already
        # printing the link to diffs in the loop
        self._options.show_results = False

        while True:
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = self._run_test_once(
                tests_to_run, tests_to_skip, should_retry_failures=False)
            for name in initial_results.failures_by_name:
                failure = initial_results.failures_by_name[name][0]
                if isinstance(failure, test_failures.FailureTextMismatch):
                    full_test_path = self._filesystem.join(
                        self._results_directory, name)
                    filename, _ = self._filesystem.splitext(full_test_path)
                    pretty_diff_path = 'file://' + filename + '-pretty-diff.html'
                    self._printer.writeln('Link to pretty diff:')
                    self._printer.writeln(pretty_diff_path + '\n')
            self._printer.writeln('Finished running tests')

            user_input = self._port.host.user.prompt(
                'Interactive watch mode: (q)uit (r)etry\n').lower()

            if user_input == 'q' or user_input == 'quit':
                return (initial_results, all_retry_results,
                        enabled_pixel_tests_in_retry)

    def _run_test_once(self, tests_to_run, tests_to_skip,
                       should_retry_failures):
        enabled_pixel_tests_in_retry = False

        num_workers = self._port.num_workers(int(
            self._options.child_processes))

        initial_results = self._run_tests(tests_to_run, tests_to_skip,
                                          self._options.repeat_each,
                                          self._options.iterations,
                                          num_workers)

        # Don't retry failures when interrupted by user or failures limit exception.
        should_retry_failures = should_retry_failures and not (
            initial_results.interrupted
            or initial_results.keyboard_interrupted)

        tests_to_retry = self._tests_to_retry(initial_results)
        all_retry_results = []
        if should_retry_failures and tests_to_retry:
            enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

            for retry_attempt in xrange(1, self._options.num_retries + 1):
                if not tests_to_retry:
                    break

                _log.info('')
                _log.info(
                    'Retrying %s, attempt %d of %d...',
                    grammar.pluralize('unexpected failure',
                                      len(tests_to_retry)), retry_attempt,
                    self._options.num_retries)

                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=num_workers,
                                                retry_attempt=retry_attempt)
                all_retry_results.append(retry_results)

                tests_to_retry = self._tests_to_retry(retry_results)

            if enabled_pixel_tests_in_retry:
                self._options.pixel_tests = False
        return (initial_results, all_retry_results,
                enabled_pixel_tests_in_retry)

    def _collect_tests(self, args):
        return self._finder.find_tests(
            args,
            test_list=self._options.test_list,
            fastest_percentile=self._options.fastest)

    def _is_http_test(self, test):
        return (test.startswith(self.HTTP_SUBDIR +
                                self._port.TEST_PATH_SEPARATOR)
                or self._is_websocket_test(test)
                or self._port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR +
                self._port.TEST_PATH_SEPARATOR in test)

    def _is_websocket_test(self, test):
        if self._port.should_use_wptserve(test):
            return False

        return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (
            self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names,
                                                self._expectations,
                                                self._http_tests(test_names))
        tests_to_run = [
            test for test in test_names if test not in tests_to_skip
        ]

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(
            test_file, self._options.slow_time_out_ms
            if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file))

    def _test_requires_lock(self, test_file):
        """Returns True if the test needs to be locked when running multiple
        instances of this test runner.

        Perf tests are locked because heavy load caused by running other
        tests in parallel might cause some of them to time out.
        """
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_slow(self, test_file):
        expectations = self._expectations.model().get_expectations(test_file)
        return (test_expectations.SLOW in expectations
                or self._port.is_slow_wpt_test(test_file))

    def _needs_servers(self, test_names):
        return any(
            self._test_requires_lock(test_name) for test_name in test_names)

    def _rename_results_folder(self):
        try:
            timestamp = time.strftime(
                "%Y-%m-%d-%H-%M-%S",
                time.localtime(
                    self._filesystem.mtime(
                        self._filesystem.join(self._results_directory,
                                              'results.html'))))
        except (IOError, OSError) as error:
            # results.html may not have been generated in the previous run because the
            # test run was interrupted even before testing started. In that case, don't
            # archive the folder; simply overwrite the current contents with new results.
            import errno
            if error.errno in (errno.EEXIST, errno.ENOENT):
                self._printer.write_update(
                    'No results.html file found in previous run, skipping it.')
            return None
        archived_name = ''.join(
            (self._filesystem.basename(self._results_directory), '_',
             timestamp))
        archived_path = self._filesystem.join(
            self._filesystem.dirname(self._results_directory), archived_name)
        self._filesystem.move(self._results_directory, archived_path)

    def _delete_dirs(self, dir_list):
        for dir_path in dir_list:
            self._filesystem.rmtree(dir_path)

    def _limit_archived_results_count(self):
        results_directory_path = self._filesystem.dirname(
            self._results_directory)
        file_list = self._filesystem.listdir(results_directory_path)
        results_directories = []
        for name in file_list:
            file_path = self._filesystem.join(results_directory_path, name)
            if self._filesystem.isdir(
                    file_path) and self._results_directory in file_path:
                results_directories.append(file_path)
        results_directories.sort(key=self._filesystem.mtime)
        self._printer.write_update('Clobbering excess archived results in %s' %
                                   results_directory_path)
        self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])

    def _set_up_run(self, test_names):
        self._printer.write_update('Checking build ...')
        if self._options.build:
            exit_code = self._port.check_build(self._needs_servers(test_names),
                                               self._printer)
            if exit_code:
                _log.error('Build check failed')
                return exit_code

        if self._options.clobber_old_results:
            self._clobber_old_results()
        elif self._filesystem.exists(self._results_directory):
            self._limit_archived_results_count()
            # Rename the existing results folder for archiving.
            self._rename_results_folder()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        exit_code = self._port.setup_test_run()
        if exit_code:
            _log.error('Build setup failed')
            return exit_code

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update('Checking system dependencies ...')
            exit_code = self._port.check_sys_deps(
                self._needs_servers(test_names))
            if exit_code:
                return exit_code

        return exit_codes.OK_EXIT_STATUS

    def _run_tests(self,
                   tests_to_run,
                   tests_to_skip,
                   repeat_each,
                   iterations,
                   num_workers,
                   retry_attempt=0):

        test_inputs = []
        for _ in xrange(iterations):
            # TODO(crbug.com/650747): We may want to switch the two loops below
            # to make the behavior consistent with gtest runner (--gtest_repeat
            # is an alias for --repeat-each now), which looks like "ABCABCABC".
            # And remember to update the help text when we do so.
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs,
                                      tests_to_skip, num_workers,
                                      retry_attempt)

    def _start_servers(self, tests_to_run):
        if any(self._port.is_wpt_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WPTServe ...')
            self._port.start_wptserve()
            self._wptserve_started = True

        if self._port.requires_http_server() or any(
                self._is_http_test(test) for test in tests_to_run):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(
                additional_dirs={},
                number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True

        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True

    def _stop_servers(self):
        if self._wptserve_started:
            self._printer.write_update('Stopping WPTServe ...')
            self._wptserve_started = False
            self._port.stop_wptserve()
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()

    def _clean_up_run(self):
        _log.debug('Flushing stdout')
        sys.stdout.flush()
        _log.debug('Flushing stderr')
        sys.stderr.flush()
        _log.debug('Cleaning up port')
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
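        """Enables pixel tests for the retry pass if they are currently off.

        Returns True only when this call actually flipped the option, so the
        retry path in _run_test_once can turn pixel tests back off afterwards.
        """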
        if self._options.pixel_tests:
            return False
        self._options.pixel_tests = True
        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Looks for and writes new crash logs, at the end of the test run.

        Since crash logs can take a long time to be written out if the system is
        under stress, do a second pass at the end of the test run.

        Args:
            run_results: The results of the test run.
            start_time: Time the tests started at. We're looking for crash
                logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if result.type != test_expectations.CRASH:
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                if failure.has_log:
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, (crash_log, crash_site) in crash_logs.iteritems():
                writer = TestResultWriter(self._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)
                run_results.unexpected_results_by_name[
                    test].crash_site = crash_site

    def _clobber_old_results(self):
        dir_above_results_path = self._filesystem.dirname(
            self._results_directory)
        self._printer.write_update('Clobbering old results in %s.' %
                                   dir_above_results_path)
        if not self._filesystem.exists(dir_above_results_path):
            return
        file_list = self._filesystem.listdir(dir_above_results_path)
        results_directories = []
        for name in file_list:
            file_path = self._filesystem.join(dir_above_results_path, name)
            if self._filesystem.isdir(
                    file_path) and self._results_directory in file_path:
                results_directories.append(file_path)
        self._delete_dirs(results_directories)

        # Port specific clean-up.
        self._port.clobber_old_port_specific_results()

    def _tests_to_retry(self, run_results):
        # TODO(ojan): This should also check that result.type != test_expectations.MISSING
        # since retrying missing expectations is silly. But that's a bit tricky since we
        # only consider the last retry attempt for the count of unexpected regressions.
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if result.type != test_expectations.PASS
        ]

    def _write_json_files(self, summarized_full_results,
                          summarized_failing_results, initial_results,
                          running_all_tests):
        _log.debug("Writing JSON files in %s.", self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory,
                                                'times_ms.json')
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        # Save out the times data so we can use it for --fastest in the future.
        if running_all_tests:
            bot_test_times_path = self._port.bot_test_times_path()
            self._filesystem.maybe_make_directory(
                self._filesystem.dirname(bot_test_times_path))
            json_results_generator.write_json(self._filesystem, times_trie,
                                              bot_test_times_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory,
                                           'stats.json')
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory,
                                                  'full_results.json')
        json_results_generator.write_json(self._filesystem,
                                          summarized_full_results,
                                          full_results_path)

        full_results_jsonp_path = self._filesystem.join(
            self._results_directory, 'full_results_jsonp.js')
        json_results_generator.write_json(self._filesystem,
                                          summarized_full_results,
                                          full_results_jsonp_path,
                                          callback='ADD_FULL_RESULTS')
        full_results_path = self._filesystem.join(self._results_directory,
                                                  'failing_results.json')
        # We write failing_results.json out as jsonp because we need to load it
        # from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem,
                                          summarized_failing_results,
                                          full_results_path,
                                          callback='ADD_RESULTS')

        # Write out the JSON files suitable for other tools to process.
        # As the output can be quite large (as there are 60k+ tests) we also
        # support only outputting the failing results.
        if self._options.json_failing_test_results:
            # FIXME(tansell): Make sure this also includes *unexpected* results
            # (i.e. passing when expected to be failing).
            json_results_generator.write_json(
                self._filesystem, summarized_failing_results,
                self._options.json_failing_test_results)
        if self._options.json_test_results:
            json_results_generator.write_json(self._filesystem,
                                              summarized_full_results,
                                              self._options.json_test_results)

        _log.debug('Finished writing JSON files.')

    def _upload_json_files(self):
        if not self._options.test_results_server:
            return

        if not self._options.master_name:
            _log.error(
                '--test-results-server was set, but --master-name was not.  Not uploading JSON files.'
            )
            return

        _log.debug('Uploading JSON files for builder: %s',
                   self._options.builder_name)
        attrs = [('builder', self._options.builder_name),
                 ('testtype', self._options.step_name),
                 ('master', self._options.master_name)]

        files = [
            (name, self._filesystem.join(self._results_directory, name))
            for name in
            ['failing_results.json', 'full_results.json', 'times_ms.json']
        ]

        url = 'https://%s/testfile/upload' % self._options.test_results_server
        # Set an upload timeout in case the appengine server is having problems.
        # 120 seconds is more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(
                self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug('JSON uploaded.')
                else:
                    _log.debug('JSON upload failed, %d: "%s"', response.code,
                               response.read())
            else:
                _log.error('JSON upload failed; no response returned')
        except IOError as err:
            _log.error('Upload failed: %s', err)

    def _copy_results_html_file(self, destination_dir, filename):
        """Copies a file from the template directory to the results directory."""
        template_dir = self._path_finder.path_from_layout_tests(
            'fast', 'harness')
        source_path = self._filesystem.join(template_dir, filename)
        destination_path = self._filesystem.join(destination_dir, filename)
        # Note that the results.html template file won't exist when
        # we're using a MockFileSystem during unit tests, so make sure
        # it exists before we try to copy it.
        if self._filesystem.exists(source_path):
            self._filesystem.copyfile(source_path, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {
                    'results':
                    (_worker_number(result.worker_name), result.test_number,
                     result.pid, int(result.test_run_time * 1000),
                     int(result.total_run_time * 1000))
                }
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
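
A small sketch of the deterministic ordering used above for --order=random: the
list is sorted first and then shuffled with a seeded random.Random, so the same
seed always yields the same order regardless of how the tests were collected.
The helper name and sample test names below are invented for illustration.

import random

def randomized_order(test_names, seed):
    ordered = sorted(test_names)          # normalize the starting order
    random.Random(seed).shuffle(ordered)  # seeded shuffle => reproducible order
    return ordered

tests = ['fast/b.html', 'fast/a.html', 'http/tests/c.html']
assert randomized_order(tests, seed=4) == randomized_order(list(reversed(tests)), seed=4)
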
Example #14
File: manager.py Project: kseo/webkit
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._paths = set()
        self._test_names = None
        self._retrying = False
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._expectations, self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self):
        return set(
            test for test in self._test_names if self._is_http_test(test))

    def _websocket_tests(self):
        return set(
            test for test in self._test_names if self._is_websocket_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (
            self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self):
        tests_to_skip = self._finder.skip_tests(self._paths, self._test_names,
                                                self._expectations,
                                                self._http_tests())
        self._test_names = list(set(self._test_names) - tests_to_skip)

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.randomize_order:
            random.shuffle(self._test_names)
        else:
            self._test_names.sort(key=self._runner.test_key)

        self._test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            self._test_names)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

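        # repeat_each repeats each test back to back (A A B B), while
        # iterations repeats the whole expanded list (A B A B).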
        if self._options.repeat_each > 1:
            list_with_repetitions = []
            for test in self._test_names:
                list_with_repetitions += ([test] * self._options.repeat_each)
            self._test_names = list_with_repetitions

        if self._options.iterations > 1:
            self._test_names = self._test_names * self._options.iterations

        iterations = self._options.repeat_each * self._options.iterations
        return ResultSummary(self._expectations, set(self._test_names),
                             iterations, tests_to_skip)

    def _test_input_for_file(self, test_file):
        return TestInput(
            test_file, self._options.slow_time_out_ms
            if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file),
            self._port.reference_files(test_file)
            if self._options.shard_ref_tests else None)

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because heavy load caused by running other tests in parallel
        might cause some of them to time out."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_slow(self, test_file):
        return self._expectations.has_modifier(test_file,
                                               test_expectations.SLOW)

    def _is_ref_test(self, test_input):
        if test_input.reference_files is None:
            # Lazy initialization.
            test_input.reference_files = self._port.reference_files(
                test_input.test_name)
        return bool(test_input.reference_files)

    def needs_servers(self):
        return any(
            self._test_requires_lock(test_name)
            for test_name in self._test_names) and self._options.http

    def _set_up_run(self):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers()):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers()):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run all our tests on all our test files and return the number of unexpected results (0 == success)."""
        self._printer.write_update("Collecting tests ...")
        try:
            self._paths, self._test_names = self._collect_tests(args)
        except IOError as exception:
            # This is raised if --test-list doesn't exist
            return -1

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, self._test_names)

        num_all_test_files_found = len(self._test_names)
        result_summary = self._prepare_lists()

        # Check to make sure we're not skipping every test.
        if not self._test_names:
            _log.critical('No tests to run.')
            return -1

        self._printer.print_found(num_all_test_files_found,
                                  len(self._test_names),
                                  self._options.repeat_each,
                                  self._options.iterations)
        self._printer.print_expected(
            result_summary, self._expectations.get_tests_with_result_type)

        if not self._set_up_run():
            return -1

        start_time = time.time()

        interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = \
            self._run_tests(self._test_names, result_summary, int(self._options.child_processes))

        # We exclude the crashes from the list of results to retry, because
        # we want to treat even a potentially flaky crash as an error.

        failures = self._get_failures(
            result_summary,
            include_crashes=self._port.should_retry_crashes(),
            include_missing=False)
        retry_summary = result_summary
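        # Retry unexpected failures at most once: self._retrying is set inside
        # the loop, so the while condition fails on the second pass.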
        while (len(failures) and self._options.retry_failures
               and not self._retrying and not interrupted
               and not keyboard_interrupted):
            _log.info('')
            _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
            _log.info('')
            self._retrying = True
            retry_summary = ResultSummary(self._expectations, failures.keys(),
                                          1, set())
            # Note that we intentionally ignore the return value here.
            self._run_tests(failures.keys(), retry_summary, 1)
            failures = self._get_failures(
                retry_summary, include_crashes=True, include_missing=True)

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._look_for_new_crash_logs(result_summary, start_time)
        self._look_for_new_crash_logs(retry_summary, start_time)
        self._clean_up_run()

        unexpected_results = summarize_results(
            self._port,
            self._expectations,
            result_summary,
            retry_summary,
            individual_test_timings,
            only_unexpected=True,
            interrupted=interrupted)

        self._printer.print_results(end_time - start_time, thread_timings,
                                    test_timings, individual_test_timings,
                                    result_summary, unexpected_results)

        # Re-raise a KeyboardInterrupt if necessary so the caller can handle it.
        if keyboard_interrupted:
            raise KeyboardInterrupt

        # FIXME: remove record_results. It's just used for testing. There's no need
        # for it to be a commandline argument.
        if (self._options.record_results and not self._options.dry_run
                and not keyboard_interrupted):
            self._port.print_leaks_summary()
            # Write the same data to log files and upload generated JSON files to appengine server.
            summarized_results = summarize_results(
                self._port,
                self._expectations,
                result_summary,
                retry_summary,
                individual_test_timings,
                only_unexpected=False,
                interrupted=interrupted)
            self._upload_json_files(summarized_results, result_summary,
                                    individual_test_timings)

        # Write the summary to disk (results.html) and display it if requested.
        if not self._options.dry_run:
            self._copy_results_html_file()
            if self._options.show_results:
                self._show_results_html_file(result_summary)

        return self._port.exit_code_from_summarized_results(unexpected_results)

    def _run_tests(self, tests, result_summary, num_workers):
        test_inputs = [self._test_input_for_file(test) for test in tests]
        needs_http = any(self._is_http_test(test) for test in tests)
        needs_websockets = any(self._is_websocket_test(test) for test in tests)
        return self._runner.run_tests(test_inputs, self._expectations,
                                      result_summary, num_workers, needs_http,
                                      needs_websockets, self._retrying)

    def _clean_up_run(self):
        """Restores the system after we're done running tests."""
        _log.debug("flushing stdout")
        sys.stdout.flush()
        _log.debug("flushing stderr")
        sys.stderr.flush()
        _log.debug("stopping helper")
        self._port.stop_helper()
        _log.debug("cleaning up port")
        self._port.clean_up_test_run()

    def _look_for_new_crash_logs(self, result_summary, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           result_summary: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in result_summary.unexpected_results.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update(
            "Clobbering old results in %s" % self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(
                    self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(
                    self._filesystem.join(self._results_directory, dirname))

    def _get_failures(self, result_summary, include_crashes, include_missing):
        """Filters a dict of results and returns only the failures.

        Args:
          result_summary: the results of the test run
          include_crashes: whether crashes are included in the output.
            We use False when finding the list of failures to retry
            to see if the results were flaky. Although the crashes may also be
            flaky, we treat them as if they aren't so that they're not ignored.
          include_missing: whether tests with missing results are included
            in the output.
        Returns:
          a dict of files -> results
        """
        failed_results = {}
        for test, result in result_summary.unexpected_results.iteritems():
            if (result.type == test_expectations.PASS
                    or (result.type == test_expectations.CRASH
                        and not include_crashes)
                    or (result.type == test_expectations.MISSING
                        and not include_missing)):
                continue
            failed_results[test] = result.type

        return failed_results

    def _char_for_result(self, result):
        result = result.lower()
        if result in TestExpectations.EXPECTATIONS:
            result_enum_value = TestExpectations.EXPECTATIONS[result]
        else:
            result_enum_value = TestExpectations.MODIFIERS[result]
        return json_layout_results_generator.JSONLayoutResultsGenerator.FAILURE_TO_CHAR[
            result_enum_value]

    def _upload_json_files(self, summarized_results, result_summary,
                           individual_test_timings):
        """Writes the results of the test run as JSON files into the results
        dir and upload the files to the appengine server.

        Args:
          unexpected_results: dict of unexpected results
          summarized_results: dict of results
          result_summary: full summary object
          individual_test_timings: list of test times (used by the flakiness
            dashboard).
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        times_trie = json_results_generator.test_timings_trie(
            self._port, individual_test_timings)
        times_json_path = self._filesystem.join(self._results_directory,
                                                "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(
            self._filesystem,
            summarized_results,
            full_results_path,
            callback="ADD_RESULTS")

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            BUILDER_BASE_URL, individual_test_timings, self._expectations,
            result_summary, self._test_names,
            self._options.test_results_server, "layout-tests",
            self._options.master_name)

        _log.debug("Finished writing JSON files.")

        json_files = [
            "incremental_results.json", "full_results.json", "times_ms.json"
        ]

        generator.upload_json_files(json_files)

        incremental_results_path = self._filesystem.join(
            self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)

    def _num_digits(self, num):
        """Returns the number of digits needed to represent the length of a
        sequence."""
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def _copy_results_html_file(self):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast',
                                                    'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # FIXME: What should we do if this doesn't exist (e.g., in unit tests)?
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(
                results_file,
                self._filesystem.join(self._results_directory, "results.html"))

    def _show_results_html_file(self, result_summary):
        """Shows the results.html page."""
        if self._options.full_results_html:
            test_files = result_summary.failures.keys()
        else:
            unexpected_failures = self._get_failures(
                result_summary, include_crashes=True, include_missing=True)
            test_files = unexpected_failures.keys()

        if not len(test_files):
            return

        results_filename = self._filesystem.join(self._results_directory,
                                                 "results.html")
        self._port.show_results_html_file(results_filename)
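The comment above full_results.json explains the odd callback argument: results.html has to load the file from a file:// URL, where a plain JSON fetch is blocked, so the data is wrapped as JSONP. Below is a minimal sketch of that wrapping, using a plain file write in place of webkitpy's FileSystem abstraction; the helper name and signature are illustrative, not the project's actual json_results_generator.write_json.

import json

def write_json(path, data, callback=None):
    # Serialize 'data'; when 'callback' is given, wrap the JSON in a JSONP-style
    # call such as ADD_RESULTS({...}); so results.html can load it via a
    # <script> tag even from a file:// URL.
    contents = json.dumps(data)
    if callback:
        contents = "%s(%s);" % (callback, contents)
    with open(path, "w") as f:
        f.write(contents)

# write_json("full_results.json", {"num_regressions": 0}, callback="ADD_RESULTS")
# produces: ADD_RESULTS({"num_regressions": 0});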
Example #15
0
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            for_device_type = 'for {} '.format(
                device_type) if device_type else ''
            self._printer.write_update(
                'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(
                    args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update(
                'Parsing expectations {}...'.format(for_device_type))
            self._expectations[
                device_type] = test_expectations.TestExpectations(
                    self._port,
                    test_names,
                    force_expectations_pass=self._options.force,
                    device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(
                paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [
                test for test in tests_to_run if test not in aggregate_tests
            ]
            aggregate_tests.update(tests_to_run)

        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names),
                                  len(aggregate_tests),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not sum(
            [len(tests) for tests in tests_to_run_by_device.itervalues()]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        needs_http = any((self._is_http_test(test)
                          and not self._needs_web_platform_test(test))
                         for tests in tests_to_run_by_device.itervalues()
                         for test in tests)
        needs_web_platform_test_server = any(
            self._needs_web_platform_test(test)
            for tests in tests_to_run_by_device.itervalues() for test in tests)
        needs_websockets = any(
            self._is_websocket_test(test)
            for tests in tests_to_run_by_device.itervalues() for test in tests)
        self._runner = LayoutTestRunner(
            self._options,
            self._port,
            self._printer,
            self._results_directory,
            self._test_is_slow,
            needs_http=needs_http,
            needs_web_platform_test_server=needs_web_platform_test_server,
            needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes

        for device_type in device_type_list:
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(
                test_file, device_type=device_type)
            self._options.child_processes = min(
                self._port.max_child_processes(device_type=device_type),
                int(child_processes_option_value
                    or self._port.default_child_processes(
                        device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(
                    pluralize(len(tests_to_run_by_device[device_type]),
                              'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes,
                                              max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info('Running {}{}'.format(
                pluralize(len(tests_to_run_by_device[device_type]), 'test'),
                ' for {}'.format(str(device_type)) if device_type else ''))
            _log.info('')
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type],
                                    device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(
                tests_to_run_by_device[device_type],
                tests_to_skip,
                device_type=device_type)
            initial_results = initial_results.merge(
                temp_initial_results
            ) if initial_results else temp_initial_results
            retry_results = retry_results.merge(
                temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results,
                                  retry_results, enabled_pixel_tests_in_retry)
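The run() method in Example #15 assigns every test to the first device type that collects it, so a test reported by several device types is only executed once. Here is a self-contained sketch of that bookkeeping, with plain strings standing in for the real device types and test names; the function name and data layout are illustrative only.

def split_tests_by_device(tests_by_device_type):
    # Each test runs only on the first device type whose collection includes it;
    # later device types see it filtered out, mirroring the aggregate_tests set above.
    assigned = set()
    tests_to_run_by_device = {}
    for device_type, tests in tests_by_device_type:
        tests_to_run_by_device[device_type] = [t for t in tests if t not in assigned]
        assigned.update(tests)
    return tests_to_run_by_device

# split_tests_by_device([('iphone-se', ['a.html', 'b.html']),
#                        ('ipad', ['b.html', 'c.html'])])
# -> {'iphone-se': ['a.html', 'b.html'], 'ipad': ['c.html']}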
Example #16
0
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._is_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _is_web_platform_test(self, test):
        return self.web_platform_test_subdir in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._is_http_test(test_file))

    def _test_is_slow(self, test_file):
        return self._expectations.model().has_modifier(test_file, test_expectations.SLOW)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http

    def _get_test_inputs(self, tests_to_run, repeat_each, iterations):
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return test_inputs

    def _update_worker_count(self, test_names):
        test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations)
        worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False

        self._update_worker_count(test_names)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        try:
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)

            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
        needs_http = any((self._is_http_test(test) and not self._is_web_platform_test(test)) for test in tests_to_run)
        needs_web_platform_test_server = any(self._is_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)

        test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations)
        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, needs_web_platform_test_server, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in the list of crashed_processes; if not, add it to the run_results.
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and upload the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
        hostname = self._options.results_server_host
        if not hostname:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

        attrs = [
            ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
            ('builder_name', builder_name),
            ('build_number', build_number),
            ('build_slave', build_slave),
            ('revisions', json.dumps(revisions)),
            ('start_time', str(start_time)),
            ('end_time', str(end_time)),
        ]

        uploader = FileUploader("http://%s/api/report" % hostname, 360)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
            if not response:
                _log.error("JSON upload failed; no response returned")
                return

            if response.code != 200:
                _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                return

            response_text = response.read()
            try:
                response_json = json.loads(response_text)
            except ValueError, error:
                _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                return

            if response_json['status'] != 'OK':
                _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                return

            _log.info("JSON uploaded.")
        except Exception, error:
            _log.error("Upload failed: %s" % error)
            return
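upload_results() above only treats an upload as successful when a response came back, the HTTP status is 200, and the JSON body reports status 'OK'. Those checks can be read as one small predicate; the sketch below assumes a response object exposing .code and .read(), as the FileUploader calls above do, and the helper itself is illustrative rather than part of webkitpy.

import json

def upload_succeeded(response):
    # Mirrors the acceptance criteria used above: a response exists, it carries
    # HTTP 200, its body parses as JSON, and that JSON reports status 'OK'.
    if not response or response.code != 200:
        return False
    try:
        parsed = json.loads(response.read())
    except ValueError:
        return False
    return parsed.get('status') == 'OK'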
Example #17
0
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._http_server_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (
            self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names,
                                                self._expectations,
                                                self._http_tests(test_names))
        tests_to_run = [
            test for test in test_names if test not in tests_to_skip
        ]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)
        elif self._options.order == 'random-seeded':
            rnd = random.Random()
            rnd.seed(4)  # http://xkcd.com/221/
            rnd.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(
            tests_to_run)
        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(
            test_file,
            self._options.slow_time_out_ms
            if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file),
            should_add_missing_baselines=(
                self._options.new_test_results
                and not self._test_is_expected_missing(test_file)))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because heavy load caused by running other tests in parallel
        might cause some of them to timeout."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_expected_missing(self, test_file):
        expectations = self._expectations.model().get_expectations(test_file)
        return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations

    def _test_is_slow(self, test_file):
        return test_expectations.SLOW in self._expectations.model(
        ).get_expectations(test_file)

    def needs_servers(self, test_names):
        return any(
            self._test_requires_lock(test_name) for test_name in test_names)

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if self._options.build:
            exit_code = self._port.check_build(self.needs_servers(test_names),
                                               self._printer)
            if exit_code:
                _log.error("Build check failed")
                return exit_code

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            exit_code = self._port.check_sys_deps(
                self.needs_servers(test_names))
            if exit_code:
                self._port.stop_helper()
                return exit_code

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        self._port.setup_test_run()
        return test_run_results.OK_EXIT_STATUS

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            initial_results = self._run_tests(
                tests_to_run,
                tests_to_skip,
                self._options.repeat_each,
                self._options.iterations,
                self._port.num_workers(int(self._options.child_processes)),
                retrying=False)

            # Don't retry failures when interrupted by user or failures limit exception.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed(
                )

                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." %
                          len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results)
            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (
                        exit_code or (self._options.full_results_html
                                      and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time,
                                            initial_results,
                                            summarized_failing_results)
        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, retry_results,
                                           enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
                   num_workers, retrying):

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs,
                                      tests_to_skip, num_workers, retrying)

    def _start_servers(self, tests_to_run):
        if self._port.requires_http_server() or any(
                self._is_http_test(test) for test in tests_to_run):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(
                number_of_servers=(2 * self._options.max_locked_shards))
            self._http_server_started = True

        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True

    def _stop_servers(self):
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()

        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(
                    self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(
                    self._filesystem.join(self._results_directory, dirname))

        # Port specific clean-up.
        self._port.clobber_old_port_specific_results()

    def _tests_to_retry(self, run_results):
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if result.type != test_expectations.PASS
        ]

    def _write_json_files(self, summarized_full_results,
                          summarized_failing_results, initial_results):
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory,
                                                "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory,
                                           "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "full_results.json")
        json_results_generator.write_json(self._filesystem,
                                          summarized_full_results,
                                          full_results_path)

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "failing_results.json")
        # We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem,
                                          summarized_failing_results,
                                          full_results_path,
                                          callback="ADD_RESULTS")

        _log.debug("Finished writing JSON files.")

    def _upload_json_files(self):
        if not self._options.test_results_server:
            return

        if not self._options.master_name:
            _log.error(
                "--test-results-server was set, but --master-name was not.  Not uploading JSON files."
            )
            return

        _log.debug("Uploading JSON files for builder: %s",
                   self._options.builder_name)
        attrs = [("builder", self._options.builder_name),
                 ("testtype", "layout-tests"),
                 ("master", self._options.master_name)]

        files = [
            (file, self._filesystem.join(self._results_directory, file))
            for file in
            ["failing_results.json", "full_results.json", "times_ms.json"]
        ]

        url = "http://%s/testfile/upload" % self._options.test_results_server
        # Set an upload timeout in case the appengine server is having problems;
        # 120 seconds is more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(
                self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug("JSON uploaded.")
                else:
                    _log.debug("JSON upload failed, %d: '%s'" %
                               (response.code, response.read()))
            else:
                _log.error("JSON upload failed; no response returned")
        except Exception, err:
            _log.error("Upload failed: %s" % err)
Example #18
0
File: manager.py  Project: EQ4/h5vcc
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because heavy load caused by running other tests in parallel
        might cause some of them to timeout."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_slow(self, test_file):
        return self._expectations.has_modifier(test_file, test_expectations.SLOW)

    def needs_servers(self, test_names):
        return any(self._test_requires_lock(test_name) for test_name in test_names) and self._options.http

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        start_time = time.time()
        try:
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            if self._options.retry_failures and tests_to_retry and not initial_results.interrupted:
                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if self._options.show_results and (initial_results.unexpected_results_by_name or
                                               (self._options.full_results_html and initial_results.total_failures)):
                self._port.show_results_html_file(results_path)

        return test_run_results.RunDetails(self._port.exit_code_from_summarized_results(summarized_results),
                                           summarized_results, initial_results, retry_results)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
        needs_http = self._port.requires_http_server() or any(self._is_http_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))

        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying)

    def _clean_up_run(self):
        """Restores the system after we're done running tests."""
        _log.debug("flushing stdout")
        sys.stdout.flush()
        _log.debug("flushing stderr")
        sys.stderr.flush()
        _log.debug("stopping helper")
        self._port.stop_helper()
        _log.debug("cleaning up port")
        self._port.clean_up_test_run()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _upload_json_files(self, summarized_results, initial_results):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            BUILDER_BASE_URL,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        _log.debug("Finished writing JSON files.")


        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
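
Note on the stats trie: _stats_trie above hands each per-test record to json_results_generator.add_path_to_trie, which, as used here, appears to fold '/'-separated test names into nested dictionaries with the stats dict at the leaf. A minimal, self-contained sketch of that presumed layout, using a local stand-in rather than the real webkitpy helper:

# Illustrative stand-in only; the real helper is json_results_generator.add_path_to_trie.
def _add_path_to_trie_sketch(path, value, trie):
    # Split off the first path component and nest the remainder under it.
    directory, _, rest = path.partition('/')
    if not rest:
        trie[directory] = value
        return
    trie.setdefault(directory, {})
    _add_path_to_trie_sketch(rest, value, trie[directory])

stats_trie = {}
_add_path_to_trie_sketch('fast/harness/results.html',
                         {'results': (0, 1, 1234, 12, 15)}, stats_trie)
# stats_trie == {'fast': {'harness': {'results.html': {'results': (0, 1, 1234, 12, 15)}}}}
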
Example #19
0
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = OrderedDict()
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR + 'test'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.webkit_specific_web_platform_test_subdir = 'http' + port.TEST_PATH_SEPARATOR + 'wpt' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

        test_options_json_path = self._port.path_from_webkit_base(self.LAYOUT_TESTS_DIRECTORY, "tests-options.json")
        self._tests_options = json.loads(self._filesystem.read_text_file(test_options_json_path)) if self._filesystem.exists(test_options_json_path) else {}

    def _collect_tests(self, args, device_type=None):
        return self._finder.find_tests(self._options, args, device_type=device_type)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._needs_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _needs_web_platform_test(self, test):
        return self.web_platform_test_subdir in test or self.webkit_specific_web_platform_test_subdir in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names, device_type=None):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations[device_type], self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations[device_type].add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file, device_type=None):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file, device_type=device_type) else self._options.time_out_ms,
            self._is_http_test(test_file),
            should_dump_jsconsolelog_in_stderr=self._test_should_dump_jsconsolelog_in_stderr(test_file, device_type=device_type))

    def _test_is_slow(self, test_file, device_type=None):
        if self._expectations[device_type].model().has_modifier(test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

    def _test_should_dump_jsconsolelog_in_stderr(self, test_file, device_type=None):
        return self._expectations[device_type].model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http

    def _get_test_inputs(self, tests_to_run, repeat_each, iterations, device_type=None):
        test_inputs = []
        for _ in range(iterations):
            for test in tests_to_run:
                for _ in range(repeat_each):
                    test_inputs.append(self._test_input_for_file(test, device_type=device_type))
        return test_inputs

    def _update_worker_count(self, test_names, device_type=None):
        test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations, device_type=device_type)
        worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names, device_type=None):
        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(pixel_tests=self._options.pixel_tests, prefer_integrated_gpu=self._options.prefer_integrated_gpu):
            return False

        self._update_worker_count(test_names, device_type=device_type)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        self._port.setup_test_run(device_type)
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        num_failed_uploads = 0
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

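        # First pass: collect tests and parse expectations separately for each supported device type.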
        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            """Run the tests and return a RunDetails object with the results."""
            for_device_type = u'for {} '.format(device_type) if device_type else ''
            self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
            aggregate_tests.update(tests_to_run)

        # If a test is marked skipped but was explicitly requested, run it anyway.
        if self._options.skipped != 'always':
            for arg in args:
                if arg in total_tests and arg not in aggregate_tests:
                    tests_to_run_by_device[device_type_list[0]].append(arg)
                    aggregate_tests.add(arg)

        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not sum([len(tests) for tests in itervalues(tests_to_run_by_device)]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(self._is_websocket_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite='layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging; max_child_processes_for_run is the most relevant value here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result

    def _run_test_subset(self, tests_to_run, tests_to_skip, device_type=None):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False, device_type=device_type)

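            # Crashes are only retried when the port opts in via should_retry_crashes().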
            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when the run was interrupted by the user or hit the failures limit.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, num_workers=1, retrying=True, device_type=device_type)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        if initial_results is None:
            _log.error('No results generated')
            return test_run_results.RunDetails(exit_code=-1)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._output_perf_metrics(end_time - start_time, initial_results)
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying, device_type=None):
        test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations, device_type=device_type)

        return self._runner.run_tests(self._expectations[device_type], test_inputs, tests_to_skip, num_workers, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.items():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.items():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.items():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in the list of crashed_processes; if not, add it to run_results.
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _output_perf_metrics(self, run_time, initial_results):
        perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
        perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
        self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))

    def _results_to_upload_json_trie(self, expectations, results):
        FAILURE_TO_TEXT = {
            test_expectations.PASS: Upload.Expectations.PASS,
            test_expectations.CRASH: Upload.Expectations.CRASH,
            test_expectations.TIMEOUT: Upload.Expectations.TIMEOUT,
            test_expectations.IMAGE: Upload.Expectations.IMAGE,
            test_expectations.TEXT: Upload.Expectations.TEXT,
            test_expectations.AUDIO: Upload.Expectations.AUDIO,
            test_expectations.MISSING: Upload.Expectations.WARNING,
            test_expectations.IMAGE_PLUS_TEXT: ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
        }

        results_trie = {}
        for result in itervalues(results.results_by_name):
            if result.type == test_expectations.SKIP:
                continue

            expected = expectations.filtered_expectations_for_test(
                result.test_name,
                self._options.pixel_tests or bool(result.reftest_type),
                self._options.world_leaks,
            )
            if expected == {test_expectations.PASS}:
                expected = None
            else:
                expected = ' '.join([FAILURE_TO_TEXT.get(e, Upload.Expectations.FAIL) for e in expected])

            json_results_generator.add_path_to_trie(
                result.test_name,
                Upload.create_test_result(
                    expected=expected,
                    actual=FAILURE_TO_TEXT.get(result.type, Upload.Expectations.FAIL) if result.type else None,
                    time=int(result.test_run_time * 1000),
                ), results_trie)
        return results_trie

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
        if not self._options.results_server_host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        for hostname in self._options.results_server_host:
            _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

            attrs = [
                ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
                ('builder_name', builder_name),
                ('build_number', build_number),
                ('build_slave', build_slave),
                ('revisions', json.dumps(revisions)),
                ('start_time', str(start_time)),
                ('end_time', str(end_time)),
            ]

            uploader = FileUploader("http://%s/api/report" % hostname, 360)
            try:
                response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
                if not response:
                    _log.error("JSON upload failed; no response returned")
                    continue

                if response.code != 200:
                    _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                    continue

                response_text = response.read()
                try:
                    response_json = json.loads(response_text)
                except ValueError as error:
                    _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                    continue

                if response_json['status'] != 'OK':
                    _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                    continue

                _log.info("JSON uploaded.")
            except Exception as error:
                _log.error("Upload failed: %s" % error)
                continue

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in iteritems(stats):
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

    def _print_expectation_line_for_test(self, format_string, test, device_type=None):
        line = self._expectations[device_type].model().get_expectation_line(test)
        print(format_string.format(test, line.expected_behavior, self._expectations[device_type].readable_filename_and_line_number(line), line.original_string or ''))

    def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip={}):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string, test, device_type=device_type)

        print('')
        print('Tests to run{} ({})'.format(' for ' + str(device_type) if device_type else '', len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string, test, device_type=device_type)

    def print_expectations(self, args):
        aggregate_test_names = set()
        aggregate_tests_to_run = set()
        aggregate_tests_to_skip = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.DEFAULT_DEVICE_TYPES or [self._port.DEVICE_TYPE]
        for device_type in device_type_list:
            """Run the tests and return a RunDetails object with the results."""
            for_device_type = 'for {} '.format(device_type) if device_type else ''
            self._printer.write_update('Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update('Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)
            aggregate_tests_to_skip.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests_to_run]
            aggregate_tests_to_run.update(tests_to_run)

        aggregate_tests_to_skip = aggregate_tests_to_skip - aggregate_tests_to_run

        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests_to_run), self._options.repeat_each, self._options.iterations)
        test_col_width = len(max(aggregate_tests_to_run.union(aggregate_tests_to_skip), key=len)) + 1

        self._print_expectations_for_subset(device_type_list[0], test_col_width, tests_to_run_by_device[device_type_list[0]], aggregate_tests_to_skip)

        for device_type in device_type_list[1:]:
            self._print_expectations_for_subset(device_type, test_col_width, tests_to_run_by_device[device_type])

        return 0
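
For reference, the retry selection in _tests_to_retry above keeps every unexpected result except passes and missing results, and keeps crashes only when include_crashes is set. A simplified, self-contained sketch of that filter; the constants and the name-to-type mapping are stand-ins for the real test_expectations values and TestResult objects:

# Stand-in constants; the real ones live in test_expectations.
PASS, MISSING, CRASH, TIMEOUT = 'PASS', 'MISSING', 'CRASH', 'TIMEOUT'

def tests_to_retry_sketch(unexpected_results_by_name, include_crashes):
    # Mirror the filter above: drop PASS and MISSING, drop CRASH unless
    # include_crashes is set, and retry everything else.
    return [name for name, result_type in unexpected_results_by_name.items()
            if result_type not in (PASS, MISSING)
            and (result_type != CRASH or include_crashes)]

results = {'a.html': TIMEOUT, 'b.html': CRASH, 'c.html': PASS}
assert tests_to_retry_sketch(results, include_crashes=False) == ['a.html']
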
Example #20
0
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        num_failed_uploads = 0
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            """Run the tests and return a RunDetails object with the results."""
            for_device_type = u'for {} '.format(device_type) if device_type else ''
            self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
            aggregate_tests.update(tests_to_run)

        # If a test is marked skipped but was explicitly requested, run it anyway.
        if self._options.skipped != 'always':
            for arg in args:
                if arg in total_tests and arg not in aggregate_tests:
                    tests_to_run_by_device[device_type_list[0]].append(arg)
                    aggregate_tests.add(arg)

        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not sum([len(tests) for tests in itervalues(tests_to_run_by_device)]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(self._is_websocket_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite='layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging; max_child_processes_for_run is the most relevant value here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result
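
The per-device loop in the run method above assigns each test to the first device type whose run list contains it; later device types only pick up tests no earlier type already claimed. A compact sketch of that bookkeeping with plain lists and sets (the device names and test paths are made up for illustration):

def split_tests_by_device_sketch(tests_to_run_for_device):
    # tests_to_run_for_device: ordered (device_type, runnable tests) pairs.
    aggregate_tests = set()
    tests_to_run_by_device = {}
    for device_type, tests_to_run in tests_to_run_for_device:
        # Each test runs on at most one device type: the first that can run it.
        tests_to_run_by_device[device_type] = [t for t in tests_to_run
                                               if t not in aggregate_tests]
        aggregate_tests.update(tests_to_run)
    return tests_to_run_by_device

plan = split_tests_by_device_sketch([
    ('iPhone', ['fast/a.html', 'fast/b.html']),
    ('iPad', ['fast/b.html', 'fast/c.html']),
])
assert plan == {'iPhone': ['fast/a.html', 'fast/b.html'], 'iPad': ['fast/c.html']}
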
Example #21
0
class Manager(object):
    """A class for managing running a series of layout tests."""
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: An object implementing platform-specific functionality.
          options: An options argument which contains command line options.
          printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self.ARCHIVED_RESULTS_LIMIT = 25
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._test_is_slow)

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        running_all_tests = False
        try:
            paths, test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            num_workers = self._port.num_workers(
                int(self._options.child_processes))

            initial_results = self._run_tests(tests_to_run, tests_to_skip,
                                              self._options.repeat_each,
                                              self._options.iterations,
                                              num_workers)

            # Don't retry failures when the run was interrupted by the user or hit the failures limit.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            all_retry_results = []
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                for retry_attempt in xrange(1, self._options.num_retries + 1):
                    if not tests_to_retry:
                        break

                    _log.info('')
                    _log.info(
                        'Retrying %s, attempt %d of %d...',
                        grammar.pluralize('unexpected failure',
                                          len(tests_to_retry)), retry_attempt,
                        self._options.num_retries)

                    retry_results = self._run_tests(
                        tests_to_retry,
                        tests_to_skip=set(),
                        repeat_each=1,
                        iterations=1,
                        num_workers=num_workers,
                        retry_attempt=retry_attempt)
                    all_retry_results.append(retry_results)

                    tests_to_retry = self._tests_to_retry(retry_results)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            if self._options.write_full_results_to:
                self._filesystem.copyfile(
                    self._filesystem.join(self._results_directory,
                                          "full_results.json"),
                    self._options.write_full_results_to)

            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = test_run_results.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or (self._options.full_results_html
                                      and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time,
                                            initial_results,
                                            summarized_failing_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)

    def _collect_tests(self, args):
        return self._finder.find_tests(
            args,
            test_list=self._options.test_list,
            fastest_percentile=self._options.fastest)

    def _is_http_test(self, test):
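        # True for tests under http/, for websocket tests, and for any path
        # containing '/http/' (e.g. virtual http suites).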
        return (test.startswith(self.HTTP_SUBDIR)
                or self._is_websocket_test(test)
                or self._port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR in test)

    def _is_inspector_test(self, test):
        return self.INSPECTOR_SUBDIR in test

    def _is_websocket_test(self, test):
        if self._port.should_use_wptserve(test):
            return False

        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (
            self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names,
                                                self._expectations,
                                                self._http_tests(test_names))
        tests_to_run = [
            test for test in test_names if test not in tests_to_skip
        ]

        if not tests_to_run:
            return tests_to_run, tests_to_skip

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            tests_to_run.sort()
            random.Random(self._options.seed).shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(
            tests_to_run)
        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(
            test_file,
            self._options.slow_time_out_ms
            if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file),
            should_add_missing_baselines=(
                self._options.new_test_results
                and not self._test_is_expected_missing(test_file)))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when running multiple
        instances of this test runner.

        Perf tests are locked because heavy load caused by running other
        tests in parallel might cause some of them to time out.
        """
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_expected_missing(self, test_file):
        expectations = self._expectations.model().get_expectations(test_file)
        return (test_expectations.MISSING in expectations
                or test_expectations.NEEDS_REBASELINE in expectations
                or test_expectations.NEEDS_MANUAL_REBASELINE in expectations)

    def _test_is_slow(self, test_file):
        return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)

    def _needs_servers(self, test_names):
        return any(
            self._test_requires_lock(test_name) for test_name in test_names)

    def _rename_results_folder(self):
        try:
            timestamp = time.strftime(
                "%Y-%m-%d-%H-%M-%S",
                time.localtime(
                    self._filesystem.mtime(
                        self._filesystem.join(self._results_directory,
                                              "results.html"))))
        except (IOError, OSError) as e:
            # results.html may not have been generated in the previous run if the
            # run was interrupted before testing even started. In that case, don't
            # archive the folder; simply overwrite its contents with the new results.
            import errno
            if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
                self._printer.write_update(
                    "No results.html file found in previous run, skipping it.")
            return None
        archived_name = ''.join(
            (self._filesystem.basename(self._results_directory), "_",
             timestamp))
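        # e.g. '<results-dir-name>_2021-03-04-05-06-07', created next to the
        # original results directory.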
        archived_path = self._filesystem.join(
            self._filesystem.dirname(self._results_directory), archived_name)
        self._filesystem.move(self._results_directory, archived_path)

    def _delete_dirs(self, dir_list):
        for dir in dir_list:
            self._filesystem.rmtree(dir)

    def _limit_archived_results_count(self):
        results_directory_path = self._filesystem.dirname(
            self._results_directory)
        file_list = self._filesystem.listdir(results_directory_path)
        results_directories = []
        for dir in file_list:
            file_path = self._filesystem.join(results_directory_path, dir)
            if self._filesystem.isdir(
                    file_path) and self._results_directory in file_path:
                results_directories.append(file_path)
        results_directories.sort(key=lambda x: self._filesystem.mtime(x))
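        # Oldest first: everything except the newest ARCHIVED_RESULTS_LIMIT
        # directories gets deleted.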
        self._printer.write_update("Clobbering excess archived results in %s" %
                                   results_directory_path)
        self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if self._options.build:
            exit_code = self._port.check_build(self._needs_servers(test_names),
                                               self._printer)
            if exit_code:
                _log.error("Build check failed")
                return exit_code

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            exit_code = self._port.check_sys_deps(
                self._needs_servers(test_names))
            if exit_code:
                self._port.stop_helper()
                return exit_code

        if self._options.clobber_old_results:
            self._clobber_old_results()
        elif self._filesystem.exists(self._results_directory):
            self._limit_archived_results_count()
            # Rename the existing results folder for archiving.
            self._rename_results_folder()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        self._port.setup_test_run()
        return test_run_results.OK_EXIT_STATUS

    def _run_tests(self,
                   tests_to_run,
                   tests_to_skip,
                   repeat_each,
                   iterations,
                   num_workers,
                   retry_attempt=0):

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs,
                                      tests_to_skip, num_workers,
                                      retry_attempt)

    def _start_servers(self, tests_to_run):
        if self._port.is_wptserve_enabled() and any(
                self._port.is_wptserve_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WPTServe ...')
            self._port.start_wptserve()
            self._wptserve_started = True

        if self._port.requires_http_server() or any(
            (self._is_http_test(test) or self._is_inspector_test(test))
                for test in tests_to_run):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(
                additional_dirs={},
                number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True

        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True

    def _stop_servers(self):
        if self._wptserve_started:
            self._printer.write_update('Stopping WPTServe ...')
            self._wptserve_started = False
            self._port.stop_wptserve()
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()

        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Looks for and writes new crash logs, at the end of the test run.

        Since crash logs can take a long time to be written out if the system is
        under stress, do a second pass at the end of the test run.

        Args:
          run_results: The results of the test run.
          start_time: Time the tests started at. We're looking for crash
              logs after that time.
        """
        crashed_processes = []
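        # Each entry is [test name, crashed process name, pid], which is what
        # the port's sample and crash-log lookups expect.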
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if result.type != test_expectations.CRASH:
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                if failure.has_log:
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        dir_above_results_path = self._filesystem.dirname(
            self._results_directory)
        self._printer.write_update("Clobbering old results in %s" %
                                   dir_above_results_path)
        if not self._filesystem.exists(dir_above_results_path):
            return
        file_list = self._filesystem.listdir(dir_above_results_path)
        results_directories = []
        for dir in file_list:
            file_path = self._filesystem.join(dir_above_results_path, dir)
            if self._filesystem.isdir(
                    file_path) and self._results_directory in file_path:
                results_directories.append(file_path)
        self._delete_dirs(results_directories)

        # Port specific clean-up.
        self._port.clobber_old_port_specific_results()

    def _tests_to_retry(self, run_results):
        # TODO(ojan): This should also check that result.type != test_expectations.MISSING
        # since retrying missing expectations is silly. But that's a bit tricky since we
        # only consider the last retry attempt for the count of unexpected regressions.
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if result.type != test_expectations.PASS
        ]

    def _write_json_files(self, summarized_full_results,
                          summarized_failing_results, initial_results,
                          running_all_tests):
        _log.debug("Writing JSON files in %s.", self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory,
                                                "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        # Save out the times data so we can use it for --fastest in the future.
        if running_all_tests:
            bot_test_times_path = self._port.bot_test_times_path()
            self._filesystem.maybe_make_directory(
                self._filesystem.dirname(bot_test_times_path))
            json_results_generator.write_json(self._filesystem, times_trie,
                                              bot_test_times_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory,
                                           "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "full_results.json")
        json_results_generator.write_json(self._filesystem,
                                          summarized_full_results,
                                          full_results_path)

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "failing_results.json")
        # We write failing_results.json out as jsonp because we need to load it
        # from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem,
                                          summarized_failing_results,
                                          full_results_path,
                                          callback="ADD_RESULTS")

        if self._options.json_test_results:
            json_results_generator.write_json(self._filesystem,
                                              summarized_failing_results,
                                              self._options.json_test_results)

        _log.debug("Finished writing JSON files.")

    def _upload_json_files(self):
        if not self._options.test_results_server:
            return

        if not self._options.master_name:
            _log.error(
                "--test-results-server was set, but --master-name was not.  Not uploading JSON files."
            )
            return

        _log.debug("Uploading JSON files for builder: %s",
                   self._options.builder_name)
        attrs = [("builder", self._options.builder_name),
                 ("testtype", self._options.step_name),
                 ("master", self._options.master_name)]

        files = [
            (file, self._filesystem.join(self._results_directory, file))
            for file in
            ["failing_results.json", "full_results.json", "times_ms.json"]
        ]

        url = "http://%s/testfile/upload" % self._options.test_results_server
        # Set an upload timeout in case the appengine server is having problems;
        # 120 seconds is more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(
                self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug("JSON uploaded.")
                else:
                    _log.debug("JSON upload failed, %d: '%s'", response.code,
                               response.read())
            else:
                _log.error("JSON upload failed; no response returned")
        except Exception as err:
            _log.error("Upload failed: %s", err)

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast',
                                                    'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
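            # Worker names look like 'worker/3'; take the numeric suffix, or -1
            # when the worker is unknown.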
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {
                    'results':
                    (_worker_number(result.worker_name), result.test_number,
                     result.pid, int(result.test_run_time * 1000),
                     int(result.total_run_time * 1000))
                }
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
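
The Manager variants on this page are normally constructed and driven by the run-webkit-tests entry point rather than used directly. As a rough sketch of a caller (the port-factory call and the host/options/printer objects are assumptions about the surrounding tooling, not part of the example above):

# Hypothetical driver sketch -- not from the original manager.py. In practice
# run_webkit_tests.py parses the options and builds the host, port and printer.
def run_layout_tests(host, options, args, printer):
    port = host.port_factory.get(options.platform, options)  # assumed factory API
    manager = Manager(port, options, printer)
    details = manager.run(args)  # a test_run_results.RunDetails
    return details.exit_code
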
Example #22
0
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for test in tests_to_run)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        if default_device_tests:
            _log.info('')
            _log.info("Running %s", pluralize(len(tests_to_run), "test"))
            _log.info('')
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)

            initial_results, retry_results, enabled_pixel_tests_in_retry = self._run_test_subset(default_device_tests, tests_to_skip)

        # Only use a single worker for custom device classes
        self._options.child_processes = 1
        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            if device_tests:
                _log.info('')
                _log.info('Running %s for %s', pluralize(len(device_tests), "test"), device_class)
                _log.info('')
                if not self._set_up_run(device_tests, device_class):
                    return test_run_results.RunDetails(exit_code=-1)

                device_initial_results, device_retry_results, device_enabled_pixel_tests_in_retry = self._run_test_subset(device_tests, tests_to_skip)

                initial_results = initial_results.merge(device_initial_results) if initial_results else device_initial_results
                retry_results = retry_results.merge(device_retry_results) if retry_results else device_retry_results
                enabled_pixel_tests_in_retry |= device_enabled_pixel_tests_in_retry

        self._runner.stop_servers()
        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
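
One pattern worth noting in this variant is how per-device results are accumulated: start from None and merge each batch as it arrives. A minimal stand-alone sketch of that fold (merge_all is illustrative; batch.merge mirrors how the results objects' merge is used above):

# Illustrative sketch only; any object exposing a merge() method works here.
def merge_all(result_batches):
    combined = None
    for batch in result_batches:
        combined = combined.merge(batch) if combined else batch
    return combined
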
Example #23
0
class Manager(object):
    """Test execution manager

    This class has the main entry points for run-webkit-tests; the ..run_webkit_tests module almost
    exclusively just handles CLI options. It orchestrates collecting the tests (through
    LayoutTestFinder), running them (LayoutTestRunner), and then displaying the results
    (TestResultWriter/Printer).
    """

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        fs = port.host.filesystem
        self._filesystem = fs
        self._options = options
        self._printer = printer
        self._expectations = OrderedDict()
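        # Keyed by DeviceType (None when a port has no specific device types);
        # populated per device in _collect_tests().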
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

        self._tests_options = {}
        test_options_json_path = fs.join(self._port.layout_tests_dir(), "tests-options.json")
        if fs.exists(test_options_json_path):
            with fs.open_binary_file_for_reading(test_options_json_path) as fd:
                try:
                    self._tests_options = json.load(fd)
                except (ValueError, IOError):
                    pass

    def _collect_tests(self,
                       paths,  # type: List[str]
                       device_type_list,  # type: List[Optional[DeviceType]]
                       ):
        aggregate_tests = set()  # type: Set[Test]
        aggregate_tests_to_run = set()  # type: Set[Test]
        tests_to_run_by_device = {}  # type: Dict[Optional[DeviceType], List[Test]]

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            for_device_type = u'for {} '.format(device_type) if device_type else ''
            self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
            paths, tests = self._finder.find_tests(self._options, paths, device_type=device_type)
            aggregate_tests.update(tests)

            test_names = [test.test_path for test in tests]

            self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            tests_to_run = self._tests_to_run(tests, device_type=device_type)
            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests_to_run]
            aggregate_tests_to_run.update(tests_to_run_by_device[device_type])

        aggregate_tests_to_skip = aggregate_tests - aggregate_tests_to_run

        return tests_to_run_by_device, aggregate_tests_to_skip

    def _skip_tests(self, all_tests_list, expectations, http_tests):
        all_tests = set(all_tests_list)

        tests_to_skip = expectations.model().get_tests_with_result_type(test_expectations.SKIP)
        if self._options.skip_failing_tests:
            tests_to_skip.update(expectations.model().get_tests_with_result_type(test_expectations.FAIL))
            tests_to_skip.update(expectations.model().get_tests_with_result_type(test_expectations.FLAKY))

        if self._options.skipped == 'only':
            tests_to_skip = all_tests - tests_to_skip
        elif self._options.skipped == 'ignore':
            tests_to_skip = set()

        # unless of course we don't want to run the HTTP tests :)
        if not self._options.http:
            tests_to_skip.update(set(http_tests))

        return tests_to_skip

    def _split_into_chunks(self, test_names):
        """split into a list to run and a set to skip, based on --run-chunk and --run-part."""
        if not self._options.run_chunk and not self._options.run_part:
            return test_names, set()

        # If the user specifies they just want to run a subset of the tests,
        # just grab a subset of the non-skipped tests.
        chunk_value = self._options.run_chunk or self._options.run_part
        try:
            (chunk_num, chunk_len) = chunk_value.split(":")
            chunk_num = int(chunk_num)
            assert(chunk_num >= 0)
            test_size = int(chunk_len)
            assert(test_size > 0)
        except AssertionError:
            _log.critical("invalid chunk '%s'" % chunk_value)
            return (None, None)

        # Get the number of tests
        num_tests = len(test_names)

        # Get the start offset of the slice.
        if self._options.run_chunk:
            chunk_len = test_size
            # In this case chunk_num can be really large. We need to make the
            # slice fit within the current number of tests.
            slice_start = (chunk_num * chunk_len) % num_tests
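            # e.g. --run-chunk=3:5 with 12 tests: slice_start = (3 * 5) % 12 = 3, so tests [3:8] run.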
        else:
            # Validate the data.
            assert(test_size <= num_tests)
            assert(chunk_num <= test_size)

            # To compute chunk_len without skipping any tests, round num_tests
            # up to the next multiple of test_size so it divides evenly into
            # all the parts.
            rounded_tests = num_tests
            if rounded_tests % test_size != 0:
                rounded_tests = (num_tests + test_size - (num_tests % test_size))

            chunk_len = rounded_tests // test_size
            slice_start = chunk_len * (chunk_num - 1)
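            # e.g. --run-part=2:3 with 10 tests: rounded_tests = 12, chunk_len = 4, slice_start = 4, so tests [4:8] run.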
            # It's fine if this runs past the end; slice_end is clamped below.

        # Get the end offset of the slice.
        slice_end = min(num_tests, slice_start + chunk_len)

        tests_to_run = test_names[slice_start:slice_end]

        _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))

        # If we reached the end and we don't have enough tests, we run some
        # from the beginning.
        if slice_end - slice_start < chunk_len:
            extra = chunk_len - (slice_end - slice_start)
            _log.debug('   last chunk is partial, appending [0:%d]' % extra)
            tests_to_run.extend(test_names[0:extra])

        return (tests_to_run, set(test_names) - set(tests_to_run))

    def _tests_to_run(self, tests, device_type):
        test_names = {test.test_path for test in tests}
        test_names_to_skip = self._skip_tests(test_names,
                                              self._expectations[device_type],
                                              {test.test_path for test in tests if test.needs_any_server})
        tests_to_run = [test for test in tests if test.test_path not in test_names_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=lambda x: self._port.test_key(x.test_path))
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, _ = self._split_into_chunks(tests_to_run)
        return tests_to_run

    def _test_input_for_file(self, test_file, device_type):
        test_is_slow = self._test_is_slow(test_file.test_path, device_type=device_type)
        reference_files = self._port.reference_files(
            test_file.test_path, device_type=device_type
        )
        timeout = (
            self._options.slow_time_out_ms
            if test_is_slow
            else self._options.time_out_ms
        )
        should_dump_jsconsolelog_in_stderr = (
            self._test_should_dump_jsconsolelog_in_stderr(
                test_file.test_path, device_type=device_type
            )
        )

        if reference_files:
            should_run_pixel_test = True
        elif not self._options.pixel_tests:
            should_run_pixel_test = False
        elif self._options.pixel_test_directories:
            should_run_pixel_test = any(
                test_file.test_path.startswith(directory)
                for directory in self._options.pixel_test_directories
            )
        else:
            should_run_pixel_test = True

        return TestInput(
            test_file,
            timeout=timeout,
            is_slow=test_is_slow,
            needs_servers=test_file.needs_any_server,
            should_dump_jsconsolelog_in_stderr=should_dump_jsconsolelog_in_stderr,
            reference_files=reference_files,
            should_run_pixel_test=should_run_pixel_test,
        )

    def _test_is_slow(self, test_file, device_type):
        if self._expectations[device_type].model().has_modifier(test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

    def _test_should_dump_jsconsolelog_in_stderr(self, test_file, device_type):
        return self._expectations[device_type].model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def _multiply_test_inputs(self, test_inputs, repeat_each, iterations):
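        # Repeat each input back-to-back `repeat_each` times, then repeat that
        # whole sequence `iterations` times
        # (e.g. [a, b] with repeat_each=2, iterations=3 -> [a, a, b, b] * 3).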
        if repeat_each == 1:
            per_iteration = list(test_inputs)[:]
        else:
            per_iteration = []
            for test_input in test_inputs:
                per_iteration.extend([test_input] * repeat_each)

        return per_iteration * iterations

    def _update_worker_count(self, test_inputs):
        new_test_inputs = self._multiply_test_inputs(test_inputs, self._options.repeat_each, self._options.iterations)
        worker_count = self._runner.get_worker_count(new_test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_inputs, device_type):
        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(pixel_tests=self._options.pixel_tests, prefer_integrated_gpu=self._options.prefer_integrated_gpu):
            return False

        self._update_worker_count(test_inputs)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        self._port.setup_test_run(device_type)
        return True

    def run(self, args):
        num_failed_uploads = 0

        device_type_list = self._port.supported_device_types()
        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        aggregate_tests_to_run = set()  # type: Set[Test]
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)

        skipped_tests_by_path = defaultdict(set)
        for test in aggregate_tests_to_skip:
            skipped_tests_by_path[test.test_path].add(test)

        # If a test is marked skipped but was explicitly requested, run it anyway.
        if self._options.skipped != 'always':
            for arg in args:
                if arg in skipped_tests_by_path:
                    tests = skipped_tests_by_path[arg]
                    tests_to_run_by_device[device_type_list[0]].extend(tests)
                    aggregate_tests_to_run |= tests
                    aggregate_tests_to_skip -= tests
                    del skipped_tests_by_path[arg]

        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if aggregate_tests == aggregate_tests_to_skip:
            # XXX: this is currently identical to the following check, which likely isn't intended
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

        # Check to make sure we have no tests to run that are not skipped.
        if not aggregate_tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue

            test_inputs = [self._test_input_for_file(test, device_type=device_type)
                           for test in tests_to_run_by_device[device_type]]

            if not self._set_up_run(test_inputs, device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

            skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
            for skipped_test in set(aggregate_tests_to_skip):
                skipped_result = test_results.TestResult(skipped_test.test_path)
                skipped_result.type = test_expectations.SKIP
                skipped_results.add(skipped_result, expected=True)
            temp_initial_results = temp_initial_results.merge(skipped_results)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging; max_child_processes_for_run is the most relevant value here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result

    def _run_test_subset(self,
                         test_inputs,  # type: List[TestInput]
                         device_type,  # type: Optional[DeviceType]
                         ):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(test_inputs, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False, device_type=device_type)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
                if enabled_pixel_tests_in_retry:
                    retry_test_inputs = [self._test_input_for_file(test_input.test, device_type=device_type)
                                         for test_input in test_inputs
                                         if test_input.test.test_path in tests_to_retry]
                else:
                    retry_test_inputs = [test_input
                                         for test_input in test_inputs
                                         if test_input.test.test_path in tests_to_retry]

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(retry_test_inputs,
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True,
                                                device_type=device_type)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        if initial_results is None:
            _log.error('No results generated')
            return test_run_results.RunDetails(exit_code=-1)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._output_perf_metrics(end_time - start_time, initial_results)
            self._save_json_files(summarized_results, initial_results)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self,
                   test_inputs,  # type: List[TestInput]
                   repeat_each,  # type: int
                   iterations,  # type: int
                   num_workers,  # type: int
                   retrying,  # type: bool
                   device_type,  # type: Optional[DeviceType]
                   ):
        new_test_inputs = self._multiply_test_inputs(test_inputs, repeat_each, iterations)

        assert self._runner is not None
        return self._runner.run_tests(self._expectations[device_type], new_test_inputs, num_workers, retrying, device_type)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._options.pixel_tests = True
        return self._port.start_helper(prefer_integrated_gpu=self._options.prefer_integrated_gpu)

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.items():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.items():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.items():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # If this crashing 'test' isn't already in crashed_processes, add it to run_results.
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        self._printer.write_update("Deleting results directory {}".format(self._results_directory))
        if self._filesystem.isdir(self._results_directory):
            self._filesystem.rmtree(self._results_directory)

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _output_perf_metrics(self, run_time, initial_results):
        perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
        perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
        self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))

    def _results_to_upload_json_trie(self, expectations, results):
        FAILURE_TO_TEXT = {
            test_expectations.PASS: Upload.Expectations.PASS,
            test_expectations.CRASH: Upload.Expectations.CRASH,
            test_expectations.TIMEOUT: Upload.Expectations.TIMEOUT,
            test_expectations.IMAGE: Upload.Expectations.IMAGE,
            test_expectations.TEXT: Upload.Expectations.TEXT,
            test_expectations.AUDIO: Upload.Expectations.AUDIO,
            test_expectations.MISSING: Upload.Expectations.WARNING,
            test_expectations.IMAGE_PLUS_TEXT: ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
        }
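        # Anything not listed above falls back to Upload.Expectations.FAIL
        # (see the .get() fallbacks below).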

        results_trie = {}
        for result in itervalues(results.results_by_name):
            if result.type == test_expectations.SKIP:
                continue

            expected = expectations.filtered_expectations_for_test(
                result.test_name,
                self._options.pixel_tests or bool(result.reftest_type),
                self._options.world_leaks,
            )
            if expected == {test_expectations.PASS}:
                expected = None
            else:
                expected = ' '.join([FAILURE_TO_TEXT.get(e, Upload.Expectations.FAIL) for e in expected])

            json_results_generator.add_path_to_trie(
                result.test_name,
                Upload.create_test_result(
                    expected=expected,
                    actual=FAILURE_TO_TEXT.get(result.type, Upload.Expectations.FAIL) if result.type else None,
                    time=int(result.test_run_time * 1000),
                ), results_trie)
        return results_trie

    def _save_json_files(self, summarized_results, initial_results):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._results_directory,
            self._expectations, initial_results,
            "layout-tests")

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
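                # (worker number, test number, pid, test run time in ms, total run time in ms)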
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in iteritems(stats):
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

    def _print_expectation_line_for_test(self, format_string, test, device_type):
        test_path = test.test_path
        line = self._expectations[device_type].model().get_expectation_line(test_path)
        print(format_string.format(test_path,
                                   line.expected_behavior,
                                   self._expectations[device_type].readable_filename_and_line_number(line),
                                   line.original_string or ''))

    def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip=None):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string, test, device_type=device_type)

        print('')
        print('Tests to run{} ({})'.format(' for ' + str(device_type) if device_type else '', len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string, test, device_type=device_type)

    def print_expectations(self, args):
        device_type_list = self._port.DEFAULT_DEVICE_TYPES or [self._port.DEVICE_TYPE]

        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return -1

        aggregate_tests_to_run = set()
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)
        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests), len(aggregate_tests_to_run), self._options.repeat_each, self._options.iterations)
        test_col_width = max(len(test.test_path) for test in aggregate_tests) + 1

        self._print_expectations_for_subset(device_type_list[0], test_col_width, tests_to_run_by_device[device_type_list[0]], aggregate_tests_to_skip)

        for device_type in device_type_list[1:]:
            self._print_expectations_for_subset(device_type, test_col_width, tests_to_run_by_device[device_type])

        return 0
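
The doubled braces in _print_expectations_for_subset are easy to misread: the first .format(width=...) call only bakes the column width into the template, leaving plain {} placeholders for the later per-test call in _print_expectation_line_for_test. Below is a minimal standalone sketch of that two-stage formatting; the width and the rows are made up for illustration.

# Two-stage formatting as used by _print_expectations_for_subset above.
# The width and the rows are purely illustrative.
test_col_width = 40
format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
# After the first format() call, format_string is '{:40} {} {} {}'.

rows = [
    ('fast/harness/results.html', 'Pass', 'TestExpectations:12', ''),
    ('fast/harness/sample-fail.html', 'Failure', 'TestExpectations:34',
     'fast/harness/sample-fail.html [ Failure ]'),
]
for test_path, behavior, location, original_line in rows:
    print(format_string.format(test_path, behavior, location, original_line))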
Example #24
    def run(self, args):
        num_failed_uploads = 0

        device_type_list = self._port.supported_device_types()
        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        aggregate_tests_to_run = set()  # type: Set[Test]
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)

        skipped_tests_by_path = defaultdict(set)
        for test in aggregate_tests_to_skip:
            skipped_tests_by_path[test.test_path].add(test)

        # If a test is marked skipped, but was explicitly requested, run it anyway
        if self._options.skipped != 'always':
            for arg in args:
                if arg in skipped_tests_by_path:
                    tests = skipped_tests_by_path[arg]
                    tests_to_run_by_device[device_type_list[0]].extend(tests)
                    aggregate_tests_to_run |= tests
                    aggregate_tests_to_skip -= tests
                    del skipped_tests_by_path[arg]

        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if aggregate_tests == aggregate_tests_to_skip:
            # XXX: this is currently identical to the following if, which likely isn't intended
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

        # Check to make sure we have no tests to run that are not skipped.
        if not aggregate_tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
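        # max_child_processes_for_run records the largest worker pool used across device types, for the final logging below.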
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
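            # Clamp the worker count to this device type's limit; zero means the device type is unavailable and its tests are skipped.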
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue

            test_inputs = [self._test_input_for_file(test, device_type=device_type)
                           for test in tests_to_run_by_device[device_type]]

            if not self._set_up_run(test_inputs, device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

            skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
            for skipped_test in set(aggregate_tests_to_skip):
                skipped_result = test_results.TestResult(skipped_test.test_path)
                skipped_result.type = test_expectations.SKIP
                skipped_results.add(skipped_result, expected=True)
            temp_initial_results = temp_initial_results.merge(skipped_results)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result
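
Both _results_to_upload_json_trie and _stats_trie in the examples above hand individual test paths to json_results_generator.add_path_to_trie and get back a nested dictionary keyed by path components. Here is a minimal sketch of the layout those callers assume; the helper below is a stand-in written for illustration, not the real json_results_generator implementation.

# Stand-in for json_results_generator.add_path_to_trie: store value under
# path, nesting one dict level per '/'-separated component.
def add_path_to_trie(path, value, trie):
    directory, _, rest = path.partition('/')
    if not rest:
        trie[directory] = value
        return
    trie.setdefault(directory, {})
    add_path_to_trie(rest, value, trie[directory])

results_trie = {}
add_path_to_trie('fast/harness/results.html', {'actual': 'PASS'}, results_trie)
add_path_to_trie('fast/harness/sample-fail.html', {'actual': 'TEXT'}, results_trie)
assert results_trie == {
    'fast': {'harness': {'results.html': {'actual': 'PASS'},
                         'sample-fail.html': {'actual': 'TEXT'}}}}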
Example #25
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (
            self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names,
                                                self._expectations,
                                                self._http_tests(test_names))
        tests_to_run = [
            test for test in test_names if test not in tests_to_skip
        ]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(
            tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(
            test_file, self._options.slow_time_out_ms
            if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because heavy load caused by running other tests in parallel
        might cause some of them to time out."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_slow(self, test_file):
        return self._expectations.has_modifier(test_file,
                                               test_expectations.SLOW)

    def needs_servers(self, test_names):
        return any(
            self._test_requires_lock(test_name)
            for test_name in test_names) and self._options.http

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        start_time = time.time()
        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(
                tests_to_run,
                tests_to_skip,
                self._options.repeat_each,
                self._options.iterations,
                int(self._options.child_processes),
                retrying=False)

            tests_to_retry = self._tests_to_retry(
                initial_results,
                include_crashes=self._port.should_retry_crashes())
            if self._options.retry_failures and tests_to_retry and not initial_results.interrupted:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." %
                          len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, retry_results,
            enabled_pixel_tests_in_retry)
        self._printer.print_results(end_time - start_time, initial_results,
                                    summarized_results)

        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results)

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if self._options.show_results and (
                    initial_results.unexpected_results_by_name or
                (self._options.full_results_html
                 and initial_results.total_failures)):
                self._port.show_results_html_file(results_path)

        return test_run_results.RunDetails(
            self._port.exit_code_from_summarized_results(summarized_results),
            summarized_results, initial_results, retry_results,
            enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
                   num_workers, retrying):
        needs_http = any(self._is_http_test(test) for test in tests_to_run)
        needs_websockets = any(
            self._is_websocket_test(test) for test in tests_to_run)

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs,
                                      tests_to_skip, num_workers, needs_http,
                                      needs_websockets, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()

        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(
                    self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(
                    self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if ((result.type != test_expectations.PASS) and (
                result.type != test_expectations.MISSING) and (
                    result.type != test_expectations.CRASH or include_crashes))
        ]

    def _upload_json_files(self, summarized_results, initial_results):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory,
                                                "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory,
                                           "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem,
                                          summarized_results,
                                          full_results_path,
                                          callback="ADD_RESULTS")

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            BUILDER_BASE_URL, self._expectations, initial_results,
            self._options.test_results_server, "layout-tests",
            self._options.master_name)

        _log.debug("Finished writing JSON files.")

        json_files = [
            "incremental_results.json", "full_results.json", "times_ms.json"
        ]

        generator.upload_json_files(json_files)

        incremental_results_path = self._filesystem.join(
            self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast',
                                                    'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {
                    'results':
                    (_worker_number(result.worker_name), result.test_number,
                     result.pid, int(result.test_run_time * 1000),
                     int(result.total_run_time * 1000))
                }
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
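
The _is_http_test / _is_websocket_test helpers above (in the variant without web platform tests) classify tests purely by substring matching against the subdirectory prefixes built in __init__. A small self-contained sketch of that classification, assuming '/' as the port's TEST_PATH_SEPARATOR.

# Substring-based classification, mirroring _is_http_test / _is_websocket_test.
TEST_PATH_SEPARATOR = '/'  # assumption for this sketch
HTTP_SUBDIR = 'http' + TEST_PATH_SEPARATOR
WEBSOCKET_SUBDIR = 'websocket' + TEST_PATH_SEPARATOR

def is_websocket_test(test):
    return WEBSOCKET_SUBDIR in test

def is_http_test(test):
    return HTTP_SUBDIR in test or is_websocket_test(test)

assert is_http_test('http/tests/xmlhttprequest/simple.html')
assert is_websocket_test('http/tests/websocket/tests/echo.html')
assert not is_http_test('fast/dom/node.html')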
Example #26
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port,
                                        self._printer, self._results_directory,
                                        self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(
            test) or self._is_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _is_web_platform_test(self, test):
        return self.web_platform_test_subdir in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names,
                                                self._expectations,
                                                self._http_tests(test_names))
        tests_to_run = [
            test for test in test_names if test not in tests_to_skip
        ]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(
            tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(
            test_file, self._options.slow_time_out_ms
            if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._is_http_test(test_file))

    def _test_is_slow(self, test_file):
        return self._expectations.model().has_modifier(test_file,
                                                       test_expectations.SLOW)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name)
                   for test_name in test_names) and self._options.http

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False

        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port,
            test_names,
            force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(
                tests_to_run,
                tests_to_skip,
                self._options.repeat_each,
                self._options.iterations,
                int(self._options.child_processes),
                retrying=False)

            tests_to_retry = self._tests_to_retry(
                initial_results,
                include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." %
                          pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, retry_results,
            enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(
                self._port,
                self._expectations,
                initial_results,
                retry_results,
                enabled_pixel_tests_in_retry,
                include_passes=True,
                include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results,
                                    summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results,
                                    results_including_passes, start_time,
                                    end_time)

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (
                        initial_results.unexpected_results_by_name or
                    (self._options.full_results_html
                     and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(
                    summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results,
                                           initial_results, retry_results,
                                           enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
                   num_workers, retrying):
        needs_http = any(
            (self._is_http_test(test) and not self._is_web_platform_test(test))
            for test in tests_to_run)
        needs_web_platform_test_server = any(
            self._is_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(
            self._is_websocket_test(test) for test in tests_to_run)

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs,
                                      tests_to_skip, num_workers, needs_http,
                                      needs_websockets,
                                      needs_web_platform_test_server, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check if this crashing 'test' is already in the list of crashed_processes; if not, add it to run_results
                if not any(process[0] == test
                           for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(
                    self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(
                    self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if ((result.type != test_expectations.PASS) and (
                result.type != test_expectations.MISSING) and (
                    result.type != test_expectations.CRASH or include_crashes))
        ]

    def _upload_json_files(self,
                           summarized_results,
                           initial_results,
                           results_including_passes=None,
                           start_time=None,
                           end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory,
                                                "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory,
                                           "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem,
                                          summarized_results,
                                          full_results_path,
                                          callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(
            self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem,
                                              results_including_passes,
                                              results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server, "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug(
                "Finished writing JSON file for the test results server.")
        else:
            _log.debug(
                "Failed to generate JSON file for the test results server.")

        json_files = [
            "incremental_results.json", "full_results.json", "times_ms.json"
        ]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(
            self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
        hostname = self._options.results_server_host
        if not hostname:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error(
                "--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files."
            )
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem,
                              self._port.host.executive).detect_scm_system(
                                  path) or self._port.host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {
                'revision': revision,
                'timestamp': scm.timestamp_of_revision(path, revision)
            }

        _log.info(
            "Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s",
            master_name, builder_name, build_number, build_slave, hostname)

        attrs = [
            ('master', 'build.webkit.org' if master_name == 'webkit.org' else
             master_name),  # FIXME: Pass in build.webkit.org.
            ('builder_name', builder_name),
            ('build_number', build_number),
            ('build_slave', build_slave),
            ('revisions', json.dumps(revisions)),
            ('start_time', str(start_time)),
            ('end_time', str(end_time)),
        ]

        uploader = FileUploader("http://%s/api/report" % hostname, 360)
        try:
            response = uploader.upload_as_multipart_form_data(
                self._filesystem, [('results.json', results_json_path)], attrs)
            if not response:
                _log.error("JSON upload failed; no response returned")
                return

            if response.code != 200:
                _log.error("JSON upload failed, %d: '%s'" %
                           (response.code, response.read()))
                return

            response_text = response.read()
            try:
                response_json = json.loads(response_text)
            except ValueError as error:
                _log.error(
                    "JSON upload failed; failed to parse the response: %s",
                    response_text)
                return

            if response_json['status'] != 'OK':
                _log.error("JSON upload failed, %s: %s",
                           response_json['status'], response_text)
                return

            _log.info("JSON uploaded.")
        except Exception as error:
            _log.error("Upload failed: %s" % error)
            return
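
The retry filter shared by these examples (_tests_to_retry) keeps every unexpected result except passes, missing results and, unless include_crashes is set, crashes. A tiny worked example follows, with stand-in strings in place of the test_expectations constants.

# Stand-in strings for test_expectations.PASS / MISSING / CRASH / TEXT.
PASS, MISSING, CRASH, TEXT = 'PASS', 'MISSING', 'CRASH', 'TEXT'

unexpected = {'a.html': PASS, 'b.html': TEXT, 'c.html': CRASH, 'd.html': MISSING}

def tests_to_retry(results_by_name, include_crashes):
    return [name for name, result_type in results_by_name.items()
            if result_type not in (PASS, MISSING)
            and (result_type != CRASH or include_crashes)]

assert sorted(tests_to_retry(unexpected, include_crashes=False)) == ['b.html']
assert sorted(tests_to_retry(unexpected, include_crashes=True)) == ['b.html', 'c.html']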
Example #27
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port,
            test_names,
            force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        # Look for tests with custom device requirements.
        test_device_mapping = defaultdict(list)
        for test_file in tests_to_run:
            device_type = self._custom_device_for_test(test_file) or self._port.DEFAULT_DEVICE_TYPE
            test_device_mapping[device_type].append(test_file)

        # Order device types from most specific to least specific in the hopes that some of the more specific device
        # types will match the less specific device types.
        device_type_order = []
        types_with_family = []
        remaining_types = []
        for device_type in test_device_mapping.iterkeys():
            if device_type and device_type.hardware_family and device_type.hardware_type:
                device_type_order.append(device_type)
            elif device_type and device_type.hardware_family:
                types_with_family.append(device_type)
            else:
                remaining_types.append(device_type)
        device_type_order.extend(types_with_family + remaining_types)

        needs_http = any((self._is_http_test(test)
                          and not self._needs_web_platform_test(test))
                         for test in tests_to_run)
        needs_web_platform_test_server = any(
            self._needs_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(
            self._is_websocket_test(test) for test in tests_to_run)
        self._runner = LayoutTestRunner(
            self._options,
            self._port,
            self._printer,
            self._results_directory,
            self._test_is_slow,
            needs_http=needs_http,
            needs_web_platform_test_server=needs_web_platform_test_server,
            needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        child_processes_option_value = self._options.child_processes

        while device_type_order:
            device_type = device_type_order[0]
            tests = test_device_mapping[device_type]
            del device_type_order[0]

            self._options.child_processes = min(
                self._port.max_child_processes(device_type=device_type),
                int(child_processes_option_value
                    or self._port.default_child_processes(
                        device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(
                    pluralize(len(test_device_mapping[device_type]), 'test'),
                    str(device_type)))
                _log.info('')
                continue

            # This loop looks for any less-specific device types which match the current device type
            index = 0
            while index < len(device_type_order):
                if device_type_order[index] == device_type:
                    tests.extend(test_device_mapping[device_type_order[index]])

                    # Remove device types from device_type_order once the tests associated with that type have been claimed.
                    del device_type_order[index]
                else:
                    index += 1

            _log.info('Running {}{}'.format(
                pluralize(len(tests), 'test'),
                ' for {}'.format(str(device_type)) if device_type else ''))
            _log.info('')
            if not self._set_up_run(tests, device_type):
                return test_run_results.RunDetails(exit_code=-1)

            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(
                tests, tests_to_skip)
            initial_results = initial_results.merge(
                temp_initial_results
            ) if initial_results else temp_initial_results
            retry_results = retry_results.merge(
                temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        self._runner.stop_servers()

        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results,
                                  retry_results, enabled_pixel_tests_in_retry)
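
The device-type bucketing in run() above orders fully specified device types (hardware family plus hardware type) first, family-only types next, and everything else last, so the more specific buckets get a chance to claim matching tests first. A self-contained sketch of that ordering, using a namedtuple as a stand-in for the port's real device type class.

# Stand-in for the port's device type objects; only the two attributes the
# ordering logic reads are modeled here.
from collections import namedtuple

DeviceType = namedtuple('DeviceType', ['hardware_family', 'hardware_type'])

def order_device_types(device_types):
    fully_specified, family_only, remaining = [], [], []
    for device_type in device_types:
        if device_type and device_type.hardware_family and device_type.hardware_type:
            fully_specified.append(device_type)
        elif device_type and device_type.hardware_family:
            family_only.append(device_type)
        else:
            remaining.append(device_type)
    return fully_specified + family_only + remaining

device_types = [None, DeviceType('iPhone', None), DeviceType('iPad', 'iPad Pro')]
assert order_device_types(device_types) == [
    DeviceType('iPad', 'iPad Pro'), DeviceType('iPhone', None), None]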