Code example #1
File: benchmark_results.py Project: visnix/WebKit
    def _aggregate_results_for_test(cls, test):
        subtest_results = cls._aggregate_results(
            test['tests']) if 'tests' in test else {}
        results = {}
        for metric_name, metric in iteritems(test.get('metrics', {})):
            if not isinstance(metric, list):
                results[metric_name] = {None: {}}
                for config_name, values in iteritems(metric):
                    results[metric_name][None][
                        config_name] = cls._flatten_list(values)
                continue

            # Filter duplicate aggregators that could have arisen from merging JSONs.
            aggregator_list = list(set(metric))
            results[metric_name] = {}
            for aggregator in aggregator_list:
                values_by_config_iteration = cls._subtest_values_by_config_iteration(
                    subtest_results, metric_name, aggregator)
                for config_name, values_by_iteration in iteritems(
                        values_by_config_iteration):
                    results[metric_name].setdefault(aggregator, {})
                    results[metric_name][aggregator][config_name] = [
                        cls._aggregate_values(aggregator, values)
                        for values in values_by_iteration
                    ]

        return {'metrics': results, 'tests': subtest_results}
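
For context, a minimal sketch of the kind of test entry this method consumes and the shape it returns. The metric name, aggregator, configuration name, and values below are illustrative assumptions, not taken from the source, and it assumes the 'Total' aggregator sums subtest values.

# Hypothetical input: the parent declares an aggregator list for 'Time',
# while each subtest provides concrete values for a 'current' configuration.
test = {
    'metrics': {'Time': ['Total']},
    'tests': {
        'Sub1': {'metrics': {'Time': {'current': [1, 2]}}},
        'Sub2': {'metrics': {'Time': {'current': [3, 4]}}},
    },
}

# Rough expected shape of _aggregate_results_for_test(test), assuming 'Total'
# sums the per-iteration subtest values:
# {
#     'metrics': {'Time': {'Total': {'current': [4, 6]}}},
#     'tests': {
#         'Sub1': {'metrics': {'Time': {None: {'current': [1, 2]}}}, 'tests': {}},
#         'Sub2': {'metrics': {'Time': {None: {'current': [3, 4]}}}, 'tests': {}},
#     },
# }
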
Code example #2
    def _lint_subtest_results(cls, subtests, parent_test, parent_aggregator_list):
        iteration_groups_by_config = {}
        for test_name, test in iteritems(subtests):
            aggregator_list = None

            if 'metrics' not in test and 'tests' not in test:
                raise TypeError('"%s" does not contain metrics or tests' % test_name)

            if 'metrics' in test:
                metrics = test['metrics']
                if not isinstance(metrics, dict):
                    raise TypeError('The metrics in "%s" is not a dictionary' % test_name)
                for metric_name, metric in iteritems(metrics):
                    if isinstance(metric, list):
                        cls._lint_aggregator_list(test_name, metric_name, metric, parent_test, parent_aggregator_list)
                        aggregator_list = metric
                    elif isinstance(metric, dict):
                        cls._lint_configuration(test_name, metric_name, metric, parent_test, parent_aggregator_list, iteration_groups_by_config)
                    else:
                        raise TypeError('"%s" metric of "%s" was not an aggregator list or a dictionary of configurations: %s' % (metric_name, test_name, str(metric)))

            if 'tests' in test:
                cls._lint_subtest_results(test['tests'], test_name, aggregator_list)
            elif aggregator_list:
                raise TypeError('"%s" requires aggregation but it has no subtests' % (test_name))
        return iteration_groups_by_config
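
A small sketch of input this lint rejects, using a made-up test name; the error text mirrors the raise at the top of the snippet.

# A test entry with neither 'metrics' nor 'tests' is rejected up front:
bad = {'SomeTest': {}}
# cls._lint_subtest_results(bad, None, None) raises:
#   TypeError: "SomeTest" does not contain metrics or tests
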
Code example #3
File: benchmark_results.py Project: visnix/WebKit
    def _lint_configuration(cls, test_name, metric_name, configurations,
                            parent_test, parent_aggregator_list,
                            iteration_groups_by_config):
        # FIXME: Check that config_name is always "current".
        for config_name, values in iteritems(configurations):
            nested_list_count = [isinstance(value, list)
                                 for value in values].count(True)
            if nested_list_count not in [0, len(values)]:
                raise TypeError(
                    '"%s" metric of "%s" had malformed values: %s' %
                    (metric_name, test_name, json.dumps(values)))

            if nested_list_count:
                value_shape = []
                for value_group in values:
                    value_shape.append(len(value_group))
                    cls._lint_values(test_name, metric_name, value_group)
            else:
                value_shape = len(values)
                cls._lint_values(test_name, metric_name, values)

            iteration_groups_by_config.setdefault(metric_name, {}).setdefault(
                config_name, value_shape)
            if parent_aggregator_list and value_shape != iteration_groups_by_config[
                    metric_name][config_name]:
                raise TypeError(
                    '"%s" metric of "%s" had a mismatching subtest values' %
                    (metric_name, parent_test))
Code example #4
    def _subtest_values_by_config_iteration(cls, subtest_results, metric_name, aggregator):
        values_by_config_iteration = {}
        for subtest_name, subtest in iteritems(subtest_results):
            results_for_metric = subtest['metrics'].get(metric_name, {})
            if aggregator in results_for_metric:
                results_for_aggregator = results_for_metric.get(aggregator)
            elif None in results_for_metric:
                results_for_aggregator = results_for_metric.get(None)
            elif len(results_for_metric.keys()) == 1:
                results_for_aggregator = results_for_metric.get(list(results_for_metric.keys())[0])
            else:
                results_for_aggregator = {}
            for config_name, values in iteritems(results_for_aggregator):
                values_by_config_iteration.setdefault(config_name, [[] for _ in values])
                for iteration, value in enumerate(values):
                    values_by_config_iteration[config_name][iteration].append(value)
        return values_by_config_iteration
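
A hedged illustration of the reshaping this helper performs, with hypothetical, already-aggregated subtest results (one 'current' configuration, two iterations per subtest):

# Shape only; the values are made up.
subtest_results = {
    'Sub1': {'metrics': {'Time': {None: {'current': [1, 2]}}}, 'tests': {}},
    'Sub2': {'metrics': {'Time': {None: {'current': [3, 4]}}}, 'tests': {}},
}
# For metric_name='Time', values are regrouped per configuration and per
# iteration, each inner list holding one value from every subtest:
# {'current': [[1, 3], [2, 4]]}
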
Code example #5
File: layout_test_runner.py Project: visnix/WebKit
    def _do_post_tests_work(self, driver):
        additional_results = []
        if not driver:
            return additional_results

        post_test_output = driver.do_post_tests_work()
        if post_test_output:
            for test_name, doc_list in iteritems(post_test_output.world_leaks_dict):
                additional_results.append(test_results.TestResult(test_name, [test_failures.FailureDocumentLeak(doc_list)]))
        return additional_results
Code example #6
File: manager.py Project: eocanha/webkit
    def _print_tests_result_with_status(self, status, runner):
        mapping = runner.result_map_by_status(status)
        if mapping:
            self._stream.writeln(runner.NAME_FOR_STATUS[status])
            self._stream.writeln('')
            need_newline = False
            for test, output in iteritems(mapping):
                need_newline = Manager._print_test_result(
                    self._stream, test, output)
            if need_newline:
                self._stream.writeln('')
Code example #7
    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in iteritems(stats):
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
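
json_results_generator.add_path_to_trie is not shown here; the sketch below is a rough standalone equivalent of the behavior this code relies on (the real helper's implementation may differ).

def add_path_to_trie_sketch(path, value, trie):
    # 'fast/forms/button.html' becomes {'fast': {'forms': {'button.html': value}}}.
    head, _, rest = path.partition('/')
    if not rest:
        trie[head] = value
        return
    trie.setdefault(head, {})
    add_path_to_trie_sketch(rest, value, trie[head])
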
Code example #8
def convert_trie_to_flat_paths(trie, prefix=None):
    """Converts the directory structure in the given trie to flat paths, prepending a prefix to each."""
    result = {}
    for name, data in iteritems(trie):
        if prefix:
            name = prefix + "/" + name

        if len(data) and not "results" in data:
            result.update(convert_trie_to_flat_paths(data, name))
        else:
            result[name] = data

    return result
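
A quick usage sketch with hypothetical test names, showing how the recursion flattens directories and stops at dictionaries that carry a "results" key:

trie = {
    'fast': {
        'forms': {
            'button.html': {'results': ('PASS',)},
            'select.html': {'results': ('FAIL',)},
        },
    },
}
flat = convert_trie_to_flat_paths(trie)
# {'fast/forms/button.html': {'results': ('PASS',)},
#  'fast/forms/select.html': {'results': ('FAIL',)}}
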
Code example #9
    def _convert_json_to_current_version(self, results_json):
        """If the JSON does not match the current version, converts it to the
        current version and adds in the new version number.
        """
        if self.VERSION_KEY in results_json:
            archive_version = results_json[self.VERSION_KEY]
            if archive_version == self.VERSION:
                return
        else:
            archive_version = 3

        # version 3->4
        if archive_version == 3:
            num_results = len(results_json.values())
            for builder, results in iteritems(results_json):
                self._convert_tests_to_trie(results)

        results_json[self.VERSION_KEY] = self.VERSION
Code example #10
    def run(self, tests, num_workers):
        if not tests:
            return

        self.printer.write_update('Sharding tests ...')
        shards = Runner._shard_tests(tests)

        original_level = server_process_logger.level
        server_process_logger.setLevel(logging.CRITICAL)

        try:
            self._num_workers = min(num_workers, len(shards))
            with message_pool.get(
                    self, lambda caller: _Worker(caller, self.port, shards),
                    self._num_workers) as pool:
                pool.run(('test', shard) for shard, _ in iteritems(shards))
        finally:
            server_process_logger.setLevel(original_level)
Code example #11
    def _should_log_linter_warning(warning, files, cwd, host):
        abs_filename = host.filesystem.join(cwd, warning.filename)

        # Case 1, the line the warning was tied to is in our patch.
        if abs_filename in files and files[abs_filename] and warning.line_number in files[abs_filename]:
            return True

        for file, lines in iteritems(warning.related_files):
            abs_filename = host.filesystem.join(cwd, file)
            if abs_filename in files:
                # Case 2, a file associated with the warning is in our patch
                # Note that this will really only happen if you delete a test.
                if lines is None:
                    return True

                # Case 3, a line associated with the warning is in our patch.
                for line in lines:
                    if files[abs_filename] and line in files[abs_filename]:
                        return True
        return False
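
The data shapes this check assumes, sketched with hypothetical paths and line numbers (and cwd = '/repo'): files maps absolute paths touched by the patch to their changed lines, and warning.related_files maps a path to either None (whole-file association) or a list of lines.

# Hypothetical data covering the three cases above:
files = {'/repo/LayoutTests/fast/a.html': [10, 11, 12]}
# Case 1: warning.filename = 'LayoutTests/fast/a.html', warning.line_number = 11  -> True
# Case 2: warning.related_files = {'LayoutTests/fast/a.html': None}               -> True
# Case 3: warning.related_files = {'LayoutTests/fast/a.html': [12, 40]}           -> True
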
Code example #12
File: layout_test_runner.py Project: visnix/WebKit
    def _shard_by_directory(self, test_inputs, num_workers):
        """Returns a lists of shards, each shard containing all the files in a directory.

        This is the default mode, and gets as much parallelism as we can while
        minimizing flakiness caused by inter-test dependencies."""
        shards = []
        tests_by_dir = {}
        # FIXME: Given that the tests are already sorted by directory,
        # we can probably rewrite this to be clearer and faster.
        for test_input in test_inputs:
            directory = self._split(test_input.test_name)[0]
            tests_by_dir.setdefault(directory, [])
            tests_by_dir[directory].append(test_input)

        for directory, test_inputs in iteritems(tests_by_dir):
            shard = TestShard(directory, test_inputs)
            shards.append(shard)

        # Sort the shards by directory name.
        shards.sort(key=lambda shard: shard.name)

        return shards
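
For intuition, a hedged standalone equivalent of the grouping, assuming self._split() behaves like splitting a test name into (directory, basename):

from collections import defaultdict

def shard_by_directory_sketch(test_names):
    shards = defaultdict(list)
    for name in test_names:
        directory = name.rsplit('/', 1)[0]  # stand-in for self._split(name)[0]
        shards[directory].append(name)
    return sorted(shards.items())

# shard_by_directory_sketch(['fast/forms/button.html',
#                            'fast/forms/select.html',
#                            'http/tests/xhr/post.html'])
# -> [('fast/forms', ['fast/forms/button.html', 'fast/forms/select.html']),
#     ('http/tests/xhr', ['http/tests/xhr/post.html'])]
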
Code example #13
    def run(self, tests, num_workers):
        if not tests:
            return

        self.printer.write_update('Sharding tests ...')
        shards = Runner._shard_tests(tests)

        original_level = server_process_logger.level
        server_process_logger.setLevel(logging.CRITICAL)

        try:
            if Runner.instance:
                raise RuntimeError('Cannot nest API test runners')
            Runner.instance = self
            self._num_workers = min(num_workers, len(shards))

            devices = None
            if getattr(self.port, 'DEVICE_MANAGER', None):
                devices = dict(
                    available_devices=self.port.DEVICE_MANAGER.AVAILABLE_DEVICES,
                    initialized_devices=self.port.DEVICE_MANAGER.INITIALIZED_DEVICES,
                )

            with TaskPool(
                    workers=self._num_workers,
                    setup=setup_shard,
                    setupkwargs=dict(port=self.port, devices=devices),
                    teardown=teardown_shard,
            ) as pool:
                for name, tests in iteritems(shards):
                    pool.do(run_shard, name, *tests)
                pool.wait()

        finally:
            server_process_logger.setLevel(original_level)
            Runner.instance = None
Code example #14
    def __init__(self, port_obj, output_dir, additional_dirs=None, port=None):
        """Args:
          port_obj: handle to the platform-specific routines
          output_dir: the absolute path to the layout test result directory
        """
        http_server_base.HttpServerBase.__init__(self, port_obj)
        # We use the name "httpd" instead of "apache" to make our paths (e.g. the pid file: /tmp/WebKit/httpd.pid)
        # match old-run-webkit-tests: https://bugs.webkit.org/show_bug.cgi?id=63956

        self._name = 'httpd'
        self._port = port
        if self._port is not None:
            self._mappings = [{'port': self._port}]
        else:
            self._mappings = [{'port': self.HTTP_SERVER_PORT},
                              {'port': self.ALTERNATIVE_HTTP_SERVER_PORT},
                              {'port': self.HTTPS_SERVER_PORT, 'sslcert': True}]
        self._output_dir = output_dir
        self._filesystem.maybe_make_directory(output_dir)

        self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)

        if port_obj.host.platform.is_cygwin():
            # Convert to MSDOS file naming:
            precompiledBuildbot = re.compile('^/home/buildbot')
            precompiledDrive = re.compile('^/cygdrive/[cC]')
            output_dir = precompiledBuildbot.sub("C:/cygwin/home/buildbot", output_dir)
            output_dir = precompiledDrive.sub("C:", output_dir)
            self.tests_dir = precompiledBuildbot.sub("C:/cygwin/home/buildbot", self.tests_dir)
            self.tests_dir = precompiledDrive.sub("C:", self.tests_dir)
            self._pid_file = self._filesystem.join("C:/xampp/apache/logs", '%s.pid' % self._name)

        mime_types_path = self._filesystem.join(self.tests_dir, "http", "conf", "mime.types")
        cert_file = self._filesystem.join(self.tests_dir, "http", "conf", "webkit-httpd.pem")
        access_log = self._filesystem.join(output_dir, "access_log.txt")
        error_log = self._filesystem.join(output_dir, "error_log.txt")
        document_root = self._filesystem.join(self.tests_dir, "http", "tests")
        php_ini_dir = self._filesystem.join(self.tests_dir, "http", "conf")

        if port_obj.get_option('http_access_log'):
            access_log = port_obj.get_option('http_access_log')

        if port_obj.get_option('http_error_log'):
            error_log = port_obj.get_option('http_error_log')

        # FIXME: We shouldn't be calling a protected method of _port_obj!
        executable = self._port_obj._path_to_apache()
        config_file_path = self._copy_apache_config_file(self.tests_dir, output_dir)

        start_cmd = [executable,
            '-f', config_file_path,
            '-C', 'DocumentRoot "%s"' % document_root,
            '-c', 'TypesConfig "%s"' % mime_types_path,
            '-c', 'PHPINIDir "%s"' % php_ini_dir,
            '-c', 'CustomLog "%s" common' % access_log,
            '-c', 'ErrorLog "%s"' % error_log,
            '-c', 'PidFile "%s"' % self._pid_file,
            '-k', "start"]

        for (alias, path) in self.aliases():
            start_cmd.extend(['-c', 'Alias %s "%s"' % (alias, path)])

        if not port_obj.host.platform.is_win():
            start_cmd.extend(['-C', 'User "%s"' % os.environ.get("USERNAME", os.environ.get("USER", ""))])

        enable_ipv6 = self._port_obj.http_server_supports_ipv6()
        # Perform part of the checks Apache's APR does when trying to listen to
        # a specific host/port. This allows us to avoid trying to listen to
        # IPV6 addresses when it fails on Apache. APR itself tries to call
        # getaddrinfo() again without AI_ADDRCONFIG if the first call fails
        # with EBADFLAGS, but that is not how it normally fails in our use
        # cases, so ignore that for now.
        # See https://bugs.webkit.org/show_bug.cgi?id=98602#c7
        try:
            socket.getaddrinfo('::1', 0, 0, 0, 0, socket.AI_ADDRCONFIG)
        except:
            enable_ipv6 = False

        bind_address = '' if self._port_obj.get_option("http_all_interfaces") else '127.0.0.1:'

        for mapping in self._mappings:
            port = mapping['port']

            start_cmd += ['-C', 'Listen %s%d' % (bind_address, port)]

            # We listen to both IPv4 and IPv6 loop-back addresses, but ignore
            # requests to 8000 from random users on network.
            # See https://bugs.webkit.org/show_bug.cgi?id=37104
            if enable_ipv6:
                start_cmd += ['-C', 'Listen [::1]:%d' % port]

        if additional_dirs:
            for alias, path in iteritems(additional_dirs):
                start_cmd += ['-c', 'Alias %s "%s"' % (alias, path),
                        # Disable CGI handler for additional dirs.
                        '-c', '<Location %s>' % alias,
                        '-c', 'RemoveHandler .cgi .pl',
                        '-c', '</Location>']

        stop_cmd = [executable,
            '-f', config_file_path,
            '-c', 'PidFile "%s"' % self._pid_file,
            '-k', "stop"]

        start_cmd.extend(['-c', 'SSLCertificateFile "%s"' % cert_file])

        self._start_cmd = start_cmd
        self._stop_cmd = stop_cmd
Code example #15
def summarize_results(port_obj, expectations_by_type, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=False, include_time_and_modifiers=False):
    """Returns a dictionary containing a summary of the test runs, with the following fields:
        'version': a version indicator
        'fixable': The number of fixable tests (NOW - PASS)
        'skipped': The number of skipped tests (NOW & SKIPPED)
        'num_regressions': The number of non-flaky failures
        'num_flaky': The number of flaky failures
        'num_missing': The number of tests with missing results
        'num_passes': The number of unexpected passes
        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
        'date': the current date and time
    """
    results = {}
    results['version'] = 4

    tbe = initial_results.tests_by_expectation
    tbt = initial_results.tests_by_timeline
    results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
    results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])

    num_passes = 0
    num_flaky = 0
    num_missing = 0
    num_regressions = 0
    keywords = {}
    for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.items():
        keywords[expectation_enum] = expectation_string.upper()

    for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.items():
        keywords[modifier_enum] = modifier_string.upper()

    tests = {}
    other_crashes_dict = {}

    for test_name, result in iteritems(initial_results.results_by_name):
        # Note that if a test crashed in the original run, we ignore
        # whether or not it crashed when we retried it (if we retried it),
        # and always consider the result not flaky.
        pixel_tests_enabled = enabled_pixel_tests_in_retry or port_obj._options.pixel_tests or bool(result.reftest_type)

        # We're basically trying to find the first non-skip expectation, and use that expectation object for the remainder of the loop.
        # This works because tests are run on the first device type which won't skip them, regardless of other expectations, and never re-run.
        expected = 'SKIP'
        expectations = list(expectations_by_type.values())[0]
        for element in expectations_by_type.values():
            test_expectation = element.filtered_expectations_for_test(test_name, pixel_tests_enabled, port_obj._options.world_leaks)
            expected = element.model().expectations_to_string(test_expectation)
            if expected != 'SKIP':
                expectations = element
                break

        result_type = result.type
        actual = [keywords[result_type]]

        if result_type == test_expectations.SKIP:
            continue

        if result.is_other_crash:
            other_crashes_dict[test_name] = {}
            continue

        test_dict = {}
        if result.has_stderr:
            test_dict['has_stderr'] = True

        if result.reftest_type:
            test_dict.update(reftest_type=list(result.reftest_type))

        if expectations.model().has_modifier(test_name, test_expectations.WONTFIX):
            test_dict['wontfix'] = True

        if result_type == test_expectations.PASS:
            num_passes += 1
            # FIXME: include passing tests that have stderr output.
            if expected == 'PASS' and not include_passes:
                continue
        elif result_type == test_expectations.CRASH:
            if test_name in initial_results.unexpected_results_by_name:
                num_regressions += 1
                test_dict['report'] = 'REGRESSION'
        elif result_type == test_expectations.MISSING:
            if test_name in initial_results.unexpected_results_by_name:
                num_missing += 1
                test_dict['report'] = 'MISSING'
        elif test_name in initial_results.unexpected_results_by_name:
            if retry_results and test_name not in retry_results.unexpected_results_by_name:
                actual.extend(expectations.model().get_expectations_string(test_name).split(" "))
                num_flaky += 1
                test_dict['report'] = 'FLAKY'
            elif retry_results:
                retry_result_type = retry_results.unexpected_results_by_name[test_name].type
                if result_type != retry_result_type:
                    if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and (retry_result_type == test_expectations.IMAGE_PLUS_TEXT or retry_result_type == test_expectations.MISSING):
                        if retry_result_type == test_expectations.MISSING:
                            num_missing += 1
                        num_regressions += 1
                        test_dict['report'] = 'REGRESSION'
                    else:
                        num_flaky += 1
                        test_dict['report'] = 'FLAKY'
                    actual.append(keywords[retry_result_type])
                else:
                    num_regressions += 1
                    test_dict['report'] = 'REGRESSION'
            else:
                num_regressions += 1
                test_dict['report'] = 'REGRESSION'

        test_dict['expected'] = expected
        test_dict['actual'] = " ".join(actual)
        if include_time_and_modifiers:
            test_dict['time'] = round(1000 * result.test_run_time)
            # FIXME: Fix get_modifiers to return modifiers in new format.
            test_dict['modifiers'] = ' '.join(expectations.model().get_modifiers(test_name)).replace('BUGWK', 'webkit.org/b/')

        test_dict.update(_interpret_test_failures(result.failures))

        if retry_results:
            retry_result = retry_results.unexpected_results_by_name.get(test_name)
            if retry_result:
                test_dict.update(_interpret_test_failures(retry_result.failures))

        # Store test hierarchically by directory. e.g.
        # foo/bar/baz.html: test_dict
        # foo/bar/baz1.html: test_dict
        #
        # becomes
        # foo: {
        #     bar: {
        #         baz.html: test_dict,
        #         baz1.html: test_dict
        #     }
        # }
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == (len(parts) - 1):
                current_map[part] = test_dict
                break
            if part not in current_map:
                current_map[part] = {}
            current_map = current_map[part]

    results['tests'] = tests
    results['num_passes'] = num_passes
    results['num_flaky'] = num_flaky
    results['num_missing'] = num_missing
    results['num_regressions'] = num_regressions
    results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
    results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
    results['layout_tests_dir'] = port_obj.layout_tests_dir()
    results['has_pretty_patch'] = port_obj.pretty_patch.pretty_patch_available()
    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
    results['other_crashes'] = other_crashes_dict
    results['date'] = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")

    try:
        # We only use the svn revision for using trac links in the results.html file,
        # Don't do this by default since it takes >100ms.
        # FIXME: Do we really need to populate this both here and in the json_results_generator?
        if port_obj.get_option("builder_name"):
            port_obj.host.initialize_scm()
            results['revision'] = port_obj.host.scm().head_svn_revision()
    except Exception as e:
        _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
        # Handle cases where we're running outside of version control.
        import traceback
        _log.debug('Failed to learn head svn revision:')
        _log.debug(traceback.format_exc())
        results['revision'] = ""

    return results
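
The hierarchical-storage loop near the end can be read on its own; here is a standalone sketch of it with a hypothetical test name:

def insert_hierarchically(tests, test_name, test_dict):
    # Mirrors the parts/current_map loop above.
    parts = test_name.split('/')
    current_map = tests
    for i, part in enumerate(parts):
        if i == len(parts) - 1:
            current_map[part] = test_dict
            break
        current_map = current_map.setdefault(part, {})

tests = {}
insert_hierarchically(tests, 'foo/bar/baz.html', {'expected': 'PASS', 'actual': 'PASS'})
# tests == {'foo': {'bar': {'baz.html': {'expected': 'PASS', 'actual': 'PASS'}}}}
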
Code example #16
File: http_server.py Project: wuyibo0817/webkit
    def _prepare_config(self):
        base_conf_file = self._port_obj.path_from_webkit_base('Tools',
            'Scripts', 'webkitpy', 'layout_tests', 'servers', 'lighttpd.conf')
        out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf')
        time_str = time.strftime("%d%b%Y-%H%M%S")
        access_file_name = "access.log-" + time_str + ".txt"
        access_log = os.path.join(self._output_dir, access_file_name)
        log_file_name = "error.log-" + time_str + ".txt"
        error_log = os.path.join(self._output_dir, log_file_name)

        if self._port_obj.get_option('http_access_log'):
            access_log = self._port_obj.get_option('http_access_log')

        if self._port_obj.get_option('http_error_log'):
            error_log = self._port_obj.get_option('http_error_log')

        # Write out the config
        base_conf = self._filesystem.read_text_file(base_conf_file)

        # FIXME: This should be re-worked so that this block can
        # use with open() instead of a manual file.close() call.
        f = self._filesystem.open_text_file_for_writing(out_conf_file)
        f.write(base_conf)

        # Write out our cgi handlers.  Run perl through env so that it
        # processes the #! line and runs perl with the proper command
        # line arguments. Emulate apache's mod_asis with a cat cgi handler.
        f.write(('cgi.assign = ( ".cgi"  => "/usr/bin/env",\n'
                 '               ".pl"   => "/usr/bin/env",\n'
                 '               ".asis" => "/bin/cat",\n'
                 '               ".php"  => "%s" )\n\n') %
                                     self._port_obj._path_to_lighttpd_php())

        # Setup log files
        f.write(('server.errorlog = "%s"\n'
                 'accesslog.filename = "%s"\n\n') % (error_log, access_log))

        # Setup upload folders. Upload folder is to hold temporary upload files
        # and also POST data. This is used to support XHR layout tests that
        # does POST.
        f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir))

        # Setup a link to where the js test templates and media resources are stored.
        operator = "="
        for alias in self.aliases():
            f.write(('alias.url %s ( "%s" => "%s" )\n\n') % (operator, alias[0], alias[1]))
            operator = "+="

        if self._additional_dirs:
            for alias, path in iteritems(self._additional_dirs):
                f.write(('alias.url += ( "%s" => "%s" )\n\n') % (alias, path))

        # dump out of virtual host config at the bottom.
        if self._root:
            if self._port:
                # Have both port and root dir.
                mappings = [{'port': self._port, 'docroot': self._root}]
            else:
                # Have only a root dir - set the ports as for LayoutTests.
                # This is used in ui_tests to run http tests against a browser.

                # default set of ports as for LayoutTests but with a
                # specified root.
                mappings = [{'port': self.HTTP_SERVER_PORT, 'docroot': self._root},
                            {'port': self.ALTERNATIVE_HTTP_SERVER_PORT, 'docroot': self._root},
                            {'port': self.HTTPS_SERVER_PORT, 'docroot': self._root,
                             'sslcert': self._pem_file}]
        else:
            mappings = self.VIRTUALCONFIG

        bind_address = '' if self._port_obj.get_option('http_all_addresses') else '127.0.0.1'
        for mapping in mappings:
            ssl_setup = ''
            if 'sslcert' in mapping:
                ssl_setup = ('  ssl.engine = "enable"\n'
                             '  ssl.pemfile = "%s"\n' % mapping['sslcert'])

            f.write(('$SERVER["socket"] == "%s:%d" {\n'
                     '  server.document-root = "%s"\n' +
                     ssl_setup +
                     '}\n\n') % (bind_address, mapping['port'], mapping['docroot']))
        f.close()

        executable = self._port_obj._path_to_lighttpd()
        module_path = self._port_obj._path_to_lighttpd_modules()
        start_cmd = [executable,
                     # Newly written config file
                     '-f', os.path.join(self._output_dir, 'lighttpd.conf'),
                     # Where it can find its module dynamic libraries
                     '-m', module_path]

        if not self._run_background:
            start_cmd.append('-D')  # Don't background.

        # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the
        # bug that mod_alias.so loads it from the hard coded path.
        if self._port_obj.host.platform.is_mac():
            tmp_module_path = '/tmp/lighttpd/lib'
            if not self._filesystem.exists(tmp_module_path):
                self._filesystem.maybe_make_directory(tmp_module_path)
            lib_file = 'liblightcomp.dylib'
            self._filesystem.copyfile(self._filesystem.join(module_path, lib_file),
                                      self._filesystem.join(tmp_module_path, lib_file))

        self._start_cmd = start_cmd
        self._env = self._port_obj.setup_environ_for_server('lighttpd')
        self._mappings = mappings
Code example #17
File: benchmark_results.py Project: visnix/WebKit
    def _aggregate_results(cls, tests):
        results = {}
        for test_name, test in iteritems(tests):
            results[test_name] = cls._aggregate_results_for_test(test)
        return results
Code example #18
File: buildbot_results.py Project: wuyibo0817/webkit
    def print_unexpected_results(self,
                                 summarized_results,
                                 enabled_pixel_tests_in_retry=False):
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test,
                       results,
                       passes=passes,
                       flaky=flaky,
                       regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")

            def is_expected(result):
                return (result in expected) or (result in ('AUDIO', 'TEXT',
                                                           'IMAGE+TEXT')
                                                and 'FAIL' in expected)

            if all(is_expected(actual_result) for actual_result in actual):
                # Don't print anything for tests that ran as expected.
                return

            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to timeout, but passed',
                                         test)
                else:
                    add_to_dict_of_lists(passes,
                                         'Expected to fail, but passed', test)
            elif enabled_pixel_tests_in_retry and (
                    actual == ['TEXT', 'IMAGE+TEXT']
                    or actual == ['TEXT', 'MISSING']):
                add_to_dict_of_lists(regressions, actual[0], test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(summarized_results['tests'],
                                        add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print("")
        if len(passes):
            for key, tests in iteritems(passes):
                self._print("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s" % test)
                self._print("")
            self._print("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in iteritems(flaky):
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Unexpected flakiness: %s (%d)" %
                            (descriptions[result], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(
                        summarized_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    # FIXME: clean this up once the old syntax is gone
                    new_expectations_list = [
                        TestExpectationParser._inverted_expectation_tokens[exp]
                        for exp in list(set(actual) | set(expected))
                    ]
                    self._print("  %s [ %s ]" %
                                (test, " ".join(new_expectations_list)))
                self._print("")
            self._print("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in iteritems(regressions):
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Regressions: Unexpected %s (%d)" %
                            (descriptions[result], len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s [ %s ]" %
                                (test, TestExpectationParser.
                                 _inverted_expectation_tokens[key]))
                self._print("")

        if len(summarized_results['tests']) and self.debug_logging:
            self._print("%s" % ("-" * 78))
Code example #19
    def result_map_by_status(self, status=None):
        map = {}
        for test_name, result in iteritems(self.results):
            if result[0] == status:
                map[test_name] = result[1]
        return map
Code example #20
File: manager.py Project: eocanha/webkit
    def run(self, args, json_output=None):
        if json_output:
            json_output = self.host.filesystem.abspath(json_output)
            if not self.host.filesystem.isdir(
                    self.host.filesystem.dirname(json_output)
            ) or self.host.filesystem.isdir(json_output):
                raise RuntimeError('Cannot write to {}'.format(json_output))

        start_time = time.time()

        self._stream.write_update('Checking build ...')
        if not self._port.check_api_test_build(
                self._binaries_for_arguments(args)):
            _log.error('Build check failed')
            return Manager.FAILED_BUILD_CHECK

        self._initialize_devices()

        self._stream.write_update('Collecting tests ...')
        try:
            test_names = self._collect_tests(args)
        except ScriptError:
            self._stream.writeln('Failed to collect tests')
            return Manager.FAILED_COLLECT_TESTS
        self._stream.write_update('Found {} tests'.format(len(test_names)))
        if len(test_names) == 0:
            self._stream.writeln('No tests found')
            return Manager.FAILED_COLLECT_TESTS

        if self._port.get_option('dump'):
            for test in test_names:
                self._stream.writeln(test)
            return Manager.SUCCESS

        test_names = [
            test for test in test_names
            for _ in range(self._options.repeat_each)
        ]
        if self._options.repeat_each != 1:
            _log.debug('Repeating each test {} times'.format(
                self._options.repeat_each))

        try:
            _log.info('Running tests')
            runner = Runner(self._port, self._stream)
            for i in range(self._options.iterations):
                _log.debug('\nIteration {}'.format(i + 1))
                runner.run(
                    test_names,
                    int(self._options.child_processes)
                    if self._options.child_processes else
                    self._port.default_child_processes())
        except KeyboardInterrupt:
            # If we receive a KeyboardInterrupt, print results.
            self._stream.writeln('')

        end_time = time.time()

        successful = runner.result_map_by_status(runner.STATUS_PASSED)
        disabled = len(runner.result_map_by_status(runner.STATUS_DISABLED))
        _log.info('Ran {} tests of {} with {} successful'.format(
            len(runner.results) - disabled, len(test_names), len(successful)))

        result_dictionary = {
            'Skipped': [],
            'Failed': [],
            'Crashed': [],
            'Timedout': [],
        }

        self._stream.writeln('-' * 30)
        result = Manager.SUCCESS
        if len(successful) * self._options.repeat_each + disabled == len(
                test_names):
            self._stream.writeln('All tests successfully passed!')
            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))
        else:
            self._stream.writeln('Test suite failed')
            self._stream.writeln('')

            skipped = []
            for test in test_names:
                if test not in runner.results:
                    skipped.append(test)
                    result_dictionary['Skipped'].append({
                        'name': test,
                        'output': None
                    })
            if skipped:
                self._stream.writeln('Skipped {} tests'.format(len(skipped)))
                self._stream.writeln('')
                if self._options.verbose:
                    for test in skipped:
                        self._stream.writeln('    {}'.format(test))

            self._print_tests_result_with_status(runner.STATUS_FAILED, runner)
            self._print_tests_result_with_status(runner.STATUS_CRASHED, runner)
            self._print_tests_result_with_status(runner.STATUS_TIMEOUT, runner)

            for test, result in iteritems(runner.results):
                status_to_string = {
                    runner.STATUS_FAILED: 'Failed',
                    runner.STATUS_CRASHED: 'Crashed',
                    runner.STATUS_TIMEOUT: 'Timedout',
                }.get(result[0])
                if not status_to_string:
                    continue
                result_dictionary[status_to_string].append({
                    'name': test,
                    'output': result[1]
                })

            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))

            result = Manager.FAILED_TESTS

        if self._options.report_urls:
            self._stream.writeln('\n')
            self._stream.write_update('Preparing upload data ...')

            status_to_test_result = {
                runner.STATUS_PASSED: None,
                runner.STATUS_FAILED: Upload.Expectations.FAIL,
                runner.STATUS_CRASHED: Upload.Expectations.CRASH,
                runner.STATUS_TIMEOUT: Upload.Expectations.TIMEOUT,
            }
            upload = Upload(
                suite=self._options.suite or 'api-tests',
                configuration=self._port.configuration_for_upload(
                    self._port.target_host(0)),
                details=Upload.create_details(options=self._options),
                commits=self._port.commits_for_upload(),
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(result_dictionary['Skipped']),
                ),
                results={
                    test: Upload.create_test_result(
                        actual=status_to_test_result[result[0]])
                    for test, result in iteritems(runner.results)
                    if result[0] in status_to_test_result
                },
            )
            for url in self._options.report_urls:
                self._stream.write_update('Uploading to {} ...'.format(url))
                if not upload.upload(url, log_line_func=self._stream.writeln):
                    result = Manager.FAILED_UPLOAD
            self._stream.writeln('Uploads completed!')

        return result