Code example #1
File: wptserve.py  Project: IncoCura/qt5
    def __init__(self, port_obj, output_dir):
        super(WPTServe, self).__init__(port_obj, output_dir)
        # These ports must match wpt_support/wpt.config.json
        http_port, http_alt_port, https_port = (8001, 8081, 8444)
        ws_port, wss_port = (9001, 9444)
        self._name = 'wptserve'
        self._log_prefixes = ('access_log', 'error_log')
        self._mappings = [{
            'port': http_port
        }, {
            'port': http_alt_port
        }, {
            'port': https_port,
            'sslcert': True
        }, {
            'port': ws_port
        }, {
            'port': wss_port,
            'sslcert': True
        }]

        # TODO(burnik): We can probably avoid PID files for WPT in the future.
        fs = self._filesystem
        self._pid_file = fs.join(self._runtime_path, '%s.pid' % self._name)

        finder = PathFinder(fs)
        path_to_thirdparty = finder.path_from_tools_scripts(
            'webkitpy', 'thirdparty')
        path_to_wpt_support = finder.path_from_tools_scripts(
            'webkitpy', 'thirdparty', 'wpt')
        path_to_wpt_root = fs.join(path_to_wpt_support, 'wpt')
        path_to_wpt_config = fs.join(path_to_wpt_support, 'wpt.config.json')
        path_to_wpt_tests = fs.abspath(
            fs.join(self._port_obj.layout_tests_dir(), 'external', 'wpt'))
        path_to_ws_handlers = fs.join(path_to_wpt_tests, 'websockets',
                                      'handlers')
        serve_script = fs.join(path_to_wpt_root, 'serve')
        start_cmd = [
            self._port_obj.host.executable, '-u', serve_script, '--config',
            path_to_wpt_config, '--doc_root', path_to_wpt_tests
        ]

        # TODO(burnik): Merge with default start_cmd once we roll in websockets.
        if self._port_obj.host.filesystem.exists(path_to_ws_handlers):
            start_cmd += ['--ws_doc_root', path_to_ws_handlers]

        self._stdout = self._stderr = self._executive.DEVNULL
        # TODO(burnik): We should stop setting the CWD once WPT can be run without it.
        self._cwd = path_to_wpt_root
        self._env = port_obj.host.environ.copy()
        self._env.update({'PYTHONPATH': path_to_thirdparty})
        self._start_cmd = start_cmd

        expiration_date = datetime.date(2025, 1, 4)
        # Warn once we are within 30 days of the certificate expiration date.
        if datetime.date.today() > expiration_date - datetime.timedelta(30):
            logging.getLogger(__name__).error(
                'Pre-generated keys and certificates will expire on %s.'
                ' Please re-generate them by following the steps in'
                ' %s/README.chromium.'
                % (expiration_date.strftime('%b %d %Y'), path_to_wpt_support))
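The launch pattern above reduces to: build an argv list, override PYTHONPATH, discard the child's output, and pin the working directory. A minimal standalone sketch of the same pattern, assuming placeholder paths (serve_script, config_path, doc_root, thirdparty_path) and plain 'python' in place of port_obj.host.executable:

import os
import subprocess

def launch_wpt_server(serve_script, config_path, doc_root, thirdparty_path):
    # Mirror WPTServe: inherit the environment, override PYTHONPATH,
    # run unbuffered, and send the server's output to /dev/null.
    env = os.environ.copy()
    env['PYTHONPATH'] = thirdparty_path
    cmd = ['python', '-u', serve_script,
           '--config', config_path, '--doc_root', doc_root]
    devnull = open(os.devnull, 'wb')
    return subprocess.Popen(cmd, cwd=os.path.dirname(serve_script),
                            env=env, stdout=devnull, stderr=devnull)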
Code example #2
File: wpt_manifest.py  Project: sokolovp/BraveMining
    def generate_manifest(host, dest_path):
        """Generates MANIFEST.json on the specified directory."""
        executive = host.executive
        finder = PathFinder(host.filesystem)
        wpt_exec_path = finder.path_from_tools_scripts('webkitpy',
                                                       'thirdparty', 'wpt',
                                                       'wpt', 'wpt')

        cmd = [
            'python', wpt_exec_path, 'manifest', '--work', '--tests-root',
            dest_path
        ]
        _log.debug('Running command: %s', ' '.join(cmd))
        proc = executive.popen(cmd,
                               stdout=executive.PIPE,
                               stderr=executive.PIPE,
                               stdin=executive.PIPE)
        out, err = proc.communicate('')
        if proc.returncode:
            _log.info('# ret> %d', proc.returncode)
            if out:
                _log.info(out)
            if err:
                _log.info(err)
            host.exit(proc.returncode)
        return proc.returncode, out
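For comparison, the same check-the-returncode flow using only the standard library; wpt_exec_path and dest_path are placeholders for the paths resolved above:

import subprocess
import sys

def generate_manifest(wpt_exec_path, dest_path):
    cmd = ['python', wpt_exec_path, 'manifest', '--work',
           '--tests-root', dest_path]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    out, err = proc.communicate('')
    if proc.returncode:
        # Surface both streams before exiting, as the wrapper above does.
        sys.stderr.write(out + err)
        sys.exit(proc.returncode)
    return proc.returncode, out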
Code example #3
    def do_POST(self):
        json_raw_data = self.rfile.read(
            int(self.headers.getheader('content-length')))
        json_data = json.loads(json_raw_data)
        test_list = ''
        for each in json_data['tests']:
            test_list += each + ' '
        filesystem = FileSystem()
        path_finder = PathFinder(filesystem)
        script_dir = path_finder.path_from_tools_scripts()
        executable_path = script_dir + '/run-webkit-tests'
        cmd = 'python ' + executable_path + ' --no-show-results '
        cmd += test_list
        process = subprocess.Popen(cmd,
                                   shell=True,
                                   cwd=script_dir,
                                   env=None,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # Stream the runner's output back to the client line by line.
        while process.poll() is None:
            html_output = '<br>' + str(process.stdout.readline())
            self.wfile.write(html_output)
            self.wfile.flush()
            time.sleep(0.05)
        process.wait()
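One caveat with the handler above: the command is a single shell string (shell=True), so a test name containing spaces or shell metacharacters would break it. A sketch of the argument-list form, which sidesteps quoting entirely:

import subprocess

def start_test_run(executable_path, tests, cwd):
    # Each test name stays a single argv entry; no shell parsing happens.
    cmd = ['python', executable_path, '--no-show-results'] + list(tests)
    return subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)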
Code example #4
    def test_filter_transform_patch(self):
        host = Host()
        finder = PathFinder(host.filesystem)
        resources_path = finder.path_from_tools_scripts('webkitpy', 'w3c', 'resources')
        sample_patch = host.filesystem.read_text_file(host.filesystem.join(resources_path, 'sample.patch'))
        expected_patch = host.filesystem.read_text_file(host.filesystem.join(resources_path, 'expected.patch'))

        cl = GerritCL({'change_id': 1}, MockGerritAPI(None, None, None))
        actual_patch = cl.filter_transform_patch(sample_patch)
        self.assertEqual(actual_patch, expected_patch)
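The test above is a golden-file test: it feeds sample.patch through the function under test and compares the result to expected.patch. The same pattern in a self-contained sketch, with transform standing in for the function being tested:

import unittest

class GoldenFileTestCase(unittest.TestCase):
    def assert_matches_golden(self, transform, sample_path, expected_path):
        # Read the input fixture and its expected output, then compare
        # the transformed input against the golden copy.
        with open(sample_path) as f:
            sample = f.read()
        with open(expected_path) as f:
            expected = f.read()
        self.assertEqual(transform(sample), expected)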
Code example #5
File: python.py  Project: ysoftman/chromium
    def run_pylint(self, path):
        finder = PathFinder(FileSystem())
        executive = Executive()
        env = os.environ.copy()
        env['PYTHONPATH'] = os.pathsep.join([
            finder.path_from_tools_scripts(),
            finder.path_from_blink_source('build', 'scripts'),
            get_blinkpy_thirdparty_dir(),
            get_blink_tools_dir(),
            finder.path_from_blink_source('bindings', 'scripts'),
            finder.path_from_chromium_base('build', 'android'),
            finder.path_from_chromium_base('third_party', 'catapult', 'devil'),
            finder.path_from_chromium_base('third_party', 'pymock'),
        ])
        return executive.run_command([
            sys.executable,
            finder.path_from_depot_tools_base('pylint.py'),
            '--output-format=parseable',
            '--rcfile=' + finder.path_from_tools_scripts('webkitpy', 'pylintrc'),
            path,
        ], env=env, error_handler=executive.ignore_error)
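Stripped of the webkitpy helpers, the technique is simply running pylint in a child process with an augmented PYTHONPATH. A rough standalone sketch; pylint_path, rcfile, and extra_paths are placeholders for the finder-derived paths above:

import os
import subprocess
import sys

def run_pylint(pylint_path, rcfile, target, extra_paths):
    env = os.environ.copy()
    env['PYTHONPATH'] = os.pathsep.join(extra_paths)
    # Like error_handler=executive.ignore_error above, the caller decides
    # what to do with a non-zero exit code.
    return subprocess.call([sys.executable, pylint_path,
                            '--output-format=parseable',
                            '--rcfile=' + rcfile, target], env=env)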
Code example #6
    def generate_manifest(host, dest_path):
        """Generates MANIFEST.json on the specified directory."""
        finder = PathFinder(host.filesystem)
        wpt_exec_path = finder.path_from_tools_scripts('webkitpy',
                                                       'thirdparty', 'wpt',
                                                       'wpt', 'wpt')
        cmd = [
            'python', wpt_exec_path, 'manifest', '--work', '--tests-root',
            dest_path
        ]

        # ScriptError will be raised if the command fails.
        host.executive.run_command(
            cmd,
            return_stderr=
            True  # This will also include stderr in the exception message.
        )
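Unlike example #2, this variant delegates error handling to run_command, which raises ScriptError on failure instead of logging and exiting. A rough standard-library analogue is subprocess.check_output, which raises CalledProcessError in the same situation:

import subprocess

def generate_manifest(wpt_exec_path, dest_path):  # paths as in example #2
    cmd = ['python', wpt_exec_path, 'manifest', '--work',
           '--tests-root', dest_path]
    # Folding stderr into the captured output roughly corresponds to
    # return_stderr=True above.
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT)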
Code example #7
class WPTExpectationsUpdater(object):
    def __init__(self, host):
        self.host = host
        self.port = self.host.port_factory.get()
        self.git_cl = GitCL(host)
        self.finder = PathFinder(self.host.filesystem)
        self.ports_with_no_results = set()
        self.ports_with_all_pass = set()

    def run(self, args=None):
        """Downloads text new baselines and adds test expectations lines."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        issue_number = self.get_issue_number()
        if issue_number == 'None':
            _log.error('No issue on current branch.')
            return 1

        build_to_status = self.get_latest_try_jobs()
        _log.debug('Latest try jobs: %r', build_to_status)
        if not build_to_status:
            _log.error('No try job information was collected.')
            return 1

        # The manifest may be used below to check which tests are reference tests.
        WPTManifest.ensure_manifest(self.host)

        # Here we build up a dict of failing test results for all platforms.
        test_expectations = {}
        for build, job_status in build_to_status.iteritems():
            if job_status.result == 'SUCCESS':
                self.ports_with_all_pass.add(self.port_name(build))

            port_results = self.get_failing_results_dict(build)
            test_expectations = self.merge_dicts(test_expectations,
                                                 port_results)

        # And then we merge results for different platforms that had the same results.
        for test_name, platform_result in test_expectations.iteritems():
            # platform_result is a dict mapping platforms to results.
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        test_expectations = self.download_text_baselines(test_expectations)
        test_expectation_lines = self.create_line_list(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return 0

    def get_issue_number(self):
        """Returns current CL number. Can be replaced in unit tests."""
        return self.git_cl.get_issue_number()

    def get_latest_try_jobs(self):
        """Returns the latest finished try jobs as Build objects."""
        return self.git_cl.latest_try_jobs(self._get_try_bots())

    def get_failing_results_dict(self, build):
        """Returns a nested dict of failing test results.

        Retrieves a full list of layout test results from a builder result URL.
        Collects the builder name, platform and a list of tests that did not
        run as expected.

        Args:
            build: A Build object.

        Returns:
            A dictionary with the structure: {
                test-with-failing-result: {
                    'full-port-name': {
                        'expected': 'TIMEOUT',
                        'actual': 'CRASH',
                        'bug': 'crbug.com/11111'
                    }
                }
            }
            If results could be fetched but none are failing,
            this will return an empty dictionary.
        """
        port_name = self.port_name(build)
        if port_name in self.ports_with_all_pass:
            # All tests passed, so there should be no failing results.
            return {}
        layout_test_results = self.host.buildbot.fetch_results(build)
        if layout_test_results is None:
            _log.warning('No results for build %s', build)
            self.ports_with_no_results.add(self.port_name(build))
            return {}
        test_results = [
            result
            for result in layout_test_results.didnt_run_as_expected_results()
            if not result.did_pass()
        ]
        return self.generate_results_dict(self.port_name(build), test_results)

    @memoized
    def port_name(self, build):
        return self.host.builders.port_name_for_builder_name(
            build.builder_name)

    def generate_results_dict(self, full_port_name, test_results):
        """Makes a dict with results for one platform.

        Args:
            full_port_name: The fully-qualified port name, e.g. "win-win10".
            test_results: A list of LayoutTestResult objects.

        Returns:
            A dict mapping the full port name to a dict with the results for
            the given test and platform.
        """
        test_dict = {}
        for result in test_results:
            test_name = result.test_name()

            if not self.port.is_wpt_test(test_name):
                continue

            test_dict[test_name] = {
                full_port_name: {
                    'expected': result.expected_results(),
                    'actual': result.actual_results(),
                    'bug': 'crbug.com/626703'
                }
            }
        return test_dict

    def merge_dicts(self, target, source, path=None):
        """Recursively merges nested dictionaries.

        Args:
            target: First dictionary, which is updated based on source.
            source: Second dictionary, not modified.
            path: A list of keys, only used for making error messages.

        Returns:
            The updated target dictionary.
        """
        path = path or []
        for key in source:
            if key in target:
                if (isinstance(target[key], dict)) and isinstance(
                        source[key], dict):
                    self.merge_dicts(target[key], source[key],
                                     path + [str(key)])
                elif target[key] == source[key]:
                    pass
                else:
                    raise ValueError(
                        'The key %s already exists in the target dictionary.'
                        % '.'.join(path + [str(key)]))
            else:
                target[key] = source[key]
        return target

    def merge_same_valued_keys(self, dictionary):
        """Merges keys in dictionary with same value.

        Traverses through a dict and compares the values of keys to one another.
        If the values match, the keys are combined to a tuple and the previous
        keys are removed from the dict.

        Args:
            dictionary: A dictionary whose values are dictionaries.

        Returns:
            A new dictionary in which keys with equal values are merged.
            Example: {
                'one': {'foo': 'bar'},
                'two': {'foo': 'bar'},
                'three': {'foo': 'bar'}
            }
            is converted to a new dictionary that contains
            {('one', 'two', 'three'): {'foo': 'bar'}}
        """
        merged_dict = {}
        matching_value_keys = set()
        keys = sorted(dictionary.keys())
        while keys:
            current_key = keys[0]
            found_match = False
            if current_key == keys[-1]:
                merged_dict[current_key] = dictionary[current_key]
                keys.remove(current_key)
                break

            for next_item in keys[1:]:
                if dictionary[current_key] == dictionary[next_item]:
                    found_match = True
                    matching_value_keys.update([current_key, next_item])

                if next_item == keys[-1]:
                    if found_match:
                        merged_dict[tuple(
                            matching_value_keys)] = dictionary[current_key]
                        keys = [
                            k for k in keys if k not in matching_value_keys
                        ]
                    else:
                        merged_dict[current_key] = dictionary[current_key]
                        keys.remove(current_key)
            matching_value_keys = set()
        return merged_dict

    def get_expectations(self, results, test_name=''):
        """Returns a set of test expectations to use based on results.

        Returns a set of one or more test expectations based on the expected
        and actual results of a given test name. This function decides
        expectations for tests that could not be rebaselined.

        Args:
            results: A dictionary that maps one test to its results. Example:
                {
                    'test_name': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'bug': 'crbug.com/11111'
                    }
                }
            test_name: The test name string (optional).

        Returns:
            A set of one or more test expectation strings with the first letter
            capitalized. Example: set(['Failure', 'Timeout']).
        """
        # If the result is MISSING, this implies that the test was not
        # rebaselined and has an actual result but no baseline. We can't
        # add a Missing expectation (this is not allowed), but no other
        # expectation is correct.
        # We also want to skip any new manual tests that are not automated;
        # see crbug.com/708241 for context.
        if (results['actual'] == 'MISSING'
                or ('-manual.' in test_name
                    and results['actual'] == 'TIMEOUT')):
            return {'Skip'}
        expectations = set()
        failure_types = ('TEXT', 'IMAGE+TEXT', 'IMAGE', 'AUDIO')
        other_types = ('TIMEOUT', 'CRASH', 'PASS')
        for actual in results['actual'].split():
            if actual in failure_types:
                expectations.add('Failure')
            if actual in other_types:
                expectations.add(actual.capitalize())
        return expectations

    def create_line_list(self, merged_results):
        """Creates list of test expectations lines.

        Traverses through the given |merged_results| dictionary and parses the
        value to create one test expectations line per key.

        Args:
            merged_results: A dictionary with the format:
                {
                    'test_name': {
                        'platform': {
                            'expected': 'PASS',
                            'actual': 'FAIL',
                            'bug': 'crbug.com/11111'
                        }
                    }
                }

        Returns:
            A list of test expectations lines with the format:
            ['BUG_URL [PLATFORM(S)] TEST_NAME [EXPECTATION(S)]']
        """
        line_list = []
        for test_name, port_results in sorted(merged_results.iteritems()):
            if not self.port.is_wpt_test(test_name):
                _log.warning(
                    'Non-WPT test "%s" unexpectedly passed to create_line_list.',
                    test_name)
                continue
            for port_names, results in sorted(port_results.iteritems()):
                line_list.append(
                    self._create_line(test_name, port_names, results))
        return line_list

    def _create_line(self, test_name, port_names, results):
        """Constructs and returns a test expectation line string."""
        port_names = self.tuple_or_value_to_list(port_names)

        # The set of ports with no results is assumed to have no
        # overlap with the set of port names passed in here.
        assert (set(port_names) & self.ports_with_no_results) == set()

        # The ports with no results are generally ports of builders that
        # failed, maybe for unrelated reasons. At this point, we add ports
        # with no results to the list of platforms because we're guessing
        # that this new expectation might be cross-platform and should
        # also apply to any ports that we weren't able to get results for.
        port_names.extend(self.ports_with_no_results)

        specifier_part = self.specifier_part(port_names, test_name)

        line_parts = [results['bug']]
        if specifier_part:
            line_parts.append(specifier_part)
        line_parts.append(test_name)
        line_parts.append('[ %s ]' %
                          ' '.join(self.get_expectations(results, test_name)))

        return ' '.join(line_parts)

    def specifier_part(self, port_names, test_name):
        """Returns the specifier part for a new test expectations line.

        Args:
            port_names: A list of full port names that the line should apply to.
            test_name: The test name for the expectation line.

        Returns:
            The specifier part of the new expectation line, e.g. "[ Mac ]".
            This will be an empty string if the line should apply to all platforms.
        """
        specifiers = []
        for name in sorted(port_names):
            specifiers.append(
                self.host.builders.version_specifier_for_port_name(name))

        specifiers.extend(self.skipped_specifiers(test_name))
        specifiers = self.simplify_specifiers(
            specifiers, self.port.configuration_specifier_macros())
        if not specifiers:
            return ''
        return '[ %s ]' % ' '.join(specifiers)

    @staticmethod
    def tuple_or_value_to_list(tuple_or_value):
        """Converts a tuple to a list, and a string value to a one-item list."""
        if isinstance(tuple_or_value, tuple):
            return list(tuple_or_value)
        return [tuple_or_value]

    def skipped_specifiers(self, test_name):
        """Returns a list of platform specifiers for which the test is skipped."""
        specifiers = []
        for port in self.all_try_builder_ports():
            if port.skips_test(test_name):
                specifiers.append(
                    self.host.builders.version_specifier_for_port_name(
                        port.name()))
        return specifiers

    @memoized
    def all_try_builder_ports(self):
        """Returns a list of Port objects for all try builders."""
        return [
            self.host.port_factory.get_from_builder_name(name)
            for name in self._get_try_bots()
        ]

    @staticmethod
    def simplify_specifiers(specifiers, configuration_specifier_macros):  # pylint: disable=unused-argument
        """Converts some collection of specifiers to an equivalent and maybe shorter list.

        The input strings are all case-insensitive, but the strings in the
        return value will all be capitalized.

        Args:
            specifiers: A collection of lower-case specifiers.
            configuration_specifier_macros: A dict mapping "macros" for
                groups of specifiers to lists of specific specifiers. In
                practice, this is a dict mapping operating systems to
                supported versions, e.g. {"win": ["win7", "win10"]}.

        Returns:
            A shortened list of specifiers. For example, ["win7", "win10"]
            would be converted to ["Win"]. If the given list covers all
            supported platforms, then an empty list is returned.
            This list will be sorted and have capitalized specifier strings.
        """
        specifiers = {specifier.lower() for specifier in specifiers}
        for macro_specifier, version_specifiers in configuration_specifier_macros.iteritems(
        ):
            macro_specifier = macro_specifier.lower()
            version_specifiers = {
                specifier.lower()
                for specifier in version_specifiers
            }
            if version_specifiers.issubset(specifiers):
                specifiers -= version_specifiers
                specifiers.add(macro_specifier)
        if specifiers == {
                macro.lower()
                for macro in configuration_specifier_macros.keys()
        }:
            return []
        return sorted(specifier.capitalize() for specifier in specifiers)

    def write_to_test_expectations(self, line_list):
        """Writes the given lines to the TestExpectations file.

        The place in the file where the new lines are inserted is after a marker
        comment line. If this marker comment line is not found, then everything
        including the marker line is appended to the end of the file.

        Args:
            line_list: A list of lines to add to the TestExpectations file.
        """
        if not line_list:
            _log.info('No lines to write to TestExpectations.')
            return
        _log.info('Lines to write to TestExpectations:')
        for line in line_list:
            _log.info('  %s', line)

        expectations_file_path = self.port.path_to_generic_test_expectations_file(
        )
        file_contents = self.host.filesystem.read_text_file(
            expectations_file_path)

        line_list = [
            line for line in line_list if
            self._test_name_from_expectation_string(line) not in file_contents
        ]
        if not line_list:
            return

        marker_comment_index = file_contents.find(MARKER_COMMENT)
        if marker_comment_index == -1:
            file_contents += '\n%s\n' % MARKER_COMMENT
            file_contents += '\n'.join(line_list)
        else:
            end_of_marker_line = (file_contents[marker_comment_index:].find(
                '\n')) + marker_comment_index
            file_contents = file_contents[:end_of_marker_line + 1] + '\n'.join(
                line_list) + file_contents[end_of_marker_line:]

        self.host.filesystem.write_text_file(expectations_file_path,
                                             file_contents)

    @staticmethod
    def _test_name_from_expectation_string(expectation_string):
        return TestExpectationLine.tokenize_line(
            filename='', expectation_string=expectation_string,
            line_number=0).name

    def download_text_baselines(self, test_results):
        """Fetches new baseline files for tests that should be rebaselined.

        Invokes `webkit-patch rebaseline-cl` in order to download new baselines
        (-expected.txt files) for testharness.js tests that did not crash or
        time out. Then, the platform-specific test is removed from the overall
        failure test dictionary and the resulting dictionary is returned.

        Args:
            test_results: A dict mapping test name to platform to test results.

        Returns:
            An updated test_results dictionary which should only contain
            test failures for tests that couldn't be rebaselined.
        """
        tests_to_rebaseline, test_results = self.get_tests_to_rebaseline(
            test_results)
        if not tests_to_rebaseline:
            _log.info('No tests to rebaseline.')
            return test_results
        _log.info('Tests to rebaseline:')
        for test in tests_to_rebaseline:
            _log.info('  %s', test)

        webkit_patch = self.finder.path_from_tools_scripts('webkit-patch')
        self.host.executive.run_command([
            'python',
            webkit_patch,
            'rebaseline-cl',
            '--verbose',
            '--no-trigger-jobs',
            '--fill-missing',
        ] + tests_to_rebaseline)
        return test_results

    def get_tests_to_rebaseline(self, test_results):
        """Returns a list of tests to download new baselines for.

        Creates a list of tests to rebaseline depending on the tests' platform-
        specific results. In general, this will be non-ref tests that failed
        due to a baseline mismatch (rather than crash or timeout).

        Args:
            test_results: A dictionary of failing test results, mapping tests
                to platforms to result dicts.

        Returns:
            A pair: A set of tests to be rebaselined, and a modified copy of
            the test results dictionary. The tests to be rebaselined should
            include testharness.js tests that failed due to a baseline mismatch.
        """
        new_test_results = copy.deepcopy(test_results)
        tests_to_rebaseline = set()
        for test_path in test_results:
            for platform, result in test_results[test_path].iteritems():
                if self.can_rebaseline(test_path, result):
                    del new_test_results[test_path][platform]
                    tests_to_rebaseline.add(test_path)
        return sorted(tests_to_rebaseline), new_test_results

    def can_rebaseline(self, test_path, result):
        if self.is_reference_test(test_path):
            return False
        if result['actual'] in ('CRASH', 'TIMEOUT', 'MISSING'):
            return False
        return True

    def is_reference_test(self, test_path):
        """Checks whether a given test is a reference test."""
        return bool(self.port.reference_files(test_path))

    def _get_try_bots(self):
        return self.host.builders.all_try_builder_names()
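To make the two merge steps in run() concrete, here is a hedged walk-through with literal dicts (the port names and test path are illustrative): merge_dicts deep-merges the per-build result trees, and merge_same_valued_keys then collapses platforms whose result dicts are identical under a single tuple key.

updater = WPTExpectationsUpdater(host)  # host: a webkitpy Host instance

mac = {'t.html': {'test-mac-mac10.11': {'actual': 'TIMEOUT'}}}
win = {'t.html': {'test-win-win10': {'actual': 'TIMEOUT'}}}

merged = updater.merge_dicts(mac, win)
# merged['t.html'] now holds both platform entries.

collapsed = updater.merge_same_valued_keys(merged['t.html'])
# Both platforms map to equal results, so they are grouped under one
# tuple key (tuple order is unspecified, since it comes from a set):
# {('test-mac-mac10.11', 'test-win-win10'): {'actual': 'TIMEOUT'}}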
Code example #8
class Bisector(object):

    def __init__(self, tests, is_debug):
        self.executive = Executive()
        self.tests = tests
        self.expected_failure = tests[-1]
        self.is_debug = is_debug
        self.path_finder = PathFinder(FileSystem())

    def bisect(self):
        if self.test_fails_in_isolation():
            self.buckets = [Bucket([self.expected_failure])]
            print '%s fails when run in isolation.' % self.expected_failure
            self.print_result()
            return 0
        if not self.test_fails(self.tests):
            _log.error('%s does not fail', self.expected_failure)
            return 1
        # Split the list of tests into buckets. Each bucket has at least one test required to cause
        # the expected failure at the end. Split buckets in half until only buckets with a single
        # test remain.
        self.buckets = [Bucket(self.tests[:-1]), Bucket([self.expected_failure])]
        while not self.is_done():
            self.print_progress()
            self.split_largest_bucket()
        self.print_result()
        self.verify_non_flaky()
        return 0

    def test_fails_in_isolation(self):
        return self.test_bucket_list_fails([Bucket([self.expected_failure])])

    def verify_non_flaky(self):
        print 'Verifying the failure is not flaky by running 10 times.'
        count_failures = 0
        for _ in range(0, 10):
            if self.test_bucket_list_fails(self.buckets):
                count_failures += 1
        print 'Failed %d/10 times' % count_failures

    def print_progress(self):
        count = 0
        for bucket in self.buckets:
            count += len(bucket.tests)
        print '%d tests left, %d buckets' % (count, len(self.buckets))

    def print_result(self):
        tests = []
        for bucket in self.buckets:
            tests += bucket.tests
        extra_args = ' --debug' if self.is_debug else ''
        print 'run-webkit-tests%s --child-processes=1 --order=none %s' % (extra_args, ' '.join(tests))

    def is_done(self):
        for bucket in self.buckets:
            if bucket.size() > 1:
                return False
        return True

    def split_largest_bucket(self):
        index = 0
        largest_index = 0
        largest_size = 0
        for bucket in self.buckets:
            if bucket.size() > largest_size:
                largest_index = index
                largest_size = bucket.size()
            index += 1

        bucket_to_split = self.buckets[largest_index]
        halfway_point = int(largest_size / 2)
        first_half = Bucket(bucket_to_split.tests[:halfway_point])
        second_half = Bucket(bucket_to_split.tests[halfway_point:])

        buckets_before = self.buckets[:largest_index]
        buckets_after = self.buckets[largest_index + 1:]

        # Do the second half first: it tends to be faster, since the http tests are front-loaded and slow.
        new_buckets = buckets_before + [second_half] + buckets_after
        if self.test_bucket_list_fails(new_buckets):
            self.buckets = new_buckets
            return

        new_buckets = buckets_before + [first_half] + buckets_after
        if self.test_bucket_list_fails(new_buckets):
            self.buckets = new_buckets
            return

        self.buckets = buckets_before + [first_half, second_half] + buckets_after

    def test_bucket_list_fails(self, buckets):
        tests = []
        for bucket in buckets:
            tests += bucket.tests
        return self.test_fails(tests)

    def test_fails(self, tests):
        extra_args = ['--debug'] if self.is_debug else []
        path_to_run_webkit_tests = self.path_finder.path_from_tools_scripts('run-webkit-tests')
        proc = self.executive.popen(
            [path_to_run_webkit_tests, '--child-processes', '1', '--order', 'none', '--no-retry',
             '--no-show-results', '--verbose'] + extra_args + tests, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        failure_string = self.expected_failure + ' failed'
        return failure_string in proc.stderr.read()
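A hedged usage sketch: the Bisector takes an ordered test list whose last entry is the test expected to fail, then narrows down which earlier tests are needed to reproduce the failure. The test names are placeholders:

import sys

tests = [
    'fast/dom/first.html',
    'fast/dom/second.html',
    'fast/dom/expected-to-fail.html',  # the failure under investigation
]
bisector = Bisector(tests, is_debug=False)
sys.exit(bisector.bisect())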