Example #1
class W3CExpectationsLineAdder(object):
    def __init__(self, host):
        self.host = host
        self.host.initialize_scm()
        self.finder = WebKitFinder(self.host.filesystem)

    def run(self, args=None):
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)
        log_level = logging.DEBUG if args.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        issue_number = self.get_issue_number()
        if issue_number == 'None':
            _log.error('No issue on current branch.')
            return 1

        try_bots = self.get_try_bots()
        rietveld = Rietveld(self.host.web)
        try_jobs = rietveld.latest_try_jobs(issue_number, try_bots)
        _log.debug('Latest try jobs: %r', try_jobs)

        if not try_jobs:
            _log.error('No try job information was collected.')
            return 1

        test_expectations = {}
        for job in try_jobs:
            platform_results = self.get_failing_results_dict(job)
            test_expectations = self.merge_dicts(test_expectations,
                                                 platform_results)

        for test_name, platform_result in test_expectations.iteritems():
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        test_expectations = self.get_expected_txt_files(test_expectations)
        test_expectation_lines = self.create_line_list(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return 0

    def get_issue_number(self):
        return GitCL(self.host).get_issue_number()

    def get_try_bots(self):
        return self.host.builders.all_try_builder_names()

    def generate_results_dict(self, platform, result_list):
        test_dict = {}
        if '-' in platform:
            platform = platform[platform.find('-') + 1:].capitalize()
        for result in result_list:
            test_dict[result.test_name()] = {
                platform: {
                    'expected': result.expected_results(),
                    'actual': result.actual_results(),
                    'bug': 'crbug.com/626703'
                }
            }
        return test_dict
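
    # Illustrative sketch (not part of the original module): for a
    # hypothetical builder platform name such as 'linux-trusty', the prefix
    # up to the first '-' is stripped and the remainder capitalized, so a
    # single result maps to something like:
    #   {'some/test.html': {'Trusty': {'expected': 'PASS',
    #                                  'actual': 'FAIL',
    #                                  'bug': 'crbug.com/626703'}}}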

    def get_failing_results_dict(self, build):
        """Returns a nested dict of failing test results.

        Retrieves a full list of layout test results from a builder result URL.
        Collects the builder name, platform and a list of tests that did not
        run as expected.

        Args:
            build: A Build object.

        Returns:
            A dictionary with the structure: {
                'test_name': {
                    'platform': {
                        'expected': 'TIMEOUT',
                        'actual': 'CRASH',
                        'bug': 'crbug.com/11111'
                    }
                }
            }
            If there are no failing results or no results could be fetched,
            this will return an empty dict.
        """
        layout_test_results = self.host.buildbot.fetch_results(build)
        if layout_test_results is None:
            _log.warning('No results for build %s', build)
            return {}
        platform = self.host.builders.port_name_for_builder_name(
            build.builder_name)
        result_list = layout_test_results.didnt_run_as_expected_results()
        failing_results_dict = self.generate_results_dict(
            platform, result_list)
        return failing_results_dict

    def merge_dicts(self, target, source, path=None):
        """Recursively merges nested dictionaries.

        Args:
            target: First dictionary, which is updated based on source.
            source: Second dictionary, not modified.

        Returns:
            An updated target dictionary.
        """
        path = path or []
        for key in source:
            if key in target:
                if (isinstance(target[key], dict)) and isinstance(
                        source[key], dict):
                    self.merge_dicts(target[key], source[key],
                                     path + [str(key)])
                elif target[key] == source[key]:
                    pass
                else:
                    raise ValueError(
                        'The key %s already exists in the target dictionary.' %
                        '.'.join(path))
            else:
                target[key] = source[key]
        return target
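
    # A minimal usage sketch (hypothetical inputs, not from the original
    # module): nested dicts are merged recursively, identical leaf values
    # are kept, and conflicting leaf values raise ValueError.
    #   merge_dicts({'a': {'x': 1}}, {'a': {'y': 2}})  # -> {'a': {'x': 1, 'y': 2}}
    #   merge_dicts({'a': {'x': 1}}, {'a': {'x': 2}})  # raises ValueError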

    def merge_same_valued_keys(self, dictionary):
        """Merges keys in dictionary with same value.

        Traverses through a dict and compares the values of keys to one another.
        If the values match, the keys are combined to a tuple and the previous
        keys are removed from the dict.

        Args:
            dictionary: A dictionary with a dictionary as the value.

        Returns:
            A new dictionary with updated keys to reflect matching values of keys.
            Example: {
                'one': {'foo': 'bar'},
                'two': {'foo': 'bar'},
                'three': {'foo': 'bar'}
            }
            is converted to a new dictionary that contains
            {('one', 'two', 'three'): {'foo': 'bar'}}
        """
        merged_dict = {}
        matching_value_keys = set()
        keys = sorted(dictionary.keys())
        while keys:
            current_key = keys[0]
            found_match = False
            if current_key == keys[-1]:
                merged_dict[current_key] = dictionary[current_key]
                keys.remove(current_key)
                break

            for next_item in keys[1:]:
                if dictionary[current_key] == dictionary[next_item]:
                    found_match = True
                    matching_value_keys.update([current_key, next_item])

                if next_item == keys[-1]:
                    if found_match:
                        merged_dict[tuple(
                            matching_value_keys)] = dictionary[current_key]
                        keys = [
                            k for k in keys if k not in matching_value_keys
                        ]
                    else:
                        merged_dict[current_key] = dictionary[current_key]
                        keys.remove(current_key)
            matching_value_keys = set()
        return merged_dict
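
    # Illustrative trace (hypothetical input, not from the original module):
    # keys with equal values collapse into one tuple key, while unique
    # values keep their original keys; tuple order follows set iteration
    # order, so it is not guaranteed.
    #   merge_same_valued_keys({'a': {'foo': 1}, 'b': {'foo': 1}, 'c': {'foo': 2}})
    #   # -> {('a', 'b'): {'foo': 1}, 'c': {'foo': 2}}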

    def get_expectations(self, results):
        """Returns a set of test expectations for a given test dict.

        Returns a set of one or more test expectations based on the expected
        and actual results of a given test name.

        Args:
            results: A dictionary that maps one test to its results. Example:
                {
                    'test_name': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'bug': 'crbug.com/11111'
                    }
                }

        Returns:
            A set of one or more test expectation strings with the first letter
            capitalized. Example: set(['Failure', 'Timeout']).
        """
        expectations = set()
        failure_types = [
            'TEXT', 'FAIL', 'IMAGE+TEXT', 'IMAGE', 'AUDIO', 'MISSING', 'LEAK'
        ]
        test_expectation_types = [
            'SLOW', 'TIMEOUT', 'CRASH', 'PASS', 'REBASELINE',
            'NEEDSREBASELINE', 'NEEDSMANUALREBASELINE'
        ]
        for expected in results['expected'].split():
            for actual in results['actual'].split():
                if expected in test_expectation_types and actual in failure_types:
                    expectations.add('Failure')
                if expected in failure_types and actual in test_expectation_types:
                    expectations.add(actual.capitalize())
                if expected in test_expectation_types and actual in test_expectation_types:
                    expectations.add(actual.capitalize())
        return expectations
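
    # Two illustrative mappings (hypothetical results, not from the original
    # module):
    #   {'expected': 'PASS', 'actual': 'TEXT'}    -> set(['Failure'])
    #   {'expected': 'PASS', 'actual': 'TIMEOUT'} -> set(['Timeout'])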

    def create_line_list(self, merged_results):
        """Creates list of test expectations lines.

        Traverses through the given |merged_results| dictionary and parses the
        value to create one test expectations line per key.

        Args:
            merged_results: A dictionary with the format:
                {
                    'test_name': {
                        'platform': {
                            'expected': 'PASS',
                            'actual': 'FAIL',
                            'bug': 'crbug.com/11111'
                        }
                    }
                }

        Returns:
            A list of test expectations lines with the format:
            ['BUG_URL [PLATFORM(S)] TEST_NAME [EXPECTATION(S)]']
        """
        line_list = []
        for test_name, platform_results in merged_results.iteritems():
            for platform in platform_results:
                if test_name.startswith('imported'):
                    platform_list = []
                    bug = []
                    expectations = []
                    if isinstance(platform, tuple):
                        platform_list = list(platform)
                    else:
                        platform_list.append(platform)
                    bug.append(platform_results[platform]['bug'])
                    expectations = self.get_expectations(
                        platform_results[platform])
                    line = '%s [ %s ] %s [ %s ]' % (bug[0], ' '.join(
                        platform_list), test_name, ' '.join(expectations))
                    line_list.append(str(line))
        return line_list
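
    # A produced line looks roughly like this (hypothetical values):
    #   crbug.com/626703 [ Trusty ] imported/some/test.html [ Failure ]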

    def write_to_test_expectations(self, line_list):
        """Writes to TestExpectations.

        The place in the file where the new lines are inserted is after a
        marker comment line. If this marker comment line is not found, it will
        be added to the end of the file.

        Args:
            line_list: A list of lines to add to the TestExpectations file.
        """
        _log.debug('Lines to write to TestExpectations: %r', line_list)
        port = self.host.port_factory.get()
        expectations_file_path = port.path_to_generic_test_expectations_file()
        marker_comment = '# Tests added from W3C auto import bot'
        file_contents = self.host.filesystem.read_text_file(
            expectations_file_path)
        marker_comment_index = file_contents.find(marker_comment)
        line_list = [
            line for line in line_list if
            self._test_name_from_expectation_string(line) not in file_contents
        ]
        if not line_list:
            return
        if marker_comment_index == -1:
            file_contents += '\n%s\n' % marker_comment
            file_contents += '\n'.join(line_list)
        else:
            end_of_marker_line = (file_contents[marker_comment_index:].find(
                '\n')) + marker_comment_index
            file_contents = file_contents[:end_of_marker_line + 1] + '\n'.join(
                line_list) + file_contents[end_of_marker_line:]
        self.host.filesystem.write_text_file(expectations_file_path,
                                             file_contents)
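
    # Sketch of the resulting file layout (hypothetical expectation line):
    # new lines land immediately after the marker comment, e.g.
    #   # Tests added from W3C auto import bot
    #   crbug.com/626703 [ Trusty ] imported/some/test.html [ Failure ]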

    @staticmethod
    def _test_name_from_expectation_string(expectation_string):
        return TestExpectationLine.tokenize_line(
            filename='', expectation_string=expectation_string,
            line_number=0).name

    def get_expected_txt_files(self, tests_results):
        """Fetches new baseline files for tests that should be rebaselined.

        Invokes webkit-patch rebaseline-cl in order to download new
        -expected.txt files for testharness.js tests that did not crash or time
        out. Then, the platform-specific test is removed from the overall
        failure test dictionary.

        Args:
            tests_results: A dict mapping test name to platform to test results.

        Returns:
            An updated tests_results dictionary without the platform-specific
            testharness.js tests that required new baselines to be downloaded
            from `webkit-patch rebaseline-cl`.
        """
        modified_tests = self.get_modified_existing_tests()
        tests_to_rebaseline, tests_results = self.get_tests_to_rebaseline(
            modified_tests, tests_results)
        _log.debug('Tests to rebaseline: %r', tests_to_rebaseline)
        if tests_to_rebaseline:
            webkit_patch = self.host.filesystem.join(
                self.finder.chromium_base(), self.finder.webkit_base(),
                self.finder.path_to_script('webkit-patch'))
            self.host.executive.run_command([
                'python',
                webkit_patch,
                'rebaseline-cl',
                '--verbose',
                '--no-trigger-jobs',
                '--only-changed-tests',
            ] + tests_to_rebaseline)
            # NOTE(qyearsley): If rebaseline-cl is changed to stage all new files
            # with git, then this would be unnecessary and should be removed.
            self.host.executive.run_command(['git', 'add', '--all'])
        return tests_results

    def get_modified_existing_tests(self):
        """Returns a list of layout test names for layout tests that have been modified."""
        diff_output = self.host.executive.run_command([
            'git', 'diff', 'origin/master', '--name-only', '--diff-filter=AMR'
        ])  # Added, modified, and renamed files.
        paths_from_chromium_root = diff_output.splitlines()
        modified_tests = []
        for path in paths_from_chromium_root:
            absolute_path = self.host.filesystem.join(
                self.finder.chromium_base(), path)
            if not self.host.filesystem.exists(absolute_path):
                _log.warning('File does not exist: %s', absolute_path)
                continue
            test_path = self.finder.layout_test_name(path)
            if test_path:
                modified_tests.append(test_path)
        return modified_tests

    def get_tests_to_rebaseline(self, modified_tests, test_results):
        """Returns a list of tests to download new baselines for.

        Creates a list of tests to rebaseline depending on the tests' platform-
        specific results. In general, this will be non-ref tests that failed
        due to a baseline mismatch (rather than crash or timeout).

        Args:
            modified_tests: A list of paths to modified files (which should
                be added, removed or modified files in the imported w3c
                directory), relative to the LayoutTests directory.
            test_results: A dictionary of failing tests results.

        Returns:
            A pair: A set of tests to be rebaselined, and a modified copy of
            the test results dictionary. The tests to be rebaselined should include
            testharness.js tests that failed due to a baseline mismatch.
        """
        test_results = copy.deepcopy(test_results)
        tests_to_rebaseline = set()
        for test_path in modified_tests:
            if not (self.is_js_test(test_path)
                    and test_results.get(test_path)):
                continue
            for platform in test_results[test_path].keys():
                if test_results[test_path][platform]['actual'] not in [
                        'CRASH', 'TIMEOUT'
                ]:
                    del test_results[test_path][platform]
                    tests_to_rebaseline.add(test_path)
        return sorted(tests_to_rebaseline), test_results

    def is_js_test(self, test_path):
        """Checks whether a given file is a testharness.js test.

        Args:
            test_path: A file path relative to the layout tests directory.
                This might correspond to a deleted file or a non-test.
        """
        absolute_path = self.host.filesystem.join(
            self.finder.layout_tests_dir(), test_path)
        test_parser = TestParser(absolute_path, self.host)
        if not test_parser.test_doc:
            return False
        return test_parser.is_jstest()
Example #2
class W3CExpectationsLineAdder(object):

    def __init__(self, host):
        self.host = host
        self.host.initialize_scm()
        self.finder = WebKitFinder(self.host.filesystem)

    def run(self, args=None):
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('-v', '--verbose', action='store_true', help='More verbose logging.')
        args = parser.parse_args(args)
        log_level = logging.DEBUG if args.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        issue_number = self.get_issue_number()
        if issue_number == 'None':
            _log.error('No issue on current branch.')
            return 1

        rietveld = Rietveld(self.host.web)
        builds = rietveld.latest_try_jobs(issue_number, self.get_try_bots())
        _log.debug('Latest try jobs: %r', builds)

        if not builds:
            _log.error('No try job information was collected.')
            return 1

        test_expectations = {}
        for build in builds:
            platform_results = self.get_failing_results_dict(build)
            test_expectations = self.merge_dicts(test_expectations, platform_results)

        for test_name, platform_result in test_expectations.iteritems():
            test_expectations[test_name] = self.merge_same_valued_keys(platform_result)

        test_expectations = self.get_expected_txt_files(test_expectations)
        test_expectation_lines = self.create_line_list(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return 0

    def get_issue_number(self):
        return GitCL(self.host).get_issue_number()

    def get_try_bots(self):
        return self.host.builders.all_try_builder_names()

    def generate_results_dict(self, platform, result_list):
        test_dict = {}
        if '-' in platform:
            platform = platform[platform.find('-') + 1:].capitalize()
        for result in result_list:
            test_dict[result.test_name()] = {
                platform: {
                    'expected': result.expected_results(),
                    'actual': result.actual_results(),
                    'bug': 'crbug.com/626703'
                }}
        return test_dict

    def get_failing_results_dict(self, build):
        """Returns a nested dict of failing test results.

        Retrieves a full list of layout test results from a builder result URL.
        Collects the builder name, platform and a list of tests that did not
        run as expected.

        Args:
            build: A Build object.

        Returns:
            A dictionary with the structure: {
                'test_name': {
                    'platform': {
                        'expected': 'TIMEOUT',
                        'actual': 'CRASH',
                        'bug': 'crbug.com/11111'
                    }
                }
            }
            If there are no failing results or no results could be fetched,
            this will return an empty dict.
        """
        layout_test_results = self.host.buildbot.fetch_results(build)
        if layout_test_results is None:
            _log.warning('No results for build %s', build)
            return {}
        platform = self.host.builders.port_name_for_builder_name(build.builder_name)
        result_list = layout_test_results.didnt_run_as_expected_results()
        failing_results_dict = self.generate_results_dict(platform, result_list)
        return failing_results_dict

    def merge_dicts(self, target, source, path=None):
        """Recursively merges nested dictionaries.

        Args:
            target: First dictionary, which is updated based on source.
            source: Second dictionary, not modified.

        Returns:
            An updated target dictionary.
        """
        path = path or []
        for key in source:
            if key in target:
                if (isinstance(target[key], dict)) and isinstance(source[key], dict):
                    self.merge_dicts(target[key], source[key], path + [str(key)])
                elif target[key] == source[key]:
                    pass
                else:
                    raise ValueError('The key %s already exists in the target dictionary.' % '.'.join(path))
            else:
                target[key] = source[key]
        return target

    def merge_same_valued_keys(self, dictionary):
        """Merges keys in dictionary with same value.

        Traverses through a dict and compares the values of keys to one another.
        If the values match, the keys are combined to a tuple and the previous
        keys are removed from the dict.

        Args:
            dictionary: A dictionary with a dictionary as the value.

        Returns:
            A new dictionary with updated keys to reflect matching values of keys.
            Example: {
                'one': {'foo': 'bar'},
                'two': {'foo': 'bar'},
                'three': {'foo': 'bar'}
            }
            is converted to a new dictionary that contains
            {('one', 'two', 'three'): {'foo': 'bar'}}
        """
        merged_dict = {}
        matching_value_keys = set()
        keys = sorted(dictionary.keys())
        while keys:
            current_key = keys[0]
            found_match = False
            if current_key == keys[-1]:
                merged_dict[current_key] = dictionary[current_key]
                keys.remove(current_key)
                break

            for next_item in keys[1:]:
                if dictionary[current_key] == dictionary[next_item]:
                    found_match = True
                    matching_value_keys.update([current_key, next_item])

                if next_item == keys[-1]:
                    if found_match:
                        merged_dict[tuple(matching_value_keys)] = dictionary[current_key]
                        keys = [k for k in keys if k not in matching_value_keys]
                    else:
                        merged_dict[current_key] = dictionary[current_key]
                        keys.remove(current_key)
            matching_value_keys = set()
        return merged_dict

    def get_expectations(self, results):
        """Returns a set of test expectations for a given test dict.

        Returns a set of one or more test expectations based on the expected
        and actual results of a given test name.

        Args:
            results: A dictionary that maps one test to its results. Example:
                {
                    'test_name': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'bug': 'crbug.com/11111'
                    }
                }

        Returns:
            A set of one or more test expectation strings with the first letter
            capitalized. Example: set(['Failure', 'Timeout']).
        """
        expectations = set()
        failure_types = ['TEXT', 'FAIL', 'IMAGE+TEXT', 'IMAGE', 'AUDIO', 'MISSING', 'LEAK']
        test_expectation_types = ['SLOW', 'TIMEOUT', 'CRASH', 'PASS', 'REBASELINE', 'NEEDSREBASELINE', 'NEEDSMANUALREBASELINE']
        for expected in results['expected'].split():
            for actual in results['actual'].split():
                if expected in test_expectation_types and actual in failure_types:
                    expectations.add('Failure')
                if expected in failure_types and actual in test_expectation_types:
                    expectations.add(actual.capitalize())
                if expected in test_expectation_types and actual in test_expectation_types:
                    expectations.add(actual.capitalize())
        return expectations

    def create_line_list(self, merged_results):
        """Creates list of test expectations lines.

        Traverses through the given |merged_results| dictionary and parses the
        value to create one test expectations line per key.

        Args:
            merged_results: A dictionary with the format:
                {
                    'test_name': {
                        'platform': {
                            'expected': 'PASS',
                            'actual': 'FAIL',
                            'bug': 'crbug.com/11111'
                        }
                    }
                }

        Returns:
            A list of test expectations lines with the format:
            ['BUG_URL [PLATFORM(S)] TEST_NAME [EXPECTATION(S)]']
        """
        line_list = []
        for test_name, platform_results in merged_results.iteritems():
            for platform in platform_results:
                if test_name.startswith('imported'):
                    platform_list = []
                    bug = []
                    expectations = []
                    if isinstance(platform, tuple):
                        platform_list = list(platform)
                    else:
                        platform_list.append(platform)
                    bug.append(platform_results[platform]['bug'])
                    expectations = self.get_expectations(platform_results[platform])
                    line = '%s [ %s ] %s [ %s ]' % (bug[0], ' '.join(platform_list), test_name, ' '.join(expectations))
                    line_list.append(str(line))
        return line_list

    def write_to_test_expectations(self, line_list):
        """Writes to TestExpectations.

        The place in the file where the new lines are inserted is after a
        marker comment line. If this marker comment line is not found, it will
        be added to the end of the file.

        Args:
            line_list: A list of lines to add to the TestExpectations file.
        """
        _log.debug('Lines to write to TestExpectations: %r', line_list)
        port = self.host.port_factory.get()
        expectations_file_path = port.path_to_generic_test_expectations_file()
        file_contents = self.host.filesystem.read_text_file(expectations_file_path)
        marker_comment_index = file_contents.find(MARKER_COMMENT)
        line_list = [line for line in line_list if self._test_name_from_expectation_string(line) not in file_contents]
        if not line_list:
            return
        if marker_comment_index == -1:
            file_contents += '\n%s\n' % MARKER_COMMENT
            file_contents += '\n'.join(line_list)
        else:
            end_of_marker_line = (file_contents[marker_comment_index:].find('\n')) + marker_comment_index
            file_contents = file_contents[:end_of_marker_line + 1] + '\n'.join(line_list) + file_contents[end_of_marker_line:]
        self.host.filesystem.write_text_file(expectations_file_path, file_contents)

    @staticmethod
    def _test_name_from_expectation_string(expectation_string):
        return TestExpectationLine.tokenize_line(filename='', expectation_string=expectation_string, line_number=0).name

    def get_expected_txt_files(self, tests_results):
        """Fetches new baseline files for tests that should be rebaselined.

        Invokes webkit-patch rebaseline-cl in order to download new
        -expected.txt files for testharness.js tests that did not crash or time
        out. Then, the platform-specific test is removed from the overall
        failure test dictionary.

        Args:
            tests_results: A dict mapping test name to platform to test results.

        Returns:
            An updated tests_results dictionary without the platform-specific
            testharness.js tests that required new baselines to be downloaded
            from `webkit-patch rebaseline-cl`.
        """
        modified_tests = self.get_modified_existing_tests()
        tests_to_rebaseline, tests_results = self.get_tests_to_rebaseline(modified_tests, tests_results)
        _log.debug('Tests to rebaseline: %r', tests_to_rebaseline)
        if tests_to_rebaseline:
            webkit_patch = self.host.filesystem.join(
                self.finder.chromium_base(), self.finder.webkit_base(), self.finder.path_to_script('webkit-patch'))
            self.host.executive.run_command([
                'python',
                webkit_patch,
                'rebaseline-cl',
                '--verbose',
                '--no-trigger-jobs',
            ] + tests_to_rebaseline)
        return tests_results

    def get_modified_existing_tests(self):
        """Returns a list of layout test names for layout tests that have been modified."""
        diff_output = self.host.executive.run_command(
            ['git', 'diff', 'origin/master', '--name-only', '--diff-filter=AMR'])  # Added, modified, and renamed files.
        paths_from_chromium_root = diff_output.splitlines()
        modified_tests = []
        for path in paths_from_chromium_root:
            absolute_path = self.host.filesystem.join(self.finder.chromium_base(), path)
            if not self.host.filesystem.exists(absolute_path):
                _log.warning('File does not exist: %s', absolute_path)
                continue
            test_path = self.finder.layout_test_name(path)
            if test_path:
                modified_tests.append(test_path)
        return modified_tests

    def get_tests_to_rebaseline(self, modified_tests, test_results):
        """Returns a list of tests to download new baselines for.

        Creates a list of tests to rebaseline depending on the tests' platform-
        specific results. In general, this will be non-ref tests that failed
        due to a baseline mismatch (rather than crash or timeout).

        Args:
            modified_tests: A list of paths to modified files (which should
                be added, removed or modified files in the imported w3c
                directory), relative to the LayoutTests directory.
            test_results: A dictionary of failing tests results.

        Returns:
            A pair: A set of tests to be rebaselined, and a modified copy of
            the test results dictionary. The tests to be rebaselined should include
            testharness.js tests that failed due to a baseline mismatch.
        """
        test_results = copy.deepcopy(test_results)
        tests_to_rebaseline = set()
        for test_path in modified_tests:
            if not (self.is_js_test(test_path) and test_results.get(test_path)):
                continue
            for platform in test_results[test_path].keys():
                if test_results[test_path][platform]['actual'] not in ['CRASH', 'TIMEOUT']:
                    del test_results[test_path][platform]
                    tests_to_rebaseline.add(test_path)
        return sorted(tests_to_rebaseline), test_results

    def is_js_test(self, test_path):
        """Checks whether a given file is a testharness.js test.

        Args:
            test_path: A file path relative to the layout tests directory.
                This might correspond to a deleted file or a non-test.
        """
        absolute_path = self.host.filesystem.join(self.finder.layout_tests_dir(), test_path)
        test_parser = TestParser(absolute_path, self.host)
        if not test_parser.test_doc:
            return False
        return test_parser.is_jstest()
Example #3
class GDBCrashLogGenerator(object):
    _find_pid_regex = re.compile(r'PID: (\d+) \(.*\)')

    def __init__(self, executive, name, pid, newer_than, filesystem,
                 path_to_driver, port_name, configuration):
        self.name = name
        self.pid = pid
        self.newer_than = newer_than
        self._filesystem = filesystem
        self._path_to_driver = path_to_driver
        self._executive = executive
        self._port_name = port_name
        self._configuration = configuration
        self._webkit_finder = WebKitFinder(filesystem)

    def _get_gdb_output(self, coredump_path):
        process_name = self._filesystem.join(
            os.path.dirname(str(self._path_to_driver())), self.name)
        cmd = [
            'gdb', '-ex', 'thread apply all bt 1024', '--batch', process_name,
            coredump_path
        ]
        proc = self._executive.popen(cmd,
                                     stdin=None,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        errors = [
            stderr_line.strip().decode('utf8', 'ignore')
            for stderr_line in stderr.splitlines()
        ]
        if proc.returncode != 0:
            stdout = (
                'ERROR: The gdb process exited with non-zero return code %s\n\n'
                % proc.returncode) + stdout
        return (stdout.decode('utf8', 'ignore'), errors)
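
    # The command built above is equivalent to running (illustrative paths,
    # not from the original module):
    #   gdb -ex 'thread apply all bt 1024' --batch ./WebKitWebProcess core.dump
    # which prints a backtrace for every thread of the dumped process.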

    def _get_tmp_file_name(self, coredumpctl, filename):
        if coredumpctl[0] == 'flatpak-spawn':
            return "/run/host/" + filename

        return filename

    def _get_trace_from_systemd(self, coredumpctl, pid):
        if os.path.isfile("/.flatpak-info"):
            return self._get_trace_from_flatpak()

        # Allow up to 5 seconds for the backtrace to be generated on the systemd side.
        for try_number in range(5):
            if try_number != 0:
                # Looping, it means we consider the logs might not be ready yet.
                time.sleep(1)

            try:
                info = self._executive.run_command(coredumpctl + [
                    'info',
                    "--since=" + time.strftime("%a %Y-%m-%d %H:%M:%S %Z",
                                               time.localtime(self.newer_than))
                ],
                                                   return_stderr=True)
            except (ScriptError, OSError):
                continue

            found_newer = False
            # coredumpctl will use the latest core dump with the specified
            # PID; assume it is the right one.
            pids = self._find_pid_regex.findall(info)
            if not pids:
                continue

            pid = pids[0]
            with tempfile.NamedTemporaryFile() as temp_file:
                if self._executive.run_command(
                        coredumpctl +
                    ['dump', pid, '--output', temp_file.name],
                        return_exit_code=True):
                    continue

                return self._get_gdb_output(
                    self._get_tmp_file_name(coredumpctl, temp_file.name))

        return '', []
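
    # Outside flatpak, the loop above shells out to coredumpctl roughly as
    # follows (illustrative timestamp and PID):
    #   coredumpctl info --since='Mon 2024-01-01 12:00:00 UTC'
    #   coredumpctl dump 1234 --output /tmp/tmpXXXXXX
    # and then feeds the dumped core to _get_gdb_output().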

    def _get_trace_from_flatpak(self):
        if self.newer_than:
            coredump_since = "--gdb-stack-trace=@%f" % self.newer_than
        else:
            coredump_since = "--gdb-stack-trace"
        webkit_flatpak_path = self._webkit_finder.path_to_script(
            'webkit-flatpak')
        cmd = ['flatpak-spawn', '--host']

        # Forward WEBKIT_FLATPAK_USER_DIR so webkit-flatpak can use the same flatpak
        # install as the current one.
        user_dir = os.environ.get('WEBKIT_FLATPAK_USER_DIR')
        if user_dir:
            cmd.append("--env=WEBKIT_FLATPAK_USER_DIR=%s" % user_dir)

        cmd.extend([
            webkit_flatpak_path,
            '--%s' % self._port_name,
            "--%s" % self._configuration.lower(), "--verbose", coredump_since
        ])

        proc = self._executive.popen(cmd,
                                     stdin=None,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
        crash_log, stderr = proc.communicate()
        errors = string_utils.decode(str(stderr or '<empty>'),
                                     errors='ignore').splitlines()
        return crash_log, errors

    def generate_crash_log(self, stdout, stderr):
        pid_representation = str(self.pid or '<unknown>')
        log_directory = os.environ.get("WEBKIT_CORE_DUMPS_DIRECTORY")
        errors = []
        crash_log = ''
        expected_crash_dump_filename = "core-pid_%s.dump" % pid_representation
        proc_name = "%s" % (self.name)

        def match_filename(filesystem, directory, filename):
            if self.pid:
                return filename == expected_crash_dump_filename
            return filename.find(self.name) > -1

        # Poor man's `which`; ignore any failure.
        for coredumpctl in [['coredumpctl'],
                            ['flatpak-spawn', '--host', 'coredumpctl'], []]:
            try:
                if not self._executive.run_command(coredumpctl,
                                                   return_exit_code=True):
                    break
            except:
                continue

        if log_directory:
            dumps = self._filesystem.files_under(log_directory,
                                                 file_filter=match_filename)
            if dumps:
                # Get the most recent coredump matching the pid and/or process name.
                coredump_path = sorted(dumps)[-1]
                if not self.newer_than or self._filesystem.mtime(
                        coredump_path) > self.newer_than:
                    crash_log, errors = self._get_gdb_output(coredump_path)
        elif coredumpctl:
            crash_log, errors = self._get_trace_from_systemd(
                coredumpctl, pid_representation)

        stderr_lines = errors + string_utils.decode(
            str(stderr or '<empty>'), errors='ignore').splitlines()
        errors_str = '\n'.join(
            ('STDERR: ' + stderr_line) for stderr_line in stderr_lines)
        cppfilt_proc = self._executive.popen(['c++filt'],
                                             stdin=subprocess.PIPE,
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE)
        errors_str = cppfilt_proc.communicate(
            string_utils.encode(errors_str))[0]
        errors_str = string_utils.decode(errors_str, errors='ignore')

        if not crash_log:
            if not log_directory:
                log_directory = "/path/to/coredumps"
            core_pattern = self._filesystem.join(log_directory,
                                                 "core-pid_%p.dump")
            crash_log = """\
Coredump %(expected_crash_dump_filename)s not found. To enable crash logs:

- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
- enable core dumps: ulimit -c unlimited
- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(log_directory)s

""" % locals()

        return (stderr, """\
crash log for %(proc_name)s (pid %(pid_representation)s):

%(crash_log)s
%(errors_str)s""" % locals())