Example #1
 def test_latest_try_jobs(self):
     git_cl = GitCL(MockHost())
     git_cl.fetch_raw_try_job_results = lambda **_: [
         {
             'builder_name': 'builder-b',
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'url': 'http://build.chromium.org/p/master/builders/builder-b/builds/100',
         },
         {
             'builder_name': 'builder-b',
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'url': 'http://build.chromium.org/p/master/builders/builder-b/builds/90',
         },
         {
             'builder_name': 'builder-a',
             'status': 'SCHEDULED',
             'result': None,
             'url': None,
         },
         {
             'builder_name': 'builder-c',
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'url': 'http://ci.chromium.org/master/builder-c/123',
         },
     ]
     self.assertEqual(
         git_cl.latest_try_jobs(['builder-a', 'builder-b']),
         {
             Build('builder-a'): TryJobStatus('SCHEDULED'),
             Build('builder-b', 100): TryJobStatus('COMPLETED', 'SUCCESS'),
         })
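Example #1 pins down a deduplication contract: when several raw results exist for the same builder, only the newest build, identified by the number at the end of the URL, survives. A minimal sketch of that behavior, with hypothetical names and none of the real GitCL internals:

 def pick_latest_per_builder(raw_results, builder_names):
     # Keep the newest raw result per requested builder. The build number
     # is assumed to be the trailing path segment of the URL, when present.
     latest = {}
     for result in raw_results:
         name = result['builder_name']
         if name not in builder_names:
             continue
         url = result['url']
         number = int(url.rstrip('/').split('/')[-1]) if url else None
         previous = latest.get(name)
         if previous is None or (number or 0) > (previous[0] or 0):
             latest[name] = (number, result['status'], result['result'])
     return latest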
Example #2
 def test_latest_try_builds_failures(self):
     git_cl = GitCL(MockHost())
     git_cl.fetch_raw_try_job_results = lambda **_: [
         {
             'builder_name': 'builder-a',
             'status': 'COMPLETED',
             'result': 'FAILURE',
             'failure_reason': 'BUILD_FAILURE',
             'url': 'http://build.chromium.org/p/master/builders/builder-a/builds/100',
         },
         {
             'builder_name': 'builder-b',
             'status': 'COMPLETED',
             'result': 'FAILURE',
             'failure_reason': 'INFRA_FAILURE',
             'url': 'http://build.chromium.org/p/master/builders/builder-b/builds/200',
         },
     ]
     self.assertEqual(
         git_cl.latest_try_jobs(['builder-a', 'builder-b']), {
             Build('builder-a', 100): TryJobStatus('COMPLETED', 'FAILURE'),
             Build('builder-b', 200): TryJobStatus('COMPLETED', 'FAILURE'),
         })
Example #3
 def test_latest_try_builds(self):
     git_cl = GitCL(MockHost())
     git_cl.fetch_try_results = lambda: [
         {
             'builder_name': 'builder-b',
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'url': 'http://build.chromium.org/p/master/builders/some-builder/builds/100',
         },
         {
             'builder_name': 'builder-b',
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'url': 'http://build.chromium.org/p/master/builders/some-builder/builds/90',
         },
         {
             'builder_name': 'builder-a',
             'status': 'SCHEDULED',
             'result': None,
             'url': None,
         },
         {
             'builder_name': 'builder-c',
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'url': 'http://build.chromium.org/p/master/builders/some-builder/builds/123',
         },
     ]
     self.assertEqual(
         git_cl.latest_try_jobs(['builder-a', 'builder-b']),
         [Build('builder-a'), Build('builder-b', 100)])
Example #4
 def test_execute_with_unfinished_jobs(self):
     builds = {
         Build('MOCK Try Win', 5000): TryJobStatus('COMPLETED', 'FAILURE'),
         Build('MOCK Try Mac', 4000): TryJobStatus('STARTED'),
         Build('MOCK Try Linux', 6000): TryJobStatus('SCHEDULED'),
     }
     git_cl = GitCL(self.tool)
     git_cl.get_issue_number = lambda: '11112222'
     git_cl.latest_try_jobs = lambda _: builds
     self.command.git_cl = lambda: git_cl
     exit_code = self.command.execute(self.command_options(), [], self.tool)
     self.assertEqual(exit_code, 1)
     self.assertLog([
         'INFO: Finished try jobs:\n',
         'INFO:   MOCK Try Win\n',
         'INFO: Scheduled or started try jobs:\n',
         'INFO:   MOCK Try Linux\n',
         'INFO:   MOCK Try Mac\n',
         'INFO: There are some builders with no results:\n',
         'INFO:   MOCK Try Linux\n',
         'INFO:   MOCK Try Mac\n',
         'INFO: Would you like to try to fill in missing results with\n'
         'available results? This assumes that layout test results\n'
         'for the platforms with missing results are the same as\n'
         'results on other platforms.\n',
         'INFO: Aborting.\n',
     ])
Example #5
 def test_latest_try_builds_ignores_swarming(self):
     git_cl = GitCL(MockHost())
     git_cl.fetch_raw_try_job_results = lambda **_: [
         {
             'builder_name': 'builder-b',
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'url': 'http://build.chromium.org/p/master/builders/builder-b/builds/100',
         },
         {
             'builder_name': 'builder-b',
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'url': 'https://ci.chromium.org/swarming/task/1234abcd1234abcd?server=chromium-swarm.appspot.com',
         },
     ]
     self.assertEqual(
         git_cl.latest_try_jobs(['builder-b']), {
             Build('builder-b', 100): TryJobStatus('COMPLETED', 'SUCCESS'),
         })
Example #6
    def test_execute_with_trigger_jobs_option(self):
        builds = [
            Build('MOCK Try Win', 5000),
            Build('MOCK Try Mac', 4000),
        ]
        git_cl = GitCL(self.tool)
        git_cl.get_issue_number = lambda: '11112222'
        git_cl.latest_try_jobs = lambda _: builds
        self.command.git_cl = lambda: git_cl

        return_code = self.command.execute(
            self.command_options(trigger_jobs=True), [], self.tool)

        self.assertEqual(return_code, 1)
        self.assertLog([
            'INFO: Triggering try jobs for:\n',
            'INFO:   MOCK Try Linux\n',
            'INFO: Please re-run webkit-patch rebaseline-cl once all pending try jobs have finished.\n',
        ])
        self.assertEqual(self.tool.executive.calls, [[
            'python',
            '/mock-checkout/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/manifest',
            '--work', '--tests-root',
            '/mock-checkout/third_party/WebKit/LayoutTests/external/wpt'
        ], [
            'git', 'cl', 'try', '-m', 'tryserver.blink', '-b', 'MOCK Try Linux'
        ]])
Example #7
 def test_latest_try_jobs_cq_only(self):
     git_cl = GitCL(MockHost())
     git_cl.fetch_raw_try_job_results = lambda **_: [
         {
             'builder_name': 'cq-a',
             'experimental': False,
             'result': None,
             'status': 'SCHEDULED',
             'tags': ['user_agent:cq'],
             'url': None,
         },
         {
             'builder_name': 'cq-a-experimental',
             'experimental': True,
             'result': None,
             'status': 'SCHEDULED',
             'tags': ['user_agent:cq'],
             'url': None,
         },
         {
             'builder_name': 'other',
             'experimental': False,
             'status': 'SCHEDULED',
             'result': None,
             'tags': ['user_agent:git_cl_try'],
             'url': None,
         },
     ]
     self.assertEqual(
         git_cl.latest_try_jobs(cq_only=True),
         {
             Build('cq-a'): TryJobStatus('SCHEDULED'),
         })
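Example #7 implies a tag-based filter: with cq_only=True, only non-experimental jobs tagged 'user_agent:cq' are kept. A hedged sketch of that predicate, illustrative rather than the actual GitCL code:

 def is_cq_job(raw_result):
     # In the fixture data above, CQ jobs carry a 'user_agent:cq' tag;
     # experimental CQ jobs are excluded from the result.
     return ('user_agent:cq' in raw_result.get('tags', [])
             and not raw_result.get('experimental', False))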
Example #8
 def test_latest_try_jobs_started_build_luci_url(self):
     git_cl = GitCL(MockHost())
     git_cl.fetch_raw_try_job_results = lambda **_: [
         {
             'builder_name': 'builder-a',
             'status': 'STARTED',
             'result': None,
             'url': 'http://ci.chromium.org/p/master/some-builder/100',
         },
     ]
     self.assertEqual(git_cl.latest_try_jobs(['builder-a']),
                      {Build('builder-a', 100): TryJobStatus('STARTED')})
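Examples #5 and #8 together suggest how a build number is recovered from a job URL: take the trailing path segment when it is an integer, and treat swarming task URLs as carrying no build number at all. A hypothetical helper along those lines:

 def build_number_from_url(url):
     # Swarming task URLs (see Example #5) identify tasks, not builds.
     if url is None or '/swarming/task/' in url:
         return None
     last_segment = url.rstrip('/').split('/')[-1]
     return int(last_segment) if last_segment.isdigit() else None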
Example #9
 def test_execute_no_try_jobs_started_and_no_trigger_jobs(self):
     # If there are no try jobs started yet and --no-trigger-jobs is passed,
     # then we just abort immediately.
     git_cl = GitCL(self.tool)
     git_cl.get_issue_number = lambda: '11112222'
     git_cl.latest_try_jobs = lambda _: {}
     self.command.git_cl = lambda: git_cl
     exit_code = self.command.execute(
         self.command_options(trigger_jobs=False), [], self.tool)
     self.assertEqual(exit_code, 1)
     self.assertLog([
         'INFO: No finished try jobs.\n',
         'INFO: Aborted: no try jobs and --no-trigger-jobs passed.\n',
     ])
Example #10
 def test_latest_try_builds_started_builds(self):
     git_cl = GitCL(MockHost())
     git_cl.fetch_try_results = lambda: [
         {
             'builder_name': 'builder-a',
             'status': 'STARTED',
             'result': None,
             'url': 'http://build.chromium.org/p/master/builders/some-builder/builds/100',
         },
     ]
     self.assertEqual(git_cl.latest_try_jobs(['builder-a']),
                      [Build('builder-a', 100)])
Example #11
 def test_execute_with_no_trigger_jobs_option(self):
     builds = [
         Build('MOCK Try Win', 5000),
         Build('MOCK Try Mac', 4000),
     ]
     git_cl = GitCL(self.tool)
     git_cl.get_issue_number = lambda: '11112222'
     git_cl.latest_try_jobs = lambda _: builds
     self.command.git_cl = lambda: git_cl
     return_code = self.command.execute(
         self.command_options(trigger_jobs=False), [], self.tool)
     self.assertEqual(return_code, 1)
     self.assertLog([
         'ERROR: The following builders have no jobs:\n',
         'ERROR:   MOCK Try Linux\n',
         'ERROR: Add --fill-missing to continue rebaselining anyway, '
         'filling in results for missing platforms.\n',
     ])
Example #12
 def test_execute_with_trigger_jobs_option(self):
     builds = [
         Build('MOCK Try Win', 5000),
         Build('MOCK Try Mac', 4000),
     ]
     git_cl = GitCL(self.tool)
     git_cl.get_issue_number = lambda: '11112222'
     git_cl.latest_try_jobs = lambda _: builds
     self.command.git_cl = lambda: git_cl
     return_code = self.command.execute(
         self.command_options(trigger_jobs=True), [], self.tool)
     self.assertEqual(return_code, 1)
     self.assertLog([
         'INFO: Triggering try jobs for:\n',
         'INFO:   MOCK Try Linux\n',
         'INFO: Once all pending try jobs have finished, please re-run\n'
         'webkit-patch rebaseline-cl to fetch new baselines.\n',
     ])
Example #13
 def test_execute_no_try_jobs_started_triggers_jobs(self):
     # If there are no try jobs started yet, by default the tool will
     # trigger new try jobs.
     git_cl = GitCL(self.tool)
     git_cl.get_issue_number = lambda: '11112222'
     git_cl.latest_try_jobs = lambda _: {}
     self.command.git_cl = lambda: git_cl
     exit_code = self.command.execute(self.command_options(), [], self.tool)
     self.assertEqual(exit_code, 1)
     self.assertLog([
         'INFO: No finished try jobs.\n',
         'INFO: Triggering try jobs:\n',
         'INFO:   MOCK Try Linux\n',
         'INFO:   MOCK Try Mac\n',
         'INFO:   MOCK Try Win\n',
         'INFO: Once all pending try jobs have finished, please re-run\n'
         'webkit-patch rebaseline-cl to fetch new baselines.\n'
     ])
Example #14
 def test_execute_one_missing_build(self):
     builds = {
         Build('MOCK Try Win', 5000): TryJobStatus('COMPLETED', 'FAILURE'),
         Build('MOCK Try Mac', 4000): TryJobStatus('COMPLETED', 'FAILURE'),
     }
     git_cl = GitCL(self.tool)
     git_cl.get_issue_number = lambda: '11112222'
     git_cl.latest_try_jobs = lambda _: builds
     self.command.git_cl = lambda: git_cl
     exit_code = self.command.execute(self.command_options(), [], self.tool)
     self.assertEqual(exit_code, 1)
     self.assertLog([
         'INFO: Finished try jobs:\n',
         'INFO:   MOCK Try Mac\n',
         'INFO:   MOCK Try Win\n',
         'INFO: Triggering try jobs:\n',
         'INFO:   MOCK Try Linux\n',
         'INFO: Once all pending try jobs have finished, please re-run\n'
         'webkit-patch rebaseline-cl to fetch new baselines.\n',
     ])
Example #15
 def test_execute_with_passing_jobs(self):
     builds = {
         Build('MOCK Try Win', 5000): TryJobStatus('COMPLETED', 'FAILURE'),
         Build('MOCK Try Mac', 4000): TryJobStatus('COMPLETED', 'SUCCESS'),
         Build('MOCK Try Linux', 6000): TryJobStatus('COMPLETED', 'SUCCESS'),
     }
     git_cl = GitCL(self.tool)
     git_cl.get_issue_number = lambda: '11112222'
     git_cl.latest_try_jobs = lambda _: builds
     self.command.git_cl = lambda: git_cl
     exit_code = self.command.execute(self.command_options(), [], self.tool)
     self.assertEqual(exit_code, 0)
     self.assertLog([
         'INFO: Finished try jobs found for all try bots.\n',
         'INFO: Rebaselining one/flaky-fail.html\n',
         'INFO: Rebaselining one/missing.html\n',
         'INFO: Rebaselining one/slow-fail.html\n',
         'INFO: Rebaselining one/text-fail.html\n',
         'INFO: Rebaselining two/image-fail.html\n'
     ])
Example #16
    def setUp(self):
        BaseTestCase.setUp(self)
        LoggingTestCase.setUp(self)

        builds = [
            Build('MOCK Try Win', 5000),
            Build('MOCK Try Mac', 4000),
            Build('MOCK Try Linux', 6000),
        ]

        git_cl = GitCL(self.tool)
        git_cl.get_issue_number = lambda: '11112222'
        git_cl.latest_try_jobs = lambda _: builds
        self.command.git_cl = lambda: git_cl

        git = MockGit(filesystem=self.tool.filesystem,
                      executive=self.tool.executive)
        git.changed_files = lambda **_: [
            'third_party/WebKit/LayoutTests/one/text-fail.html',
            'third_party/WebKit/LayoutTests/one/flaky-fail.html',
        ]
        self.tool.git = lambda: git

        self.tool.builders = BuilderList({
            'MOCK Try Win': {
                'port_name': 'test-win-win7',
                'specifiers': ['Win7', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Try Linux': {
                'port_name': 'test-linux-trusty',
                'specifiers': ['Trusty', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Try Mac': {
                'port_name': 'test-mac-mac10.11',
                'specifiers': ['Mac10.11', 'Release'],
                'is_try_builder': True,
            },
        })
        layout_test_results = LayoutTestResults({
            'tests': {
                'one': {
                    'crash.html': {
                        'expected': 'PASS',
                        'actual': 'CRASH',
                        'is_unexpected': True
                    },
                    'expected-fail.html': {
                        'expected': 'FAIL',
                        'actual': 'IMAGE+TEXT'
                    },
                    'flaky-fail.html': {
                        'expected': 'PASS',
                        'actual': 'PASS TEXT',
                        'is_unexpected': True
                    },
                    'missing.html': {
                        'expected': 'PASS',
                        'actual': 'MISSING',
                        'is_unexpected': True
                    },
                    'slow-fail.html': {
                        'expected': 'SLOW',
                        'actual': 'TEXT',
                        'is_unexpected': True
                    },
                    'text-fail.html': {
                        'expected': 'PASS',
                        'actual': 'TEXT',
                        'is_unexpected': True
                    },
                    'unexpected-pass.html': {
                        'expected': 'FAIL',
                        'actual': 'PASS',
                        'is_unexpected': True
                    },
                },
                'two': {
                    'image-fail.html': {
                        'expected': 'PASS',
                        'actual': 'IMAGE',
                        'is_unexpected': True
                    }
                },
            },
        })

        for build in builds:
            self.tool.buildbot.set_results(build, layout_test_results)
            self.tool.buildbot.set_retry_sumary_json(
                build,
                json.dumps({
                    'failures': [
                        'one/flaky-fail.html',
                        'one/missing.html',
                        'one/slow-fail.html',
                        'one/text-fail.html',
                        'two/image-fail.html',
                    ],
                    'ignored': [],
                }))

        # Write to the mock filesystem so that these tests are considered to exist.
        tests = [
            'one/flaky-fail.html',
            'one/missing.html',
            'one/slow-fail.html',
            'one/text-fail.html',
            'two/image-fail.html',
        ]
        for test in tests:
            self._write(
                self.mac_port.host.filesystem.join(
                    self.mac_port.layout_tests_dir(), test), 'contents')
Example #17
class WPTExpectationsUpdater(object):
    def __init__(self, host):
        self.host = host
        self.port = self.host.port_factory.get()
        self.git_cl = GitCL(host)
        self.finder = PathFinder(self.host.filesystem)
        self.ports_with_no_results = set()
        self.ports_with_all_pass = set()

    def run(self, args=None):
        """Downloads text new baselines and adds test expectations lines."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        issue_number = self.get_issue_number()
        if issue_number == 'None':
            _log.error('No issue on current branch.')
            return 1

        build_to_status = self.get_latest_try_jobs()
        _log.debug('Latest try jobs: %r', build_to_status)
        if not build_to_status:
            _log.error('No try job information was collected.')
            return 1

        # The manifest may be used below to check which tests are reference tests.
        WPTManifest.ensure_manifest(self.host)

        # Here we build up a dict of failing test results for all platforms.
        test_expectations = {}
        for build, job_status in build_to_status.iteritems():
            if job_status.result == 'SUCCESS':
                self.ports_with_all_pass.add(self.port_name(build))

            port_results = self.get_failing_results_dict(build)
            test_expectations = self.merge_dicts(test_expectations,
                                                 port_results)

        # And then we merge results for different platforms that had the same results.
        for test_name, platform_result in test_expectations.iteritems():
            # platform_result is a dict mapping platforms to results.
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        test_expectations = self.download_text_baselines(test_expectations)
        test_expectation_lines = self.create_line_list(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return 0

    def get_issue_number(self):
        """Returns current CL number. Can be replaced in unit tests."""
        return self.git_cl.get_issue_number()

    def get_latest_try_jobs(self):
        """Returns the latest finished try jobs as Build objects."""
        return self.git_cl.latest_try_jobs(self._get_try_bots())

    def get_failing_results_dict(self, build):
        """Returns a nested dict of failing test results.

        Retrieves a full list of layout test results from a builder result URL.
        Collects the builder name, platform and a list of tests that did not
        run as expected.

        Args:
            build: A Build object.

        Returns:
            A dictionary with the structure: {
                test-with-failing-result: {
                    'full-port-name': {
                        'expected': 'TIMEOUT',
                        'actual': 'CRASH',
                        'bug': 'crbug.com/11111'
                    }
                }
            }
            If results could be fetched but none are failing,
            this will return an empty dictionary.
        """
        port_name = self.port_name(build)
        if port_name in self.ports_with_all_pass:
            # All tests passed, so there should be no failing results.
            return {}
        layout_test_results = self.host.buildbot.fetch_results(build)
        if layout_test_results is None:
            _log.warning('No results for build %s', build)
            self.ports_with_no_results.add(self.port_name(build))
            return {}
        test_results = [
            result
            for result in layout_test_results.didnt_run_as_expected_results()
            if not result.did_pass()
        ]
        return self.generate_results_dict(self.port_name(build), test_results)

    @memoized
    def port_name(self, build):
        return self.host.builders.port_name_for_builder_name(
            build.builder_name)

    def generate_results_dict(self, full_port_name, test_results):
        """Makes a dict with results for one platform.

        Args:
            full_port_name: The fully-qualified port name, e.g. "win-win10".
            test_results: A list of LayoutTestResult objects.

        Returns:
            A dict mapping the full port name to a dict with the results for
            the given test and platform.
        """
        test_dict = {}
        for result in test_results:
            test_name = result.test_name()

            if not self.port.is_wpt_test(test_name):
                continue

            test_dict[test_name] = {
                full_port_name: {
                    'expected': result.expected_results(),
                    'actual': result.actual_results(),
                    'bug': 'crbug.com/626703'
                }
            }
        return test_dict

    def merge_dicts(self, target, source, path=None):
        """Recursively merges nested dictionaries.

        Args:
            target: First dictionary, which is updated based on source.
            source: Second dictionary, not modified.
            path: A list of keys, only used for making error messages.

        Returns:
            The updated target dictionary.
        """
        path = path or []
        for key in source:
            if key in target:
                if (isinstance(target[key], dict)
                        and isinstance(source[key], dict)):
                    self.merge_dicts(target[key], source[key],
                                     path + [str(key)])
                elif target[key] == source[key]:
                    pass
                else:
                    raise ValueError(
                        'The key %s already exists in the target dictionary.'
                        % '.'.join(path))
            else:
                target[key] = source[key]
        return target

    def merge_same_valued_keys(self, dictionary):
        """Merges keys in dictionary with same value.

        Traverses through a dict and compares the values of keys to one another.
        If the values match, the keys are combined into a tuple and the previous
        keys are removed from the dict.

        Args:
            dictionary: A dictionary with a dictionary as the value.

        Returns:
            A new dictionary in which keys with equal values have been merged.
            Example: {
                'one': {'foo': 'bar'},
                'two': {'foo': 'bar'},
                'three': {'foo': 'bar'}
            }
            is converted to a new dictionary that contains
            {('one', 'two', 'three'): {'foo': 'bar'}}
        """
        merged_dict = {}
        matching_value_keys = set()
        keys = sorted(dictionary.keys())
        while keys:
            current_key = keys[0]
            found_match = False
            if current_key == keys[-1]:
                merged_dict[current_key] = dictionary[current_key]
                keys.remove(current_key)
                break

            for next_item in keys[1:]:
                if dictionary[current_key] == dictionary[next_item]:
                    found_match = True
                    matching_value_keys.update([current_key, next_item])

                if next_item == keys[-1]:
                    if found_match:
                        merged_dict[tuple(
                            matching_value_keys)] = dictionary[current_key]
                        keys = [
                            k for k in keys if k not in matching_value_keys
                        ]
                    else:
                        merged_dict[current_key] = dictionary[current_key]
                        keys.remove(current_key)
            matching_value_keys = set()
        return merged_dict

    def get_expectations(self, results, test_name=''):
        """Returns a set of test expectations to use based on results.

        Returns a set of one or more test expectations based on the expected
        and actual results of a given test name. This function decides
        expectations for tests that could not be rebaselined.

        Args:
            results: A dictionary that maps one test to its results. Example:
                {
                    'test_name': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'bug': 'crbug.com/11111'
                    }
                }
            test_name: The test name string (optional).

        Returns:
            A set of one or more test expectation strings with the first letter
            capitalized. Example: set(['Failure', 'Timeout']).
        """
        # If the result is MISSING, this implies that the test was not
        # rebaselined and has an actual result but no baseline. We can't
        # add a Missing expectation (this is not allowed), but no other
        # expectation is correct.
        # We also want to skip any new manual tests that are not automated;
        # see crbug.com/708241 for context.
        if (results['actual'] == 'MISSING'
                or ('-manual.' in test_name
                    and results['actual'] == 'TIMEOUT')):
            return {'Skip'}
        expectations = set()
        failure_types = ('TEXT', 'IMAGE+TEXT', 'IMAGE', 'AUDIO')
        other_types = ('TIMEOUT', 'CRASH', 'PASS')
        for actual in results['actual'].split():
            if actual in failure_types:
                expectations.add('Failure')
            if actual in other_types:
                expectations.add(actual.capitalize())
        return expectations

    def create_line_list(self, merged_results):
        """Creates list of test expectations lines.

        Traverses through the given |merged_results| dictionary and parses the
        value to create one test expectations line per key.

        Args:
            merged_results: A dictionary with the format:
                {
                    'test_name': {
                        'platform': {
                            'expected': 'PASS',
                            'actual': 'FAIL',
                            'bug': 'crbug.com/11111'
                        }
                    }
                }

        Returns:
            A list of test expectations lines with the format:
            ['BUG_URL [PLATFORM(S)] TEST_NAME [EXPECTATION(S)]']
        """
        line_list = []
        for test_name, port_results in sorted(merged_results.iteritems()):
            if not self.port.is_wpt_test(test_name):
                _log.warning(
                    'Non-WPT test "%s" unexpectedly passed to create_line_list.',
                    test_name)
                continue
            for port_names, results in sorted(port_results.iteritems()):
                line_list.append(
                    self._create_line(test_name, port_names, results))
        return line_list

    def _create_line(self, test_name, port_names, results):
        """Constructs and returns a test expectation line string."""
        port_names = self.tuple_or_value_to_list(port_names)

        # The set of ports with no results is assumed to have no
        # overlap with the set of port names passed in here.
        assert (set(port_names) & self.ports_with_no_results) == set()

        # The ports with no results are generally ports of builders that
        # failed, maybe for unrelated reasons. At this point, we add ports
        # with no results to the list of platforms because we're guessing
        # that this new expectation might be cross-platform and should
        # also apply to any ports that we weren't able to get results for.
        port_names.extend(self.ports_with_no_results)

        specifier_part = self.specifier_part(port_names, test_name)

        line_parts = [results['bug']]
        if specifier_part:
            line_parts.append(specifier_part)
        line_parts.append(test_name)
        line_parts.append('[ %s ]' %
                          ' '.join(self.get_expectations(results, test_name)))

        return ' '.join(line_parts)

    def specifier_part(self, port_names, test_name):
        """Returns the specifier part for a new test expectations line.

        Args:
            port_names: A list of full port names that the line should apply to.
            test_name: The test name for the expectation line.

        Returns:
            The specifier part of the new expectation line, e.g. "[ Mac ]".
            This will be an empty string if the line should apply to all platforms.
        """
        specifiers = []
        for name in sorted(port_names):
            specifiers.append(
                self.host.builders.version_specifier_for_port_name(name))

        specifiers.extend(self.skipped_specifiers(test_name))
        specifiers = self.simplify_specifiers(
            specifiers, self.port.configuration_specifier_macros())
        if not specifiers:
            return ''
        return '[ %s ]' % ' '.join(specifiers)

    @staticmethod
    def tuple_or_value_to_list(tuple_or_value):
        """Converts a tuple to a list, and a string value to a one-item list."""
        if isinstance(tuple_or_value, tuple):
            return list(tuple_or_value)
        return [tuple_or_value]

    def skipped_specifiers(self, test_name):
        """Returns a list of platform specifiers for which the test is skipped."""
        specifiers = []
        for port in self.all_try_builder_ports():
            if port.skips_test(test_name):
                specifiers.append(
                    self.host.builders.version_specifier_for_port_name(
                        port.name()))
        return specifiers

    @memoized
    def all_try_builder_ports(self):
        """Returns a list of Port objects for all try builders."""
        return [
            self.host.port_factory.get_from_builder_name(name)
            for name in self._get_try_bots()
        ]

    @staticmethod
    def simplify_specifiers(specifiers, configuration_specifier_macros):
        """Converts some collection of specifiers to an equivalent and maybe shorter list.

        The input strings are all case-insensitive, but the strings in the
        return value will all be capitalized.

        Args:
            specifiers: A collection of lower-case specifiers.
            configuration_specifier_macros: A dict mapping "macros" for
                groups of specifiers to lists of specific specifiers. In
                practice, this is a dict mapping operating systems to
                supported versions, e.g. {"win": ["win7", "win10"]}.

        Returns:
            A shortened list of specifiers. For example, ["win7", "win10"]
            would be converted to ["Win"]. If the given list covers all
            supported platforms, then an empty list is returned.
            This list will be sorted and have capitalized specifier strings.
        """
        specifiers = {specifier.lower() for specifier in specifiers}
        for macro_specifier, version_specifiers in (
                configuration_specifier_macros.iteritems()):
            macro_specifier = macro_specifier.lower()
            version_specifiers = {
                specifier.lower()
                for specifier in version_specifiers
            }
            if version_specifiers.issubset(specifiers):
                specifiers -= version_specifiers
                specifiers.add(macro_specifier)
        if specifiers == {
                macro.lower()
                for macro in configuration_specifier_macros.keys()
        }:
            return []
        return sorted(specifier.capitalize() for specifier in specifiers)

    def write_to_test_expectations(self, line_list):
        """Writes the given lines to the TestExpectations file.

        The new lines are inserted after a marker comment line. If the marker
        comment line is not found, the marker line and the new lines are
        appended to the end of the file.

        Args:
            line_list: A list of lines to add to the TestExpectations file.
        """
        if not line_list:
            _log.info('No lines to write to TestExpectations.')
            return
        _log.info('Lines to write to TestExpectations:')
        for line in line_list:
            _log.info('  %s', line)

        expectations_file_path = (
            self.port.path_to_generic_test_expectations_file())
        file_contents = self.host.filesystem.read_text_file(
            expectations_file_path)

        line_list = [
            line for line in line_list if
            self._test_name_from_expectation_string(line) not in file_contents
        ]
        if not line_list:
            return

        marker_comment_index = file_contents.find(MARKER_COMMENT)
        if marker_comment_index == -1:
            file_contents += '\n%s\n' % MARKER_COMMENT
            file_contents += '\n'.join(line_list)
        else:
            end_of_marker_line = (file_contents[marker_comment_index:].find(
                '\n')) + marker_comment_index
            file_contents = file_contents[:end_of_marker_line + 1] + '\n'.join(
                line_list) + file_contents[end_of_marker_line:]

        self.host.filesystem.write_text_file(expectations_file_path,
                                             file_contents)

    @staticmethod
    def _test_name_from_expectation_string(expectation_string):
        return TestExpectationLine.tokenize_line(
            filename='', expectation_string=expectation_string,
            line_number=0).name

    def download_text_baselines(self, test_results):
        """Fetches new baseline files for tests that should be rebaselined.

        Invokes `webkit-patch rebaseline-cl` in order to download new baselines
        (-expected.txt files) for testharness.js tests that did not crash or
        time out. Then, the platform-specific test is removed from the overall
        failure test dictionary and the resulting dictionary is returned.

        Args:
            test_results: A dict mapping test name to platform to test results.

        Returns:
            An updated test_results dictionary which should only contain
            test failures for tests that couldn't be rebaselined.
        """
        tests_to_rebaseline, test_results = self.get_tests_to_rebaseline(
            test_results)
        if not tests_to_rebaseline:
            _log.info('No tests to rebaseline.')
            return test_results
        _log.info('Tests to rebaseline:')
        for test in tests_to_rebaseline:
            _log.info('  %s', test)

        webkit_patch = self.finder.path_from_tools_scripts('webkit-patch')
        self.host.executive.run_command([
            'python',
            webkit_patch,
            'rebaseline-cl',
            '--verbose',
            '--no-trigger-jobs',
            '--fill-missing',
        ] + tests_to_rebaseline)
        return test_results

    def get_tests_to_rebaseline(self, test_results):
        """Returns a list of tests to download new baselines for.

        Creates a list of tests to rebaseline depending on the tests' platform-
        specific results. In general, this will be non-ref tests that failed
        due to a baseline mismatch (rather than crash or timeout).

        Args:
            test_results: A dictionary of failing test results, mapping tests
                to platforms to result dicts.

        Returns:
            A pair: A set of tests to be rebaselined, and a modified copy of
            the test results dictionary. The tests to be rebaselined should
            include testharness.js tests that failed due to a baseline mismatch.
        """
        new_test_results = copy.deepcopy(test_results)
        tests_to_rebaseline = set()
        for test_path in test_results:
            for platform, result in test_results[test_path].iteritems():
                if self.can_rebaseline(test_path, result):
                    del new_test_results[test_path][platform]
                    tests_to_rebaseline.add(test_path)
        return sorted(tests_to_rebaseline), new_test_results

    def can_rebaseline(self, test_path, result):
        if self.is_reference_test(test_path):
            return False
        if result['actual'] in ('CRASH', 'TIMEOUT', 'MISSING'):
            return False
        return True

    def is_reference_test(self, test_path):
        """Checks whether a given file is a testharness.js test."""
        return bool(self.port.reference_files(test_path))

    def _get_try_bots(self):
        return self.host.builders.all_try_builder_names()
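The docstrings above fix the shape of a generated expectation line as 'BUG_URL [PLATFORM(S)] TEST_NAME [EXPECTATION(S)]'. A small illustration of the assembly done in _create_line, using made-up values:

 line_parts = ['crbug.com/626703', '[ Win ]',
               'external/wpt/foo/bar.html', '[ Failure ]']
 print(' '.join(line_parts))
 # crbug.com/626703 [ Win ] external/wpt/foo/bar.html [ Failure ]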
Example #18
    def setUp(self):
        BaseTestCase.setUp(self)
        LoggingTestCase.setUp(self)

        builds = [
            Build('MOCK Try Win', 5000),
            Build('MOCK Try Mac', 4000),
            Build('MOCK Try Linux', 6000),
        ]

        git_cl = GitCL(self.tool)
        git_cl.get_issue_number = lambda: '11112222'
        git_cl.latest_try_jobs = lambda _: builds
        self.command.git_cl = lambda: git_cl

        git = MockGit(filesystem=self.tool.filesystem,
                      executive=self.tool.executive)
        git.changed_files = lambda **_: [
            'third_party/WebKit/LayoutTests/fast/dom/prototype-inheritance.html',
            'third_party/WebKit/LayoutTests/fast/dom/prototype-taco.html',
        ]
        self.tool.git = lambda: git

        self.tool.builders = BuilderList({
            'MOCK Try Win': {
                'port_name': 'test-win-win7',
                'specifiers': ['Win7', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Try Linux': {
                'port_name': 'test-linux-trusty',
                'specifiers': ['Trusty', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Try Mac': {
                'port_name': 'test-mac-mac10.11',
                'specifiers': ['Mac10.11', 'Release'],
                'is_try_builder': True,
            },
        })
        layout_test_results = LayoutTestResults({
            'tests': {
                'fast': {
                    'dom': {
                        'prototype-inheritance.html': {
                            'expected': 'PASS',
                            'actual': 'TEXT',
                            'is_unexpected': True,
                        },
                        'prototype-banana.html': {
                            'expected': 'FAIL',
                            'actual': 'PASS',
                            'is_unexpected': True,
                        },
                        'prototype-taco.html': {
                            'expected': 'PASS',
                            'actual': 'PASS TEXT',
                            'is_unexpected': True,
                        },
                        'prototype-chocolate.html': {
                            'expected': 'FAIL',
                            'actual': 'IMAGE+TEXT'
                        },
                        'prototype-crashy.html': {
                            'expected': 'PASS',
                            'actual': 'CRASH',
                            'is_unexpected': True,
                        },
                        'prototype-newtest.html': {
                            'expected': 'PASS',
                            'actual': 'MISSING',
                            'is_unexpected': True,
                            'is_missing_text': True,
                        },
                        'prototype-slowtest.html': {
                            'expected': 'SLOW',
                            'actual': 'TEXT',
                            'is_unexpected': True,
                        },
                    }
                },
                'svg': {
                    'dynamic-updates': {
                        'SVGFEDropShadowElement-dom-stdDeviation-attr.html': {
                            'expected': 'PASS',
                            'actual': 'IMAGE',
                            'has_stderr': True,
                            'is_unexpected': True,
                        }
                    }
                }
            }
        })

        for build in builds:
            self.tool.buildbot.set_results(build, layout_test_results)
            self.tool.buildbot.set_retry_sumary_json(
                build,
                json.dumps({
                    'failures': [
                        'fast/dom/prototype-inheritance.html',
                        'fast/dom/prototype-newtest.html',
                        'fast/dom/prototype-slowtest.html',
                        'fast/dom/prototype-taco.html',
                        'svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html',
                    ],
                    'ignored': [],
                }))

        # Write to the mock filesystem so that these tests are considered to exist.
        tests = [
            'fast/dom/prototype-taco.html',
            'fast/dom/prototype-inheritance.html',
            'fast/dom/prototype-newtest.html',
            'svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html',
        ]
        for test in tests:
            self._write(
                self.mac_port.host.filesystem.join(
                    self.mac_port.layout_tests_dir(), test), 'contents')
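Finally, a note on simplify_specifiers in the WPTExpectationsUpdater class above: its docstring promises that a complete group of version specifiers collapses to its OS macro, and that covering every platform yields an empty list. A self-contained sketch of that contract (hypothetical stand-in code, not the method itself):

 def collapse(specifiers, macros):
     # Replace a complete set of versions with its macro, as described in
     # the simplify_specifiers docstring.
     specifiers = {s.lower() for s in specifiers}
     for macro, versions in macros.items():
         versions = {v.lower() for v in versions}
         if versions.issubset(specifiers):
             specifiers = (specifiers - versions) | {macro.lower()}
     if specifiers == {m.lower() for m in macros}:
         return []
     return sorted(s.capitalize() for s in specifiers)

 print(collapse(['win7', 'win10'], {'win': ['win7', 'win10'],
                                    'mac': ['mac10.11']}))  # ['Win']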