Example #1
    def test_get_failing_results_dict_webdriver_failing_results_(self):
        host = self.mock_host()
        host.results_fetcher.set_results(
            Build('MOCK Try Trusty', 123),
            WebTestResults({
                'tests': {
                    'external': {
                        'wpt': {
                            'x': {
                                'failing-test.html': {
                                    'expected': 'PASS',
                                    'actual': 'IMAGE',
                                    'is_unexpected': True,
                                },
                            },
                        },
                    },
                },
            }))

        host.results_fetcher.set_webdriver_test_results(
            Build('MOCK Try Trusty', 123), "tryserver.blink",
            WebTestResults({
                'tests': {
                    'external': {
                        'wpt': {
                            'y': {
                                'webdriver-fail.html': {
                                    'expected': 'PASS',
                                    'actual': 'FAIL',
                                    'is_unexpected': True,
                                },
                            },
                        },
                    },
                },
            }))
        updater = WPTExpectationsUpdater(host)
        results = updater.get_failing_results_dicts(
            Build('MOCK Try Trusty', 123))
        self.assertEqual(len(results), 2)
        self.assertEqual(results, [{
            'external/wpt/x/failing-test.html': {
                DesktopConfig('test-linux-trusty'):
                SimpleTestResult(
                    actual='IMAGE',
                    expected='PASS',
                    bug='crbug.com/626703',
                ),
            }
        }, {
            'external/wpt/y/webdriver-fail.html': {
                DesktopConfig('test-linux-trusty'):
                SimpleTestResult(
                    actual='FAIL',
                    expected='PASS',
                    bug='crbug.com/626703',
                ),
            },
        }])
Example #2
    def _build(result_dict):
        """Converts a parsed try result dict to a Build object."""
        builder_name = result_dict['builder_name']
        url = result_dict['url']
        if url is None:
            return Build(builder_name, None)

        # LUCI jobs
        # TODO(martiniss): Switch to using build number once `git cl
        # try-results` uses buildbucket v2 API.
        tags = result_dict.get('tags', [])
        for tag in tags:
            if tag.startswith("build_address:"):
                build_number = tag.split('/')[-1]
                return Build(builder_name, int(build_number))

        # BuildBot jobs
        match = re.match(r'.*/builds/(\d+)/?$', url)
        if match:
            build_number = match.group(1)
            return Build(builder_name, int(build_number))

        # Swarming tasks
        match = re.match(r'.*/task/([0-9a-f]+)(/?|\?.*)$', url)
        assert match, '%s did not match expected format' % url
        task_id = match.group(1)
        return Build(builder_name, task_id)
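For reference, here is a minimal, standalone sketch of the mapping performed by _build above, run against a made-up result dict. The namedtuple stand-in for Build and the sample URL/tags are illustrative assumptions, not real blinkpy types or data.

import re
from collections import namedtuple

# Hypothetical stand-in for blinkpy's Build, used only for this illustration.
Build = namedtuple('Build', ['builder_name', 'build_number'])

def build_from_result_dict(result_dict):
    # Same order of checks as _build above: build_address tag first, then a
    # BuildBot-style URL, then a Swarming task URL.
    builder_name = result_dict['builder_name']
    url = result_dict.get('url')
    if url is None:
        return Build(builder_name, None)
    for tag in result_dict.get('tags', []):
        if tag.startswith('build_address:'):
            return Build(builder_name, int(tag.split('/')[-1]))
    match = re.match(r'.*/builds/(\d+)/?$', url)
    if match:
        return Build(builder_name, int(match.group(1)))
    match = re.match(r'.*/task/([0-9a-f]+)(/?|\?.*)$', url)
    assert match, '%s did not match expected format' % url
    return Build(builder_name, match.group(1))

# A LUCI job: the build number comes from the build_address tag.
print(build_from_result_dict({
    'builder_name': 'linux-rel',
    'url': 'https://ci.example.org/b/1234',
    'tags': ['build_address:luci.chromium.try/linux-rel/100'],
}))  # Build(builder_name='linux-rel', build_number=100)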
Example #3
    def test_rebaseline_test_passes_unexpectedly_but_on_another_port(self):
        # Similar to test_rebaseline_test_passes_unexpectedly, except that the
        # build was run on a different port than the port we are rebaselining
        # (possible when running rebaseline-cl --fill-missing), in which case
        # we don't update the expectations.
        self._write(
            self.test_expectations_path,
            '# results: [ Failure ]\nuserscripts/all-pass.html [ Failure ]\n')
        self._write('userscripts/all-pass.html', 'Dummy test contents')
        test_baseline_set = TestBaselineSet(self.tool)
        self.tool.results_fetcher.set_results(
            Build('MOCK Mac10.11'),
            WebTestResults({
                'tests': {
                    'userscripts': {
                        'all-pass.html': {
                            'expected': 'FAIL',
                            'actual': 'PASS',
                            'is_unexpected': True
                        }
                    }
                }
            }))
        test_baseline_set.add('userscripts/all-pass.html',
                              Build('MOCK Mac10.11'), 'MOCK Mac10.10')

        self.command.rebaseline(self.options(), test_baseline_set)

        new_expectations = self._read(self.test_expectations_path)
        self.assertMultiLineEqual(
            new_expectations,
            '# results: [ Failure ]\nuserscripts/all-pass.html [ Failure ]\n')
        self.assertEqual(self.tool.executive.calls, [])
Example #4
 def test_get_failing_results_dict_some_failing_results(self):
     host = self.mock_host()
     host.results_fetcher.set_results(
         Build('MOCK Try Mac10.10', 123),
         WebTestResults({
             'tests': {
                 'external': {
                     'wpt': {
                         'x': {
                             'failing-test.html': {
                                 'expected': 'PASS',
                                 'actual': 'IMAGE',
                                 'is_unexpected': True,
                             },
                         },
                     },
                 },
             },
         }))
     updater = WPTExpectationsUpdater(host)
     results_dict = updater.get_failing_results_dict(
         Build('MOCK Try Mac10.10', 123))
     self.assertEqual(
         results_dict, {
             'external/wpt/x/failing-test.html': {
                 'test-mac-mac10.10':
                 SimpleTestResult(
                     actual='IMAGE',
                     expected='PASS',
                     bug='crbug.com/626703',
                 ),
             },
         })
Example #5
 def test_run_commit_queue_for_cl_pass(self):
     host = MockHost()
     host.filesystem.write_text_file(
         MOCK_WEB_TESTS + 'W3CImportExpectations', '')
     importer = TestImporter(host)
     # Only the latest job for each builder is counted.
     importer.git_cl = MockGitCL(host,
                                 status='lgtm',
                                 try_job_results={
                                     Build('cq-builder-a', 120):
                                     TryJobStatus('COMPLETED', 'FAILURE'),
                                     Build('cq-builder-a', 123):
                                     TryJobStatus('COMPLETED', 'SUCCESS'),
                                 })
     success = importer.run_commit_queue_for_cl()
     self.assertTrue(success)
     self.assertLog([
         'INFO: Triggering CQ try jobs.\n',
         'INFO: All jobs finished.\n',
         'INFO: CQ appears to have passed; trying to commit.\n',
         'INFO: Update completed.\n',
     ])
     self.assertEqual(importer.git_cl.calls, [
         ['git', 'cl', 'try'],
         ['git', 'cl', 'upload', '-f', '--send-mail'],
         ['git', 'cl', 'set-commit'],
     ])
Example #6
    def test_rebaseline_keeps_flaky_expectations(self):
        # Flaky expectations should be kept even if the test passes.
        self._write(
            self.test_expectations_path,
            '# results: [ Pass Failure ]\nuserscripts/flaky-test.html [ Pass Failure ]\n'
        )
        self._write('userscripts/flaky-test.html', 'Dummy test contents')
        self.tool.results_fetcher.set_results(
            Build('MOCK Mac10.11'),
            WebTestResults({
                'tests': {
                    'userscripts': {
                        'flaky-test.html': {
                            'expected': 'PASS FAIL',
                            'actual': 'PASS',
                        }
                    }
                }
            }))
        test_baseline_set = TestBaselineSet(self.tool)
        test_baseline_set.add('userscripts/flaky-test.html',
                              Build('MOCK Mac10.11'))

        self.command.rebaseline(self.options(), test_baseline_set)

        new_expectations = self._read(self.test_expectations_path)
        self.assertMultiLineEqual(
            new_expectations,
            '# results: [ Pass Failure ]\nuserscripts/flaky-test.html [ Pass Failure ]\n'
        )
        self.assertEqual(self.tool.executive.calls, [])
Example #7
 def test_latest_try_jobs_failures(self):
     git_cl = GitCL(MockHost())
     git_cl._host.web = MockWeb(responses=[{
         'status_code':
         200,
         'body':
         SEARCHBUILDS_RESPONSE_PREFIX + b"""{
                 "builds": [
                     {
                         "status": "FAILURE",
                         "builder": {
                             "builder": "builder-a"
                         },
                         "number": 100
                     },
                     {
                         "status": "INFRA_FAILURE",
                         "builder": {
                             "builder": "builder-b"
                         },
                         "number": 200
                     }
                 ]
             }"""
     }])
     self.assertEqual(
         git_cl.latest_try_jobs(builder_names=['builder-a', 'builder-b']), {
             Build('builder-a', 100): TryJobStatus('COMPLETED', 'FAILURE'),
             Build('builder-b', 200): TryJobStatus('COMPLETED', 'FAILURE'),
         })
Example #8
 def test_run_commit_queue_for_cl_fail_cq(self):
     host = self.mock_host()
     host.filesystem.write_text_file(
         MOCK_WEB_TESTS + 'W3CImportExpectations', '')
     importer = self._get_test_importer(host)
     importer.git_cl = MockGitCL(host,
                                 status='lgtm',
                                 try_job_results={
                                     Build('cq-builder-a', 120):
                                     TryJobStatus('COMPLETED', 'SUCCESS'),
                                     Build('cq-builder-a', 123):
                                     TryJobStatus('COMPLETED', 'FAILURE'),
                                     Build('cq-builder-b', 200):
                                     TryJobStatus('COMPLETED', 'SUCCESS'),
                                 })
     importer.fetch_new_expectations_and_baselines = lambda: None
     success = importer.run_commit_queue_for_cl()
     self.assertFalse(success)
     self.assertLog([
         'INFO: Triggering CQ try jobs.\n',
         'INFO: All jobs finished.\n',
         'ERROR: CQ appears to have failed; aborting.\n',
     ])
     self.assertEqual(importer.git_cl.calls, [
         ['git', 'cl', 'try'],
         ['git', 'cl', 'set-close'],
     ])
Example #9
 def test_run_commit_queue_for_cl_fail_to_land(self):
     host = self.mock_host()
     host.filesystem.write_text_file(
         MOCK_WEB_TESTS + 'W3CImportExpectations', '')
     importer = self._get_test_importer(host)
     # Only the latest job for each builder is counted.
     importer.git_cl = MockGitCL(host,
                                 status='lgtm',
                                 try_job_results={
                                     Build('cq-builder-a', 120):
                                     TryJobStatus('COMPLETED', 'FAILURE'),
                                     Build('cq-builder-a', 123):
                                     TryJobStatus('COMPLETED', 'SUCCESS'),
                                 })
     importer.git_cl.wait_for_closed_status = lambda: False
     success = importer.run_commit_queue_for_cl()
     self.assertFalse(success)
     self.assertLog([
         'INFO: Triggering CQ try jobs.\n',
         'INFO: All jobs finished.\n',
         'INFO: CQ appears to have passed; trying to commit.\n',
         'ERROR: Cannot submit CL; aborting.\n',
     ])
     self.assertEqual(importer.git_cl.calls, [
         ['git', 'cl', 'try'],
         ['git', 'cl', 'upload', '-f', '--send-mail'],
         ['git', 'cl', 'set-commit'],
         ['git', 'cl', 'set-close'],
     ])
Example #10
 def test_filter_latest_jobs_higher_build_last(self):
     self.assertEqual(
         filter_latest_builds(
             [Build('foo', 3),
              Build('bar', 5),
              Build('foo', 5)]),
         [Build('bar', 5), Build('foo', 5)])
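The behaviour asserted here, keeping only the highest-numbered build per builder and ordering the result by builder name, can be sketched standalone as below. The namedtuple stand-in for Build and the helper name are assumptions for illustration; the real filter_latest_builds lives in blinkpy.

from collections import namedtuple

# Hypothetical stand-in for blinkpy's Build, used only for this illustration.
Build = namedtuple('Build', ['builder_name', 'build_number'])

def filter_latest_builds_sketch(builds):
    # For each builder, remember only the build with the highest number.
    latest = {}
    for build in builds:
        seen = latest.get(build.builder_name)
        if seen is None or build.build_number > seen.build_number:
            latest[build.builder_name] = build
    # Namedtuples compare field by field, so sorting orders by builder name.
    return sorted(latest.values())

assert filter_latest_builds_sketch(
    [Build('foo', 3), Build('bar', 5), Build('foo', 5)]) == [
        Build('bar', 5), Build('foo', 5)]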
Example #11
 def test_get_failing_results_dict_no_results(self):
     host = self.mock_host()
     host.results_fetcher = MockTestResultsFetcher()
     host.results_fetcher.set_results(Build('MOCK Try Mac10.10', 123), None)
     updater = WPTExpectationsUpdater(host)
     self.assertEqual(
         updater.get_failing_results_dict(Build('MOCK Try Mac10.10', 123)), {})
Example #12
    def test_rebaseline_test_passes_unexpectedly(self):
        # The test passes without any output. Its expectation should be updated
        # without calling rebaseline-test-internal.
        self._write(self.test_expectations_path,
                    'Bug(foo) userscripts/all-pass.html [ Failure ]\n')
        self._write('userscripts/all-pass.html', 'Dummy test contents')
        test_baseline_set = TestBaselineSet(self.tool)
        self.tool.results_fetcher.set_results(
            Build('MOCK Mac10.11'),
            WebTestResults({
                'tests': {
                    'userscripts': {
                        'all-pass.html': {
                            'expected': 'FAIL',
                            'actual': 'PASS',
                            'is_unexpected': True
                        }
                    }
                }
            }))
        test_baseline_set.add('userscripts/all-pass.html',
                              Build('MOCK Mac10.11'))

        self.command.rebaseline(self.options(), test_baseline_set)

        new_expectations = self._read(self.test_expectations_path)
        self.assertMultiLineEqual(
            new_expectations,
            'Bug(foo) [ Linux Mac10.10 Win ] userscripts/all-pass.html [ Failure ]\n'
        )
        self.assertEqual(self.tool.executive.calls, [])
Example #13
 def test_run_commit_queue_for_cl_pass(self):
     host = self.mock_host()
     host.filesystem.write_text_file(
         MOCK_WEB_TESTS + 'W3CImportExpectations', '')
     importer = self._get_test_importer(host)
     # Only the latest job for each builder is counted.
     importer.git_cl = MockGitCL(host,
                                 status='lgtm',
                                 try_job_results={
                                     Build('cq-builder-a', 120):
                                     TryJobStatus('COMPLETED', 'FAILURE'),
                                     Build('cq-builder-a', 123):
                                     TryJobStatus('COMPLETED', 'SUCCESS'),
                                 })
     success = importer.run_commit_queue_for_cl()
     self.assertTrue(success)
     self.assertLog([
         'INFO: Triggering CQ try jobs.\n',
         'INFO: All jobs finished.\n',
         'INFO: CQ appears to have passed; sending to the rubber-stamper '
         'bot for CR+1 and commit.\n',
         'INFO: If the rubber-stamper bot rejects the CL, you either need '
         'to modify the benign file patterns, or manually CR+1 and land the '
         'import yourself if it touches code files. See https://chromium.'
         'googlesource.com/infra/infra/+/refs/heads/master/go/src/infra/'
         'appengine/rubber-stamper/README.md\n',
         'INFO: Update completed.\n',
     ])
     self.assertEqual(importer.git_cl.calls, [
         ['git', 'cl', 'try'],
         [
             'git', 'cl', 'upload', '-f', '--send-mail',
             '--enable-auto-submit', '--reviewers', RUBBER_STAMPER_BOT
         ],
     ])
Example #14
    def test_rebaseline_test_passes_unexpectedly_everywhere(self):
        # Similar to test_rebaseline_test_passes_unexpectedly, except that the
        # test passes on all ports.
        self._write(
            self.test_expectations_path,
            '# results: [ Failure ]\nuserscripts/all-pass.html [ Failure ]\n')
        self._write('userscripts/all-pass.html', 'Dummy test contents')
        test_baseline_set = TestBaselineSet(self.tool)
        for builder in [
                'MOCK Win7', 'MOCK Win10', 'MOCK Mac10.10', 'MOCK Mac10.11',
                'MOCK Precise', 'MOCK Trusty'
        ]:
            self.tool.results_fetcher.set_results(
                Build(builder),
                WebTestResults({
                    'tests': {
                        'userscripts': {
                            'all-pass.html': {
                                'expected': 'FAIL',
                                'actual': 'PASS',
                                'is_unexpected': True
                            }
                        }
                    }
                }))
            test_baseline_set.add('userscripts/all-pass.html', Build(builder))

        self.command.rebaseline(self.options(), test_baseline_set)

        new_expectations = self._read(self.test_expectations_path)
        self.assertMultiLineEqual(new_expectations, '# results: [ Failure ]\n')
        self.assertEqual(self.tool.executive.calls, [])
Example #15
 def test_has_failing_try_results_only_success_and_started(self):
     self.assertFalse(
         GitCL.some_failed({
             Build('some-builder', 90):
             TryJobStatus('COMPLETED', 'SUCCESS'),
             Build('some-builder', 100):
             TryJobStatus('STARTED'),
         }))
Example #16
 def test_all_success_with_started_build(self):
     self.assertFalse(
         GitCL.all_success({
             Build('some-builder', 1):
             TryJobStatus('COMPLETED', 'SUCCESS'),
             Build('some-builder', 2):
             TryJobStatus('STARTED'),
         }))
Example #17
 def test_getters(self):
     test_baseline_set = TestBaselineSet(host=self.host)
     test_baseline_set.add('a/x.html', Build('MOCK Mac10.12'))
     test_baseline_set.add('a/x.html', Build('MOCK Win10'))
     self.assertEqual(test_baseline_set.test_prefixes(), ['a/x.html'])
     self.assertEqual(
         test_baseline_set.build_port_pairs('a/x.html'),
         [(Build(builder_name='MOCK Mac10.12'), 'test-mac-mac10.12'),
          (Build(builder_name='MOCK Win10'), 'test-win-win10')])
Example #18
 def test_str_basic(self):
     test_baseline_set = TestBaselineSet(host=self.host)
     test_baseline_set.add('a/x.html', Build('MOCK Mac10.12'))
     test_baseline_set.add('a/x.html', Build('MOCK Win10'))
     self.assertEqual(str(test_baseline_set), (
         '<TestBaselineSet with:\n'
         '  a/x.html: Build(builder_name=\'MOCK Mac10.12\', build_number=None, build_id=None), test-mac-mac10.12\n'
         '  a/x.html: Build(builder_name=\'MOCK Win10\', build_number=None, build_id=None), test-win-win10>'
     ))
Example #19
 def __init__(self, *args, **kwargs):
     self.linux_build = Build('linux-rel', 100)
     self.mac_build = Build('mac-rel', 101)
     self.win_build = Build('win7-rel', 102)
     self.mock_try_results = {
         self.linux_build: TryJobStatus('COMPLETED', 'SUCCESS'),
         self.win_build: TryJobStatus('COMPLETED', 'SUCCESS'),
         self.mac_build: TryJobStatus('COMPLETED', 'SUCCESS')
     }
     super(TryFlagTest, self).__init__(*args, **kwargs)
Example #20
 def test_filter_latest(self):
     try_job_results = {
         Build('builder-a', 100): TryJobStatus('COMPLETED', 'FAILURE'),
         Build('builder-a', 200): TryJobStatus('COMPLETED', 'SUCCESS'),
         Build('builder-b', 50): TryJobStatus('SCHEDULED'),
     }
     self.assertEqual(
         GitCL.filter_latest(try_job_results), {
             Build('builder-a', 200): TryJobStatus('COMPLETED', 'SUCCESS'),
             Build('builder-b', 50): TryJobStatus('SCHEDULED'),
         })
Example #21
 def test_submit_cl_timeout_and_already_merged(self):
     # Here we simulate a case where we time out waiting for the CQ to submit
     # a CL because we miss the notification that it was merged. We then get
     # an error when trying to close the CL because it has already been merged.
     host = self.mock_host()
     host.filesystem.write_text_file(
         MOCK_WEB_TESTS + 'W3CImportExpectations', '')
     importer = self._get_test_importer(host)
     # Define some error text that looks like a typical ScriptError.
     git_error_text = (
         'This is a git Script Error\n'
         '...there is usually a stack trace here with some calls\n'
         '...and maybe other calls\n'
         'And finally, there is the exception:\n'
         'GerritError: Conflict: change is merged\n')
     importer.git_cl = MockGitCL(
         host,
         status='lgtm',
         git_error_output={'set-close': git_error_text},
         # Only the latest job for each builder is counted.
         try_job_results={
             Build('cq-builder-a', 120):
             TryJobStatus('COMPLETED', 'FAILURE'),
             Build('cq-builder-a', 123):
             TryJobStatus('COMPLETED', 'SUCCESS')
         })
     importer._need_sheriff_attention = lambda: False
     importer.git_cl.wait_for_closed_status = lambda timeout_seconds: False
     success = importer.run_commit_queue_for_cl()
     # Since the CL is already merged, we absorb the error and treat it as success.
     self.assertTrue(success)
     self.assertLog([
         'INFO: Triggering CQ try jobs.\n',
         'INFO: All jobs finished.\n',
         'INFO: CQ appears to have passed; sending to the rubber-stamper '
         'bot for CR+1 and commit.\n',
         'INFO: If the rubber-stamper bot rejects the CL, you either need '
         'to modify the benign file patterns, or manually CR+1 and land the '
         'import yourself if it touches code files. See https://chromium.'
         'googlesource.com/infra/infra/+/refs/heads/main/go/src/infra/'
         'appengine/rubber-stamper/README.md\n',
         'ERROR: Cannot submit CL; aborting.\n',
         'ERROR: CL is already merged; treating as success.\n',
     ])
     self.assertEqual(importer.git_cl.calls, [
         ['git', 'cl', 'try'],
         [
             'git', 'cl', 'upload', '-f', '--send-mail',
             '--enable-auto-submit', '--reviewers', RUBBER_STAMPER_BOT
         ],
         ['git', 'cl', 'set-close'],
     ])
Example #22
 def test_non_prefix_mode(self):
     test_baseline_set = TestBaselineSet(host=self.host, prefix_mode=False)
     # This test does not exist in setUp.
     test_baseline_set.add('wpt/foo.html', Build('some-wpt-bot'))
     # But it should still appear in various getters since no test lookup is
     # done when prefix_mode=False.
     self.assertEqual(
         list(test_baseline_set),
         [('wpt/foo.html', Build('some-wpt-bot'), 'linux-trusty')])
     self.assertEqual(test_baseline_set.all_tests(), ['wpt/foo.html'])
     self.assertEqual(test_baseline_set.test_prefixes(), ['wpt/foo.html'])
     self.assertEqual(test_baseline_set.build_port_pairs('wpt/foo.html'),
                      [(Build('some-wpt-bot'), 'linux-trusty')])
Example #23
    def test_run_single_platform_failure(self):
        """Tests the main run method in a case where one test fails on one platform."""
        host = self.mock_host()

        # Fill in an initial value for TestExpectations
        expectations_path = host.port_factory.get(
        ).path_to_generic_test_expectations_file()
        host.filesystem.write_text_file(expectations_path,
                                        MARKER_COMMENT + '\n')

        # Set up fake try job results.
        updater = WPTExpectationsUpdater(host)
        updater.git_cl = MockGitCL(
            updater.host, {
                Build('MOCK Try Mac10.10', 333):
                TryJobStatus('COMPLETED', 'FAILURE'),
                Build('MOCK Try Mac10.11', 111):
                TryJobStatus('COMPLETED', 'SUCCESS'),
                Build('MOCK Try Trusty', 222):
                TryJobStatus('COMPLETED', 'SUCCESS'),
                Build('MOCK Try Precise', 333):
                TryJobStatus('COMPLETED', 'SUCCESS'),
                Build('MOCK Try Win10', 444):
                TryJobStatus('COMPLETED', 'SUCCESS'),
                Build('MOCK Try Win7', 555):
                TryJobStatus('COMPLETED', 'SUCCESS'),
            })

        # Set up failing results for one try bot. It shouldn't matter what the
        # results are for the other builders: we shouldn't need to fetch them
        # at all, because the try job status already tells us that all of
        # their tests passed.
        host.results_fetcher.set_results(
            Build('MOCK Try Mac10.10', 333),
            WebTestResults({
                'tests': {
                    'external': {
                        'wpt': {
                            'test': {
                                'path.html': {
                                    'expected': 'PASS',
                                    'actual': 'TIMEOUT',
                                    'is_unexpected': True,
                                }
                            }
                        }
                    }
                }
            }))
        self.assertEqual(0, updater.run(args=[]))

        # Results are only fetched for failing builds.
        self.assertEqual(host.results_fetcher.fetched_builds,
                         [Build('MOCK Try Mac10.10', 333)])

        self.assertEqual(
            host.filesystem.read_text_file(expectations_path),
            '# ====== New tests from wpt-importer added here ======\n'
            'crbug.com/626703 [ Mac10.10 ] external/wpt/test/path.html [ Timeout ]\n'
        )
Example #24
    def test_generic_baseline_paths(self):
        test_baseline_set = TestBaselineSet(self.tool)
        # Multiple ports shouldn't produce duplicate baseline paths.
        test_baseline_set.add('passes/text.html', Build('MOCK Win7'))
        test_baseline_set.add('passes/text.html', Build('MOCK Win10'))

        # pylint: disable=protected-access
        baseline_paths = self.command._generic_baseline_paths(
            test_baseline_set)
        self.assertEqual(baseline_paths, [
            '/test.checkout/wtests/passes/text-expected.png',
            '/test.checkout/wtests/passes/text-expected.txt',
            '/test.checkout/wtests/passes/text-expected.wav',
        ])
Example #25
    def test_rebaseline_keeps_skip_expectations(self):
        # [ Skip ] expectations should always be kept.
        self._write(self.test_expectations_path,
                    ('# tags: [ Mac Win ]\n'
                     '# results: [ Skip ]\n'
                     '[ Mac ] userscripts/skipped-test.html [ Skip ]\n'
                     '[ Win ] userscripts/skipped-test.html [ Skip ]\n'))
        self._write('userscripts/skipped-test.html', 'Dummy test contents')
        self.tool.results_fetcher.set_results(
            Build('MOCK Mac10.11'),
            WebTestResults({
                'tests': {
                    'userscripts': {
                        'skipped-test.html': {
                            'expected': 'SKIP',
                            'actual': 'SKIP',
                        }
                    }
                }
            }))
        self.tool.results_fetcher.set_results(
            Build('MOCK Win7'),
            WebTestResults({
                'tests': {
                    'userscripts': {
                        'skipped-test.html': {
                            'expected': 'SKIP',
                            'actual': 'SKIP',
                        }
                    }
                }
            }))
        test_baseline_set = TestBaselineSet(self.tool)
        test_baseline_set.add('userscripts/skipped-test.html',
                              Build('MOCK Mac10.11'))
        test_baseline_set.add('userscripts/skipped-test.html',
                              Build('MOCK Win7'))

        self.command.rebaseline(self.options(), test_baseline_set)

        new_expectations = self._read(self.test_expectations_path)
        self.assertMultiLineEqual(
            new_expectations,
            ('# tags: [ Mac Win ]\n'
             '# results: [ Skip ]\n'
             '[ Mac ] userscripts/skipped-test.html [ Skip ]\n'
             '[ Win ] userscripts/skipped-test.html [ Skip ]\n'))
        self.assertEqual(self.tool.executive.calls, [])
Example #26
 def try_job_results(self,
                     issue_number=None,
                     builder_names=None,
                     cq_only=False,
                     patchset=None):
     """Returns a dict mapping Build objects to TryJobStatus objects."""
     if not issue_number:
         issue_number = self.get_issue_number()
     raw_results_json = self.fetch_raw_try_job_results(
         issue_number, patchset)
     build_to_status = {}
     if 'builds' not in raw_results_json:
         return build_to_status
     for build in raw_results_json['builds']:
         builder_name = build['builder']['builder']
         if builder_names and builder_name not in builder_names:
             continue
         is_cq = 'tags' in build and {
             'key': 'user_agent',
             'value': 'cq'
         } in build['tags']
         is_experimental = 'tags' in build and {
             'key': 'cq_experimental',
             'value': 'true'
         } in build['tags']
         if cq_only and not (is_cq and not is_experimental):
             continue
         build_number = build.get('number')
         status = build['status']
         build_to_status[Build(
             builder_name,
             build_number)] = TryJobStatus.from_bb_status(status)
     return build_to_status
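To make the tag filtering above concrete, here is the same membership check run against a hand-written build entry. The sample data is an assumption shaped like one item of raw_results_json['builds'], not real buildbucket output.

# Illustrative only: a made-up build entry.
sample_build = {
    'builder': {'builder': 'linux-rel'},
    'number': 100,
    'status': 'SUCCESS',
    'tags': [
        {'key': 'user_agent', 'value': 'cq'},
        {'key': 'cq_experimental', 'value': 'false'},
    ],
}
is_cq = 'tags' in sample_build and {
    'key': 'user_agent', 'value': 'cq'} in sample_build['tags']
is_experimental = 'tags' in sample_build and {
    'key': 'cq_experimental', 'value': 'true'} in sample_build['tags']
# With cq_only=True this build is kept: it was triggered by the CQ and is not
# marked experimental.
assert is_cq and not is_experimental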
Example #27
    def test_rebaseline_handles_smoke_tests(self):
        # This test is just like test_rebaseline_handles_platform_skips, except that we check for
        # a test not being in the SmokeTests file, instead of using overrides files.
        # If a test is not part of the smoke tests, we count that as passing on ports that only
        # run smoke tests, and do not think that we still need to rebaseline it.
        self._write(
            self.test_expectations_path,
            '# tags: [ Linux Mac10.10 Win ]\n# results: [ Failure ]\nuserscripts/first-test.html [ Failure ]\n'
        )
        self._write('SmokeTests', 'fast/html/article-element.html')
        self._setup_mock_build_data()
        test_baseline_set = TestBaselineSet(self.tool)
        test_baseline_set.add('userscripts/first-test.html',
                              Build('MOCK Mac10.11'))

        self.command.rebaseline(self.options(), test_baseline_set)

        new_expectations = self._read(self.test_expectations_path)
        self.assertMultiLineEqual(
            new_expectations,
            ('# tags: [ Linux Mac10.10 Win ]\n'
             '# results: [ Failure ]\n'
             '[ Linux ] userscripts/first-test.html [ Failure ]\n'
             '[ Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
             '[ Win ] userscripts/first-test.html [ Failure ]\n'))
Example #28
    def test_rebaseline_handles_skips_in_file(self):
        # This test is like test_rebaseline_handles_platform_skips, except that the
        # Skip is in the same (generic) file rather than a platform file. In this case,
        # the Skip line should be left unmodified. Note that the first line is now
        # qualified as "[Linux Mac Win]"; if it was unqualified, it would conflict with
        # the second line.
        self._write(self.test_expectations_path,
                    ('# tags: [ Linux Mac Mac10.10 Win ]\n'
                     '# results: [ Failure Skip ]\n'
                     '[ Linux ] userscripts/first-test.html [ Failure ]\n'
                     '[ Mac ] userscripts/first-test.html [ Failure ]\n'
                     '[ Win ] userscripts/first-test.html [ Skip ]\n'))
        self._setup_mock_build_data()
        test_baseline_set = TestBaselineSet(self.tool)
        test_baseline_set.add('userscripts/first-test.html',
                              Build('MOCK Mac10.11'))

        self.command.rebaseline(self.options(), test_baseline_set)

        new_expectations = self._read(self.test_expectations_path)
        self.assertMultiLineEqual(
            new_expectations,
            ('# tags: [ Linux Mac Mac10.10 Win ]\n'
             '# results: [ Failure Skip ]\n'
             '[ Linux ] userscripts/first-test.html [ Failure ]\n'
             '[ Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
             '[ Win ] userscripts/first-test.html [ Skip ]\n'))
Example #29
    def test_rebaseline_handles_platform_skips(self):
        # This test is just like test_rebaseline_updates_expectations_file_all_platforms(),
        # except that if a particular port happens to SKIP a test in an overrides file,
        # we count that as passing, and do not think that we still need to rebaseline it.
        self._write(
            self.test_expectations_path,
            '# tags: [ Linux Mac10.10 Win ]\n# results: [ Failure ]\nuserscripts/first-test.html [ Failure ]\n'
        )
        self._write('NeverFixTests', ('# tags: [ Android ]\n'
                                      '# results: [ Skip ]\n'
                                      '[ Android ] userscripts [ Skip ]\n'))
        self._setup_mock_build_data()
        test_baseline_set = TestBaselineSet(self.tool)
        test_baseline_set.add('userscripts/first-test.html',
                              Build('MOCK Mac10.11'))

        self.command.rebaseline(self.options(), test_baseline_set)

        new_expectations = self._read(self.test_expectations_path)
        self.assertMultiLineEqual(
            new_expectations,
            ('# tags: [ Linux Mac10.10 Win ]\n'
             '# results: [ Failure ]\n'
             '[ Linux ] userscripts/first-test.html [ Failure ]\n'
             '[ Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
             '[ Win ] userscripts/first-test.html [ Failure ]\n'))
Example #30
 def _setup_mock_build_data(self):
     for builder in ['MOCK Win7', 'MOCK Win7 (dbg)', 'MOCK Mac10.11']:
         self.tool.results_fetcher.set_results(
             Build(builder),
             WebTestResults({
                 'tests': {
                     'userscripts': {
                         'first-test.html': {
                             'expected': 'PASS',
                             'actual': 'FAIL',
                             'is_unexpected': True,
                             'artifacts': {
                                 'actual_image': ['first-test-actual.png'],
                                 'expected_image':
                                 ['first-test-expected.png'],
                                 'actual_text': ['first-test-actual.txt'],
                                 'expected_text':
                                 ['first-test-expected.txt']
                             }
                         },
                         'second-test.html': {
                             'expected': 'FAIL',
                             'actual': 'FAIL',
                             'artifacts': {
                                 'actual_image': ['second-test-actual.png'],
                                 'expected_image':
                                 ['second-test-expected.png'],
                                 'actual_audio': ['second-test-actual.wav'],
                                 'expected_audio':
                                 ['second-test-expected.wav']
                             }
                         }
                     }
                 }
             }))