def test_run_no_try_results(self):
    """run() raises a ScriptError when git cl reports no try jobs."""
    expectations_updater = WPTExpectationsUpdater(self.mock_host())
    # An empty try-job mapping simulates a CL with no try job results.
    expectations_updater.git_cl = MockGitCL(expectations_updater.host, {})
    with self.assertRaises(ScriptError) as context:
        expectations_updater.run(args=[])
    self.assertEqual(context.exception.message,
                     'No try job information was collected.')
# ---- Example 2 ----
 def fetch_new_expectations_and_baselines(self):
     """Updates expectations and baselines, then commits and uploads.

     Runs the WPT expectations updater to add expectation lines and
     download baselines from try job results, commits every modified
     file, and uploads the commit to Gerrit via `git cl`.
     """
     _log.info('Adding test expectations lines to LayoutTests/TestExpectations.')
     wpt_updater = WPTExpectationsUpdater(self.host)
     wpt_updater.run(args=[])
     commit_message = 'Update test expectations and baselines.'
     self.check_run(['git', 'commit', '-a', '-m', commit_message])
     self.git_cl.run(['upload', '-t', commit_message, '--gerrit'])
# ---- Example 3 ----
    def fetch_new_expectations_and_baselines(self):
        """Appends expectation lines and downloads baselines from try results.

        Assuming try job results are available, new expectation lines are
        added to TestExpectations and fresh baselines are downloaded based
        on those results. Equivalent to invoking the
        `wpt-update-expectations` script directly.
        """
        _log.info('Adding test expectations lines to LayoutTests/TestExpectations.')
        wpt_updater = WPTExpectationsUpdater(self.host)
        wpt_updater.run(args=[])
    def test_run_single_platform_failure(self):
        """Tests the main run method in a case where one test fails on one platform."""
        host = self.mock_host()

        # Seed TestExpectations with just the importer marker comment.
        expectations_path = host.port_factory.get(
        ).path_to_generic_test_expectations_file()
        host.filesystem.write_text_file(expectations_path,
                                        MARKER_COMMENT + '\n')

        # Fake try job statuses: a single failing build, everything else green.
        failing_build = Build('MOCK Try Mac10.10', 333)
        try_job_results = {failing_build: TryJobStatus('COMPLETED', 'FAILURE')}
        for builder_name, build_number in (('MOCK Try Mac10.11', 111),
                                           ('MOCK Try Trusty', 222),
                                           ('MOCK Try Precise', 333),
                                           ('MOCK Try Win10', 444),
                                           ('MOCK Try Win7', 555)):
            try_job_results[Build(builder_name, build_number)] = TryJobStatus(
                'COMPLETED', 'SUCCESS')
        updater = WPTExpectationsUpdater(host)
        updater.git_cl = MockGitCL(updater.host, try_job_results)

        # Only the failing bot needs detailed results. The green builders'
        # results never need to be fetched, since their try job status
        # already says that all of the tests passed.
        host.buildbot.set_results(
            failing_build,
            LayoutTestResults({
                'tests': {
                    'external': {
                        'wpt': {
                            'test': {
                                'path.html': {
                                    'expected': 'PASS',
                                    'actual': 'TIMEOUT',
                                    'is_unexpected': True,
                                }
                            }
                        }
                    }
                }
            }))
        self.assertEqual(0, updater.run(args=[]))

        # Results are only fetched for failing builds.
        self.assertEqual(host.buildbot.fetched_builds, [failing_build])

        self.assertEqual(
            host.filesystem.read_text_file(expectations_path),
            '# ====== New tests from wpt-importer added here ======\n'
            'crbug.com/626703 [ Mac10.10 ] external/wpt/test/path.html [ Timeout ]\n'
        )
# ---- Example 5 ----
 def test_run_no_issue_number(self):
     """run() returns 1 and logs an error when the branch has no issue."""
     expectations_updater = WPTExpectationsUpdater(self.mock_host())
     # Stub the issue lookup so the updater sees no issue on this branch.
     expectations_updater.get_issue_number = lambda: 'None'
     self.assertEqual(1, expectations_updater.run(args=[]))
     self.assertLog(['ERROR: No issue on current branch.\n'])
 def test_run_no_issue_number(self):
     """run() raises ScriptError when git cl reports no issue number."""
     expectations_updater = WPTExpectationsUpdater(self.mock_host())
     expectations_updater.git_cl = MockGitCL(
         expectations_updater.host, issue_number='None')
     with self.assertRaises(ScriptError) as context:
         expectations_updater.run(args=[])
     self.assertEqual(context.exception.message, 'No issue on current branch.')
# ---- Example 7 ----
 def test_run_no_try_results(self):
     """run() returns 1 and logs an error when no try jobs are found."""
     expectations_updater = WPTExpectationsUpdater(self.mock_host())
     # Stub the try-job lookup to report that no jobs exist.
     expectations_updater.get_latest_try_jobs = lambda: []
     self.assertEqual(1, expectations_updater.run(args=[]))
     self.assertLog(['ERROR: No try job information was collected.\n'])
 def fetch_new_expectations_and_baselines(self):
     """Adds expectation lines and downloads baselines from try job results.

     Runs the WPT expectations updater against the current host; it does
     not itself commit or upload any resulting change.
     """
     _log.info('Adding test expectations lines to LayoutTests/TestExpectations.')
     wpt_updater = WPTExpectationsUpdater(self.host)
     wpt_updater.run(args=[])
 def test_run_no_try_results(self):
     """run() returns 1 and logs an error when git cl has no try jobs."""
     expectations_updater = WPTExpectationsUpdater(self.mock_host())
     # An empty try-job mapping simulates a CL with no try job results.
     expectations_updater.git_cl = MockGitCL(expectations_updater.host, {})
     self.assertEqual(1, expectations_updater.run(args=[]))
     self.assertLog(['ERROR: No try job information was collected.\n'])
 def test_run_no_issue_number(self):
     """run() returns 1 and logs an error when git cl has no issue number."""
     expectations_updater = WPTExpectationsUpdater(self.mock_host())
     expectations_updater.git_cl = MockGitCL(
         expectations_updater.host, issue_number='None')
     self.assertEqual(1, expectations_updater.run(args=[]))
     self.assertLog(['ERROR: No issue on current branch.\n'])