Code example #1
    def test_test_metadata(self):
        tr = test_results.TestResult('')
        base_path = '//' + RELATIVE_WEB_TESTS

        tr.test_name = "test-name"
        self.assertDictEqual(
            self.sink(True, tr)['testMetadata'],
            {
                'name': 'test-name',
                'location': {
                    'repo': 'https://chromium.googlesource.com/chromium/src',
                    'fileName': base_path + 'test-name',
                },
            },
        )

        tr.test_name = "///test-name"
        self.assertDictEqual(
            self.sink(True, tr)['testMetadata'],
            {
                'name': '///test-name',
                'location': {
                    'repo': 'https://chromium.googlesource.com/chromium/src',
                    'fileName': base_path + '///test-name',
                },
            },
        )
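
The two assertions above fix the shape of the testMetadata block: the location repo is the chromium/src mirror and fileName is the test name appended to the web-tests root. A minimal sketch of that construction follows, assuming the RELATIVE_WEB_TESTS value implied by code example #5; the helper name is hypothetical and not taken from the real sink code.

# Minimal sketch of the testMetadata structure asserted above. The helper name
# is hypothetical; RELATIVE_WEB_TESTS is assumed to match the prefix shown in
# code example #5.
RELATIVE_WEB_TESTS = 'third_party/blink/web_tests/'

def _test_metadata(test_name):
    return {
        'name': test_name,
        'location': {
            'repo': 'https://chromium.googlesource.com/chromium/src',
            'fileName': '//' + RELATIVE_WEB_TESTS + test_name,
        },
    }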
Code example #2
    def test_timeout(self):
        tr = test_results.TestResult(test_name='test-name')
        tr.type = ResultType.Timeout
        sent_data = self.sink(True, tr)

        # A timeout is reported with the 'ABORT' status.
        self.assertEqual(sent_data['status'], 'ABORT')
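
Together with test_sink (code example #8) and test_device_failure (code example #6), this pins down how result types map onto ResultDB statuses. A rough sketch of that mapping is shown below with hypothetical names; only the Timeout, Crash and device-failure rows are confirmed by these tests, and the sketch assumes the ResultType constants are plain strings.

# Hypothetical sketch of the ResultType -> ResultDB status mapping implied by
# these tests. Only TIMEOUT -> ABORT, CRASH -> CRASH and the device-failure
# override are asserted here; the remaining rows are assumptions.
_STATUS_MAP = {
    'PASS': 'PASS',       # assumption
    'FAIL': 'FAIL',       # assumption
    'TIMEOUT': 'ABORT',   # asserted by test_timeout
    'CRASH': 'CRASH',     # asserted by test_sink (code example #8)
    'SKIP': 'SKIP',       # assumption
}

def status_for_result(result):
    # A device failure is reported as 'ABORT' regardless of the result type
    # (see test_device_failure in code example #6).
    if getattr(result, 'device_failed', False):
        return 'ABORT'
    return _STATUS_MAP.get(result.type, 'ABORT')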
Code example #3
    def _mark_interrupted_tests_as_skipped(self, test_run_results):
        for test_input in self._test_inputs:
            if test_input.test_name not in test_run_results.results_by_name:
                result = test_results.TestResult(test_input.test_name, [test_failures.FailureEarlyExit()])
                # FIXME: We probably need to loop here if there are multiple iterations.
                # FIXME: Also, these results are really neither expected nor unexpected. We probably
                # need a third type of result.
                test_run_results.add(result, expected=False, test_is_slow=self._test_is_slow(test_input.test_name))
Code example #4
    def get_result(self, test_name, result_type=ResultType.Pass, run_time=0):
        failures = []
        if result_type == ResultType.Timeout:
            failures = [test_failures.FailureTimeout()]
        elif result_type == ResultType.Crash:
            failures = [test_failures.FailureCrash()]
        return test_results.TestResult(test_name,
                                       failures=failures,
                                       test_run_time=run_time)
Code example #5
    def test_test_location(self):
        tr = test_results.TestResult('')
        prefix = '//third_party/blink/web_tests/'
        sink = lambda tr: self.sink(True, tr)['testLocation']['fileName']

        tr.test_name = "test-name"
        self.assertEqual(sink(tr), prefix + 'test-name')
        tr.test_name = "///test-name"
        self.assertEqual(sink(tr), prefix + '///test-name')
Code example #6
    def test_device_failure(self):
        tr = test_results.TestResult(test_name='test-name')
        tr.type = ResultType.Failure
        tr.device_failed = True
        sent_data = self.sink(True, tr)

        # If the device failed, 'expected' must be False and 'status' must be 'ABORT'.
        self.assertEqual(sent_data['expected'], False)
        self.assertEqual(sent_data['status'], 'ABORT')
Code example #7
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    dummy_1, dummy_2 = DriverOutput(None, None, None, None), DriverOutput(None, None, None, None)
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout(dummy_1)]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash(dummy_1)]
    elif result_type == test_expectations.FAIL:
        failures = [test_failures.TestFailure(dummy_1, dummy_2)]
    return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
Code example #8
    def test_sink(self):
        tr = test_results.TestResult(test_name='test-name')
        tr.total_run_time = 123.456
        tr.type = ResultType.Crash
        sent_data = self.sink(True, tr)

        self.assertEqual(sent_data['testId'], 'test-name')
        self.assertEqual(sent_data['expected'], True)
        self.assertEqual(sent_data['status'], 'CRASH')
        self.assertEqual(sent_data['duration'], '123.456s')
Code example #9
    def get_result(self,
                   test_name,
                   result_type=test_expectations.PASS,
                   run_time=0):
        failures = []
        if result_type == test_expectations.TIMEOUT:
            failures = [test_failures.FailureTimeout()]
        elif result_type == test_expectations.CRASH:
            failures = [test_failures.FailureCrash()]
        return test_results.TestResult(test_name,
                                       failures=failures,
                                       test_run_time=run_time)
Code example #10
    def test_with_result_sink_section(self, urlopen):
        ctx = {'address': 'localhost:123', 'auth_token': 'secret'}
        self.luci_context(result_sink=ctx)
        r = CreateTestResultSink(self.port)
        self.assertIsNotNone(r)
        r.sink(True, test_results.TestResult('test'))

        urlopen.assert_called_once()
        req = urlopen.call_args[0][0]
        self.assertEqual(urlparse(req.get_full_url()).netloc, ctx['address'])
        self.assertEqual(req.get_header('Authorization'),
                         'ResultSink ' + ctx['auth_token'])
Code example #11
def get_result(test_name, result_type=ResultType.Pass, run_time=0):
    failures = []
    dummy_1, dummy_2 = DriverOutput(None, None, None, None), DriverOutput(
        None, None, None, None)
    if result_type == ResultType.Timeout:
        failures = [test_failures.FailureTimeout(dummy_1)]
    elif result_type == ResultType.Crash:
        failures = [test_failures.FailureCrash(dummy_1)]
    elif result_type == ResultType.Failure:
        failures = [test_failures.TestFailure(dummy_1, dummy_2)]
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
Code example #12
    def test_with_result_sink_section(self):
        ctx = {'address': 'localhost:123', 'auth_token': 'secret'}
        self.luci_context(result_sink=ctx)
        rs = CreateTestResultSink(self.port)
        self.assertIsNotNone(rs)

        response = requests.Response()
        response.status_code = 200
        with mock.patch.object(rs._session, 'post',
                               return_value=response) as m:
            rs.sink(True, test_results.TestResult('test'))
            self.assertTrue(m.called)
            self.assertEqual(
                urlparse(m.call_args[0][0]).netloc, ctx['address'])
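
Between them, the two test_with_result_sink_section variants (urllib in code example #10, requests here) only assert the target host taken from LUCI_CONTEXT and the 'ResultSink' authorization scheme. Below is a sketch of the request they imply, assuming a JSON pRPC endpoint; the RPC path, body shape and function name are assumptions, not taken from the real client.

# Rough sketch of the HTTP request implied by code examples #10 and #12. Only
# the host (from the result_sink section of LUCI_CONTEXT) and the
# 'ResultSink <auth_token>' Authorization header are asserted by the tests;
# the RPC path and body shape below are assumptions.
import json
import requests

def post_test_results(ctx, test_results_payload):
    url = 'http://%s/prpc/luci.resultsink.v1.Sink/ReportTestResults' % ctx['address']
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': 'ResultSink ' + ctx['auth_token'],
    }
    return requests.post(url,
                         headers=headers,
                         data=json.dumps({'testResults': test_results_payload}))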
Code example #13
    def test_summary_html(self):
        tr = test_results.TestResult(test_name='test-name')
        tr.artifacts.AddArtifact('stderr', '/tmp/stderr', False)
        tr.artifacts.AddArtifact('crash_log', '/tmp/crash_log', False)
        tr.artifacts.AddArtifact('command', '/tmp/cmd', False)

        sent_data = self.sink(True, tr)
        p = re.compile(
            '<text-artifact artifact-id="(command|stderr|crash_log)" />')

        self.assertListEqual(
            p.findall(sent_data['summaryHtml']),
            # The artifact tags should be sorted by the artifact names.
            ['command', 'crash_log', 'stderr'],
        )
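
A sketch of the summaryHtml construction this test implies: one <text-artifact> tag per text artifact, emitted in sorted name order. The helper name is hypothetical.

# Hypothetical helper consistent with test_summary_html: one <text-artifact>
# tag per text artifact, sorted by artifact name.
def _build_summary_html(text_artifact_names):
    return ''.join('<text-artifact artifact-id="%s" />' % name
                   for name in sorted(text_artifact_names))

# _build_summary_html(['stderr', 'crash_log', 'command']) yields the tags in
# the order command, crash_log, stderr, matching the assertion above.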
Code example #14
    def test_artifacts_with_duplicate_paths(self):
        tr = test_results.TestResult(test_name='test-name')
        tr.artifacts.AddArtifact('artifact', '/tmp/foo', False)
        tr.artifacts.AddArtifact('artifact', '/tmp/bar', False)

        sent_data = self.sink(True, tr)
        self.assertDictEqual(
            sent_data['artifacts'], {
                'artifact': {
                    'filePath': '/tmp/foo'
                },
                'artifact-1': {
                    'filePath': '/tmp/bar'
                }
            })
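
The duplicate-path case implies a simple renaming rule: the first artifact keeps its name and later artifacts with the same name get a numeric suffix. A sketch under that assumption follows; the helper name is hypothetical.

# Hypothetical renaming helper consistent with
# test_artifacts_with_duplicate_paths: 'artifact', 'artifact-1', 'artifact-2', ...
def _unique_artifact_name(existing_names, name):
    if name not in existing_names:
        return name
    suffix = 1
    while '%s-%d' % (name, suffix) in existing_names:
        suffix += 1
    return '%s-%d' % (name, suffix)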
Code example #15
    def test_artifacts(self):
        tr = test_results.TestResult(test_name='test-name')
        tr.artifacts.AddArtifact('test-image.png', '/tmp/test-image.png', True)
        tr.artifacts.AddArtifact('stdout', '/tmp/stdout', True)

        sent_data = self.sink(True, tr)
        self.assertDictEqual(
            sent_data['artifacts'], {
                'test-image.png': {
                    'filePath': '/tmp/test-image.png'
                },
                'stdout': {
                    'filePath': '/tmp/stdout'
                }
            })
Code example #16
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout()]
    elif result_type == test_expectations.AUDIO:
        failures = [test_failures.FailureAudioMismatch()]
    elif result_type == test_expectations.TEXT:
        failures = [test_failures.FailureTextMismatch()]
    elif result_type == test_expectations.IMAGE:
        failures = [test_failures.FailureImageHashMismatch()]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash()]
    elif result_type == test_expectations.LEAK:
        failures = [test_failures.FailureLeak()]
    return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
Code example #17
    def test_sink_without_expectations(self):
        tr = test_results.TestResult(test_name='test-name')
        tr.type = ResultType.Crash
        expected_tags = [
            {
                'key': 'test_name',
                'value': 'test-name'
            },
            {
                'key': 'web_tests_device_failed',
                'value': 'False'
            },
            {
                'key': 'web_tests_result_type',
                'value': 'CRASH'
            },
            {
                'key': 'web_tests_flag_specific_config_name',
                'value': '',
            },
            {
                'key': 'web_tests_base_timeout',
                'value': '6'
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'TestExpectations',
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'WebDriverExpectations',
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'NeverFixTests',
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'StaleTestExpectations',
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'SlowTests',
            },
        ]
        sent_data = self.sink(True, tr)
        self.assertEqual(sent_data['tags'], expected_tags)
Code example #18
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    dummy_1, dummy_2 = DriverOutput(None, None, None, None), DriverOutput(
        None, None, None, None)
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout(dummy_1)]
    elif result_type == test_expectations.AUDIO:
        failures = [test_failures.FailureAudioMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.TEXT:
        failures = [test_failures.FailureTextMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.IMAGE:
        failures = [test_failures.FailureImageHashMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash(dummy_1)]
    elif result_type == test_expectations.LEAK:
        failures = [test_failures.FailureLeak(dummy_1)]
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
Code example #19
    def assertFilename(self, test_name, expected_filename):
        sent_data = self.sink(True, test_results.TestResult(test_name))
        self.assertEqual(sent_data['testMetadata']['location']['fileName'],
                         '//' + RELATIVE_WEB_TESTS + expected_filename)
Code example #20
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  retry_attempt):
        batch_size = self._options.derived_batch_size

        # If we're retrying a test, then it's because we think it might be flaky
        # and rerunning it might provide a different result. We must restart
        # content shell to get a valid result, as otherwise state can leak
        # from previous tests. To do so, we set a batch size of 1, as that
        # prevents content shell reuse.
        if not self._options.must_use_derived_batch_size and retry_attempt >= 1:
            batch_size = 1
        self._expectations = expectations
        self._test_inputs = test_inputs

        test_run_results = TestRunResults(
            self._expectations,
            len(test_inputs) + len(tests_to_skip),
            self._test_result_sink,
        )
        self._current_run_results = test_run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = ResultType.Skip
            test_run_results.add(
                result,
                expected=True,
                test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel, self._options.virtual_parallel,
            batch_size == 1)

        self._reorder_tests_by_args(locked_shards)
        self._reorder_tests_by_args(unlocked_shards)

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))

        if retry_attempt < 1:
            self._printer.print_workers_and_shards(self._port, num_workers,
                                                   len(all_shards),
                                                   len(locked_shards))

        if self._options.dry_run:
            return test_run_results

        self._printer.write_update(
            'Starting %s ...' % grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs,
                          batch_size) for shard in all_shards)

            if self._shards_to_redo:
                num_workers -= len(self._shards_to_redo)
                if num_workers > 0:
                    with message_pool.get(self, self._worker_factory,
                                          num_workers,
                                          self._port.host) as pool:
                        pool.run(('test_list', shard.name, shard.test_inputs,
                                  batch_size)
                                 for shard in self._shards_to_redo)
                else:
                    self._mark_interrupted_tests_as_skipped(
                        self._current_run_results)
                    raise TestRunInterruptedException(
                        'All workers have device failures. Exiting.')
        except TestRunInterruptedException as error:
            _log.warning(error.reason)
            test_run_results.interrupted = True
        except KeyboardInterrupt:
            self._printer.flush()
            self._printer.writeln('Interrupted, exiting ...')
            test_run_results.keyboard_interrupted = True
        except Exception as error:
            _log.debug('%s("%s") raised, exiting', error.__class__.__name__,
                       error)
            raise
        finally:
            test_run_results.run_time = time.time() - start_time

        return test_run_results
Code example #21
File: layout_test_runner.py  Project: byobrowser/byob
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  retry_attempt):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._retry_attempt = retry_attempt

        test_run_results = TestRunResults(
            self._expectations,
            len(test_inputs) + len(tests_to_skip))
        self._current_run_results = test_run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        if retry_attempt < 1:
            self._printer.print_expected(
                test_run_results,
                self._expectations.get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            test_run_results.add(result,
                                 expected=True,
                                 test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel, self._options.batch_size == 1)

        self._reorder_tests_by_args(locked_shards)
        self._reorder_tests_by_args(unlocked_shards)

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))

        if retry_attempt < 1:
            self._printer.print_workers_and_shards(num_workers,
                                                   len(all_shards),
                                                   len(locked_shards))

        if self._options.dry_run:
            return test_run_results

        self._printer.write_update('Starting %s ...' %
                                   grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs)
                         for shard in all_shards)

            if self._shards_to_redo:
                num_workers -= len(self._shards_to_redo)
                if num_workers > 0:
                    with message_pool.get(self, self._worker_factory,
                                          num_workers,
                                          self._port.host) as pool:
                        pool.run(('test_list', shard.name, shard.test_inputs)
                                 for shard in self._shards_to_redo)
        except TestRunInterruptedException as error:
            _log.warning(error.reason)
            test_run_results.interrupted = True
        except KeyboardInterrupt:
            self._printer.flush()
            self._printer.writeln('Interrupted, exiting ...')
            test_run_results.keyboard_interrupted = True
        except Exception as error:
            _log.debug('%s("%s") raised, exiting', error.__class__.__name__,
                       error)
            raise
        finally:
            test_run_results.run_time = time.time() - start_time

        return test_run_results
Code example #22
    def test_sink_with_expectations(self):
        class FakeTestExpectation(object):
            def __init__(self):
                self.raw_results = ['Failure']

        class FakeExpectations(object):
            def __init__(self):
                self.system_condition_tags = ['tag1', 'tag2']

            def get_expectations(self, _):
                return FakeTestExpectation()

        # Values should be extracted from expectations.
        tr = test_results.TestResult(test_name='test-name')
        tr.type = ResultType.Crash
        expectations = FakeExpectations()
        expected_tags = [
            {
                'key': 'test_name',
                'value': 'test-name'
            },
            {
                'key': 'web_tests_device_failed',
                'value': 'False'
            },
            {
                'key': 'web_tests_result_type',
                'value': 'CRASH'
            },
            {
                'key': 'web_tests_flag_specific_config_name',
                'value': '',
            },
            {
                'key': 'web_tests_base_timeout',
                'value': '6'
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'TestExpectations',
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'WebDriverExpectations',
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'NeverFixTests',
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'StaleTestExpectations',
            },
            {
                'key': 'web_tests_used_expectations_file',
                'value': 'SlowTests',
            },
            {
                'key': 'raw_typ_expectation',
                'value': 'Failure'
            },
            {
                'key': 'typ_tag',
                'value': 'tag1'
            },
            {
                'key': 'typ_tag',
                'value': 'tag2'
            },
        ]
        sent_data = self.sink(True, tr, expectations)
        self.assertEqual(sent_data['tags'], expected_tags)
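
The expectation-derived entries at the end of expected_tags follow directly from FakeExpectations: one raw_typ_expectation tag per raw result and one typ_tag per system condition tag. A sketch of that conversion is given below with a hypothetical helper name.

# Hypothetical helper showing how the last three tags asserted above could be
# derived from an expectations object such as FakeExpectations.
def _expectation_tags(expectations, test_name):
    tags = []
    for raw in expectations.get_expectations(test_name).raw_results:
        tags.append({'key': 'raw_typ_expectation', 'value': raw})
    for tag in expectations.system_condition_tags:
        tags.append({'key': 'typ_tag', 'value': tag})
    return tags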