Code example #1
    def test_packed_test(self):
        upload = Upload(
            suite='webkitpy-tests',
            commits=[Upload.create_commit(
                repository_id='webkit',
                id='5',
                branch='trunk',
            )],
            results={
                'dir1/sub-dir1/test1': Upload.create_test_result(actual=Upload.Expectations.FAIL),
                'dir1/sub-dir1/test2': Upload.create_test_result(actual=Upload.Expectations.TIMEOUT),
                'dir1/sub-dir2/test3': {},
                'dir1/sub-dir2/test4': {},
                'dir2/sub-dir3/test5': {},
                'dir2/test6': {},
            }
        )
        generated_dict = self.normalize(json.loads(json.dumps(upload, cls=Upload.Encoder)))
        self.assertEqual(generated_dict['test_results']['results'], self.normalize({
            'dir1': {
                'sub-dir1': {
                    'test1': {'actual': Upload.Expectations.FAIL},
                    'test2': {'actual': Upload.Expectations.TIMEOUT},
                },
                'sub-dir2': {
                    'test3': {},
                    'test4': {},
                },
            },
            'dir2': {
                'sub-dir3': {'test5': {}},
                'test6': {},
            },
        }))
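
The assertion above depends on Upload.Encoder packing slash-separated test names into a nested trie during encoding. A minimal sketch of that packing, assuming only that names split on '/' (pack_results is an invented name, not webkitpy's actual encoder):

def pack_results(flat_results):
    """Nest 'a/b/c'-style test names into a dict-of-dicts trie."""
    trie = {}
    for name, result in flat_results.items():
        node = trie
        parts = name.split('/')
        # Walk or create one level of nesting per path component.
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = result
    return trie

# pack_results({'dir2/sub-dir3/test5': {}, 'dir2/test6': {}})
# == {'dir2': {'sub-dir3': {'test5': {}}, 'test6': {}}}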
Code example #2
    def test_encoding(self):
        start_time, end_time = time.time() - 3, time.time()
        upload = Upload(
            suite='webkitpy-tests',
            configuration=Upload.create_configuration(
                platform='mac',
                version='10.13.0',
                version_name='High Sierra',
                architecture='x86_64',
                sdk='17A405',
            ),
            details=Upload.create_details(link='https://webkit.org'),
            commits=[Upload.create_commit(
                repository_id='webkit',
                id='5',
                branch='trunk',
            )],
            run_stats=Upload.create_run_stats(
                start_time=start_time,
                end_time=end_time,
                tests_skipped=0,
            ),
            results={
                'webkitpy.test1': {},
                'webkitpy.test2': Upload.create_test_result(expected=Upload.Expectations.PASS, actual=Upload.Expectations.FAIL),
            },
        )
        generated_dict = self.normalize(json.loads(json.dumps(upload, cls=Upload.Encoder)))

        self.assertEqual(generated_dict['version'], 0)
        self.assertEqual(generated_dict['suite'], 'webkitpy-tests')
        self.assertEqual(generated_dict['configuration'], self.normalize(dict(
            platform='mac',
            is_simulator=False,
            version='10.13.0',
            version_name='High Sierra',
            architecture='x86_64',
            sdk='17A405',
        )))
        self.assertEqual(generated_dict['commits'], [dict(
            repository_id='webkit',
            id='5',
            branch='trunk',
        )])
        self.assertEqual(generated_dict['test_results']['details'], self.normalize(dict(link='https://webkit.org')))
        self.assertEqual(generated_dict['test_results']['run_stats'], self.normalize(dict(
            start_time=start_time,
            end_time=end_time,
            tests_skipped=0,
        )))
        self.assertEqual(generated_dict['test_results']['results'], self.normalize({
            'webkitpy.test1': {},
            'webkitpy.test2': Upload.create_test_result(expected=Upload.Expectations.PASS, actual=Upload.Expectations.FAIL),
        }))
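
The self.normalize helper is defined elsewhere in this test case and is not shown here. One plausible minimal version, assuming its only job is to coerce both sides of each assertEqual into uniformly typed plain dicts, is a JSON round-trip (a sketch, not the real webkitpy helper):

    def normalize(self, value):
        # Hypothetical sketch: a JSON round-trip coerces key and string types
        # into one canonical representation before comparison.
        return json.loads(json.dumps(value))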
Code example #3
    def _results_to_upload_json_trie(self, expectations, results):
        FAILURE_TO_TEXT = {
            test_expectations.PASS: Upload.Expectations.PASS,
            test_expectations.CRASH: Upload.Expectations.CRASH,
            test_expectations.TIMEOUT: Upload.Expectations.TIMEOUT,
            test_expectations.IMAGE: Upload.Expectations.IMAGE,
            test_expectations.TEXT: Upload.Expectations.TEXT,
            test_expectations.AUDIO: Upload.Expectations.AUDIO,
            test_expectations.MISSING: Upload.Expectations.WARNING,
            test_expectations.IMAGE_PLUS_TEXT: ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
        }
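        # Failure types without an entry here fall back to
        # Upload.Expectations.FAIL via the .get() calls below.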

        results_trie = {}
        for result in itervalues(results.results_by_name):
            if result.type == test_expectations.SKIP:
                continue

            expected = expectations.filtered_expectations_for_test(
                result.test_name,
                self._options.pixel_tests or bool(result.reftest_type),
                self._options.world_leaks,
            )
            if expected == {test_expectations.PASS}:
                expected = None
            else:
                expected = ' '.join([FAILURE_TO_TEXT.get(e, Upload.Expectations.FAIL) for e in expected])

            json_results_generator.add_path_to_trie(
                result.test_name,
                Upload.create_test_result(
                    expected=expected,
                    actual=FAILURE_TO_TEXT.get(result.type, Upload.Expectations.FAIL) if result.type else None,
                    time=int(result.test_run_time * 1000),
                ), results_trie)
        return results_trie
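
json_results_generator.add_path_to_trie nests one '/'-separated test name at a time into the shared results_trie, in the same spirit as the packing sketch after example #1. A hedged usage illustration (the test name and values are invented):

results_trie = {}
json_results_generator.add_path_to_trie(
    'fast/css/example.html',
    Upload.create_test_result(actual=Upload.Expectations.TEXT, time=1200),
    results_trie)
# results_trie now looks like:
# {'fast': {'css': {'example.html': {'actual': ..., 'time': 1200}}}}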
Code example #4
    def _run_tests(self, names):
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # We autoinstall everything up front so that tests can run
        # concurrently without racing to autoinstall packages mid-run.
        self.printer.write_update("Checking autoinstalled packages ...")
        from webkitpy.thirdparty import autoinstall_everything
        autoinstall_everything()

        start_time = time.time()

        if getattr(self._options, 'coverage', False):
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1

            import webkitpy.thirdparty.autoinstalled.coverage as coverage
            cov = coverage.coverage(omit=[
                "/usr/*",
                "*/webkitpy/thirdparty/autoinstalled/*",
                "*/webkitpy/thirdparty/BeautifulSoup.py",
                "*/webkitpy/thirdparty/BeautifulSoup_legacy.py",
            ])
            cov.start()

        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader)
        test_runner.run(parallel_tests,
                        getattr(self._options, 'child_processes', 1))
        test_runner.run(serial_tests, 1)
        end_time = time.time()

        self.printer.print_result(time.time() - start)

        if getattr(self._options, 'json', False):
            _print_results_as_json(
                sys.stdout, itertools.chain(parallel_tests, serial_tests),
                test_runner.failures, test_runner.errors)

        if getattr(self._options, 'json_file_name', None):
            self._options.json_file_name = os.path.abspath(
                self._options.json_file_name)
            with open(self._options.json_file_name, 'w') as json_file:
                _print_results_as_json(
                    json_file, itertools.chain(parallel_tests, serial_tests),
                    test_runner.failures, test_runner.errors)

        if getattr(self._options, 'coverage', False):
            cov.stop()
            cov.save()

        failed_uploads = 0
        if getattr(self._options, 'report_urls', None):
            self.printer.meter.writeln('\n')
            self.printer.write_update('Preparing upload data ...')

            # Empty test results indicate a PASS.
            results = {test: {} for test in test_runner.tests_run}
            for test, errors in test_runner.errors:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.ERROR, log='\n'.join(errors))
            for test, failures in test_runner.failures:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.FAIL, log='\n'.join(failures))

            _host.initialize_scm()
            upload = Upload(
                suite='webkitpy-tests',
                configuration=Upload.create_configuration(
                    platform=_host.platform.os_name,
                    version=str(_host.platform.os_version),
                    version_name=_host.platform.os_version_name(),
                    style=self.upload_style,
                    sdk=_host.platform.build_version(),
                    flavor=self._options.result_report_flavor,
                ),
                details=Upload.create_details(options=self._options),
                commits=[
                    Upload.create_commit(
                        repository_id='webkit',
                        id=_host.scm().native_revision(_webkit_root),
                        branch=_host.scm().native_branch(_webkit_root),
                    )
                ],
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(test_runner.tests_run) -
                    len(parallel_tests) - len(serial_tests),
                ),
                results=results,
            )
            for url in self._options.report_urls:
                self.printer.write_update('Uploading to {} ...'.format(url))
                if not upload.upload(url, log_line_func=self.printer.meter.writeln):
                    failed_uploads += 1
            self.printer.meter.writeln('Uploads completed!')

        if getattr(self._options, 'coverage', False):
            cov.report(show_missing=False)

        return not self.printer.num_errors and not self.printer.num_failures and not failed_uploads
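
Taken together, the report_urls branch reduces to building an Upload and posting it to each URL. A stripped-down sketch of that flow, assuming suite and results are the only required constructor fields and using an invented endpoint URL:

upload = Upload(
    suite='webkitpy-tests',
    results={'webkitpy.example_test': {}},  # empty result means PASS
)
for url in ['https://results.example.org/api/upload']:  # invented URL
    if not upload.upload(url, log_line_func=print):
        print('Upload to {} failed'.format(url))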
Code example #5
File: manager.py  Project: eocanha/webkit
    def run(self, args, json_output=None):
        if json_output:
            json_output = self.host.filesystem.abspath(json_output)
            if not self.host.filesystem.isdir(
                    self.host.filesystem.dirname(json_output)
            ) or self.host.filesystem.isdir(json_output):
                raise RuntimeError('Cannot write to {}'.format(json_output))

        start_time = time.time()

        self._stream.write_update('Checking build ...')
        if not self._port.check_api_test_build(
                self._binaries_for_arguments(args)):
            _log.error('Build check failed')
            return Manager.FAILED_BUILD_CHECK

        self._initialize_devices()

        self._stream.write_update('Collecting tests ...')
        try:
            test_names = self._collect_tests(args)
        except ScriptError:
            self._stream.writeln('Failed to collect tests')
            return Manager.FAILED_COLLECT_TESTS
        self._stream.write_update('Found {} tests'.format(len(test_names)))
        if len(test_names) == 0:
            self._stream.writeln('No tests found')
            return Manager.FAILED_COLLECT_TESTS

        if self._port.get_option('dump'):
            for test in test_names:
                self._stream.writeln(test)
            return Manager.SUCCESS

        test_names = [
            test for test in test_names
            for _ in range(self._options.repeat_each)
        ]
        if self._options.repeat_each != 1:
            _log.debug('Repeating each test {} times'.format(
                self._options.repeat_each))

        try:
            _log.info('Running tests')
            runner = Runner(self._port, self._stream)
            for i in range(self._options.iterations):
                _log.debug('\nIteration {}'.format(i + 1))
                runner.run(
                    test_names,
                    int(self._options.child_processes)
                    if self._options.child_processes else
                    self._port.default_child_processes())
        except KeyboardInterrupt:
            # If we receive a KeyboardInterrupt, print results.
            self._stream.writeln('')

        end_time = time.time()

        successful = runner.result_map_by_status(runner.STATUS_PASSED)
        disabled = len(runner.result_map_by_status(runner.STATUS_DISABLED))
        _log.info('Ran {} tests of {} with {} successful'.format(
            len(runner.results) - disabled, len(test_names), len(successful)))

        result_dictionary = {
            'Skipped': [],
            'Failed': [],
            'Crashed': [],
            'Timedout': [],
        }

        self._stream.writeln('-' * 30)
        result = Manager.SUCCESS
        if len(successful) * self._options.repeat_each + disabled == len(
                test_names):
            self._stream.writeln('All tests successfully passed!')
            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))
        else:
            self._stream.writeln('Test suite failed')
            self._stream.writeln('')

            skipped = []
            for test in test_names:
                if test not in runner.results:
                    skipped.append(test)
                    result_dictionary['Skipped'].append({
                        'name': test,
                        'output': None
                    })
            if skipped:
                self._stream.writeln('Skipped {} tests'.format(len(skipped)))
                self._stream.writeln('')
                if self._options.verbose:
                    for test in skipped:
                        self._stream.writeln('    {}'.format(test))

            self._print_tests_result_with_status(runner.STATUS_FAILED, runner)
            self._print_tests_result_with_status(runner.STATUS_CRASHED, runner)
            self._print_tests_result_with_status(runner.STATUS_TIMEOUT, runner)

            for test, result in iteritems(runner.results):
                status_to_string = {
                    runner.STATUS_FAILED: 'Failed',
                    runner.STATUS_CRASHED: 'Crashed',
                    runner.STATUS_TIMEOUT: 'Timedout',
                }.get(result[0])
                if not status_to_string:
                    continue
                result_dictionary[status_to_string].append({
                    'name': test,
                    'output': result[1]
                })

            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))

            result = Manager.FAILED_TESTS

        if self._options.report_urls:
            self._stream.writeln('\n')
            self._stream.write_update('Preparing upload data ...')

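            # STATUS_PASSED maps to None, so passing tests carry no failure in
            # the upload; statuses missing from this dict (such as
            # STATUS_DISABLED) are filtered out of the comprehension below.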
            status_to_test_result = {
                runner.STATUS_PASSED: None,
                runner.STATUS_FAILED: Upload.Expectations.FAIL,
                runner.STATUS_CRASHED: Upload.Expectations.CRASH,
                runner.STATUS_TIMEOUT: Upload.Expectations.TIMEOUT,
            }
            upload = Upload(
                suite=self._options.suite or 'api-tests',
                configuration=self._port.configuration_for_upload(
                    self._port.target_host(0)),
                details=Upload.create_details(options=self._options),
                commits=self._port.commits_for_upload(),
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(result_dictionary['Skipped']),
                ),
                results={
                    test: Upload.create_test_result(
                        actual=status_to_test_result[result[0]])
                    for test, result in iteritems(runner.results)
                    if result[0] in status_to_test_result
                },
            )
            for url in self._options.report_urls:
                self._stream.write_update('Uploading to {} ...'.format(url))
                if not upload.upload(url, log_line_func=self._stream.writeln):
                    result = Manager.FAILED_UPLOAD
            self._stream.writeln('Uploads completed!')

        return result
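
For reference, the json_output file written above is a four-key summary keyed by outcome; a hedged sketch of its shape (test names and output text are invented):

result_dictionary = {
    'Skipped': [{'name': 'Suite.skippedTest', 'output': None}],
    'Failed': [{'name': 'Suite.failingTest', 'output': 'assertion log text'}],
    'Crashed': [],
    'Timedout': [],
}
# json.dumps(result_dictionary, indent=4) is what lands in json_output.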
Code example #6
File: main.py  Project: zszyj/webkit
    def _run_tests(self, names, will_run_lldb_webkit_tests):
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # We autoinstall everything up front so that tests can run
        # concurrently without racing to autoinstall packages mid-run.
        self.printer.write_update("Checking autoinstalled packages ...")
        from webkitpy.thirdparty import autoinstall_everything
        autoinstall_everything()

        start_time = time.time()
        config = Config(_host.executive, self.finder.filesystem)
        configuration_to_use = self._options.configuration or config.default_configuration()

        if will_run_lldb_webkit_tests:
            self.printer.write_update('Building lldbWebKitTester ...')
            build_lldbwebkittester = self.finder.filesystem.join(
                _webkit_root, 'Tools', 'Scripts', 'build-lldbwebkittester')
            try:
                _host.executive.run_and_throw_if_fail(
                    [
                        build_lldbwebkittester,
                        config.flag_for_configuration(configuration_to_use)
                    ],
                    quiet=(not bool(self._options.verbose)))
            except ScriptError as e:
                _log.error(e.message_with_output(output_limit=None))
                return False
            os.environ['LLDB_WEBKIT_TESTER_EXECUTABLE'] = str(
                self.finder.filesystem.join(
                    config.build_directory(configuration_to_use),
                    'lldbWebKitTester'))
            if not self.finder.filesystem.exists(
                    os.environ['LLDB_WEBKIT_TESTER_EXECUTABLE']):
                _log.error('Failed to find lldbWebKitTester.')
                return False

        if self._options.coverage:
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1

            import webkitpy.thirdparty.autoinstalled.coverage as coverage
            cov = coverage.coverage(omit=[
                "/usr/*", "*/webkitpy/thirdparty/autoinstalled/*",
                "*/webkitpy/thirdparty/BeautifulSoup.py"
            ])
            cov.start()

        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader)
        test_runner.run(parallel_tests, self._options.child_processes)
        test_runner.run(serial_tests, 1)
        end_time = time.time()

        self.printer.print_result(time.time() - start)

        if self._options.json:
            _print_results_as_json(
                sys.stdout, itertools.chain(parallel_tests, serial_tests),
                test_runner.failures, test_runner.errors)

        if self._options.json_file_name:
            self._options.json_file_name = os.path.abspath(
                self._options.json_file_name)
            with open(self._options.json_file_name, 'w') as json_file:
                _print_results_as_json(
                    json_file, itertools.chain(parallel_tests, serial_tests),
                    test_runner.failures, test_runner.errors)

        if self._options.coverage:
            cov.stop()
            cov.save()

        failed_uploads = 0
        if self._options.report_urls:
            self.printer.meter.writeln('\n')
            self.printer.write_update('Preparing upload data ...')

            # Empty test results indicate a PASS.
            results = {test: {} for test in test_runner.tests_run}
            for test, errors in test_runner.errors:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.ERROR, log='\n'.join(errors))
            for test, failures in test_runner.failures:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.FAIL, log='\n'.join(failures))

            _host.initialize_scm()
            upload = Upload(
                suite='webkitpy-tests',
                configuration=Upload.create_configuration(
                    platform=_host.platform.os_name,
                    version=str(_host.platform.os_version),
                    version_name=_host.platform.os_version_name(),
                    style='asan' if config.asan else configuration_to_use.lower(),
                    sdk=_host.platform.build_version(),
                    flavor=self._options.result_report_flavor,
                ),
                details=Upload.create_details(options=self._options),
                commits=[
                    Upload.create_commit(
                        repository_id='webkit',
                        id=_host.scm().native_revision(_webkit_root),
                        branch=_host.scm().native_branch(_webkit_root),
                    )
                ],
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(test_runner.tests_run) -
                    len(parallel_tests) - len(serial_tests),
                ),
                results=results,
            )
            for url in self._options.report_urls:
                self.printer.write_update('Uploading to {} ...'.format(url))
                if not upload.upload(url, log_line_func=self.printer.meter.writeln):
                    failed_uploads += 1
            self.printer.meter.writeln('Uploads completed!')

        if self._options.coverage:
            cov.report(show_missing=False)

        return not self.printer.num_errors and not self.printer.num_failures and not failed_uploads
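
The coverage hooks in examples #4 and #6 follow the standard coverage.py API: construct with omit globs, start() before the measured work, stop() and save() after, then report(). A minimal standalone sketch (run_tests_somehow is a placeholder name):

import coverage

cov = coverage.coverage(omit=['/usr/*'])  # legacy alias for coverage.Coverage
cov.start()
run_tests_somehow()  # placeholder for whatever work is being measured
cov.stop()
cov.save()
cov.report(show_missing=False)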