Example #1
def __init__(self, filesystem=None, webkit_finder=None):
    self.filesystem = filesystem or FileSystem()
    self.executive = Executive()
    self.finder = Finder(self.filesystem)
    self.printer = Printer(sys.stderr)
    self.webkit_finder = webkit_finder or WebKitFinder(self.filesystem)
    self._options = None
Example #2
def __init__(self, filesystem=None, webkit_finder=None):
    self.filesystem = filesystem or FileSystem()
    self.executive = Executive()
    self.finder = Finder(self.filesystem)
    self.printer = Printer(sys.stderr)
    self.webkit_finder = webkit_finder or WebKitFinder(self.filesystem)
    self._options = None
Example #4
def __init__(self, filesystem=None):
    self.finder = Finder(filesystem or FileSystem())
    self.printer = Printer(sys.stderr)
    self._options = None
Example #5
class Tester(object):
    def __init__(self, filesystem=None):
        self.finder = Finder(filesystem or FileSystem())
        self.printer = Printer(sys.stderr)
        self._options = None

    def add_tree(self, top_directory, starting_subdirectory=None):
        self.finder.add_tree(top_directory, starting_subdirectory)

    def _parse_args(self):
        parser = optparse.OptionParser(
            usage='usage: %prog [options] [args...]')
        parser.add_option('-a',
                          '--all',
                          action='store_true',
                          default=False,
                          help='run all the tests')
        parser.add_option(
            '-c',
            '--coverage',
            action='store_true',
            default=False,
            help=
            'generate code coverage info (requires http://pypi.python.org/pypi/coverage)'
        )
        parser.add_option(
            '-q',
            '--quiet',
            action='store_true',
            default=False,
            help='run quietly (errors, warnings, and progress only)')
        parser.add_option(
            '-t',
            '--timing',
            action='store_true',
            default=False,
            help='display per-test execution time (implies --verbose)')
        parser.add_option(
            '-v',
            '--verbose',
            action='count',
            default=0,
            help=
            'verbose output (specify once for individual test results, twice for debug messages)'
        )
        parser.add_option('--skip-integrationtests',
                          action='store_true',
                          default=False,
                          help='do not run the integration tests')
        parser.add_option(
            '-p',
            '--pass-through',
            action='store_true',
            default=False,
            help=
            'be debugger friendly by passing captured output through to the system'
        )
        parser.add_option(
            '-j',
            '--child-processes',
            action='store',
            type='int',
            default=(1 if sys.platform == 'win32' else
                     multiprocessing.cpu_count()),
            help='number of tests to run in parallel (default=%default)')

        parser.epilog = (
            '[args...] is an optional list of modules, test_classes, or individual tests. '
            'If no args are given, all the tests will be run.')

        return parser.parse_args()

    def run(self):
        self._options, args = self._parse_args()
        self.printer.configure(self._options)

        self.finder.clean_trees()

        names = self.finder.find_names(args,
                                       self._options.skip_integrationtests,
                                       self._options.all,
                                       self._options.child_processes != 1)
        if not names:
            _log.error('No tests to run')
            return False

        return self._run_tests(names)

    def _run_tests(self, names):
        if self._options.coverage:
            try:
                import webkitpy.thirdparty.autoinstalled.coverage as coverage
            except ImportError:
                _log.error(
                    "Failed to import 'coverage'; can't generate coverage numbers."
                )
                return False
            cov = coverage.coverage()
            cov.start()

        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        _log.debug("Loading the tests...")

        loader = unittest.defaultTestLoader
        suites = []
        for name in names:
            if self.finder.is_module(name):
                # if we failed to load a name and it looks like a module,
                # try importing it directly, because loadTestsFromName()
                # produces lousy error messages for bad modules.
                try:
                    __import__(name)
                except ImportError:
                    _log.fatal('Failed to import %s:' % name)
                    self._log_exception()
                    return False

            suites.append(loader.loadTestsFromName(name, None))

        test_suite = unittest.TestSuite(suites)
        test_runner = Runner(self.printer, self._options, loader)

        _log.debug("Running the tests.")
        result = test_runner.run(test_suite)
        if self._options.coverage:
            cov.stop()
            cov.save()
            cov.report(show_missing=False)
        return result.wasSuccessful()

    def _log_exception(self):
        s = StringIO.StringIO()
        traceback.print_exc(file=s)
        for l in s.buflist:
            _log.error('  ' + l.rstrip())
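A Tester like the one in Example #5 is normally driven from a small script entry point. The sketch below is illustrative only: the main() wrapper, the tree path, and the package name are assumptions rather than part of the scraped code, and it assumes the Tester class above is defined in the same module. It relies on run() returning True on success, as shown above.

import sys

def main():
    tester = Tester()
    # add_tree(top_directory, starting_subdirectory): register this tree of tests
    # with the underlying Finder so *_unittest.py files can be discovered.
    tester.add_tree('/path/to/scripts', 'webkitpy')
    return 0 if tester.run() else 1

if __name__ == '__main__':
    sys.exit(main())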
Example #6
class Tester(object):
    def __init__(self, filesystem=None):
        self.finder = Finder(filesystem or FileSystem())
        self.printer = Printer(sys.stderr)
        self._options = None

    def add_tree(self, top_directory, starting_subdirectory=None):
        self.finder.add_tree(top_directory, starting_subdirectory)

    def skip(self, names, reason, bugid):
        self.finder.skip(names, reason, bugid)

    def _parse_args(self, argv=None):
        parser = optparse.OptionParser(
            usage='usage: %prog [options] [args...]')
        parser.add_option('-a',
                          '--all',
                          action='store_true',
                          default=False,
                          help='run all the tests')
        parser.add_option(
            '-c',
            '--coverage',
            action='store_true',
            default=False,
            help=
            'generate code coverage info (requires http://pypi.python.org/pypi/coverage)'
        )
        parser.add_option('-i',
                          '--integration-tests',
                          action='store_true',
                          default=False,
                          help='run integration tests as well as unit tests'),
        parser.add_option(
            '-j',
            '--child-processes',
            action='store',
            type='int',
            default=(1 if sys.platform.startswith('win') else
                     multiprocessing.cpu_count()),
            help='number of tests to run in parallel (default=%default)')
        parser.add_option(
            '-p',
            '--pass-through',
            action='store_true',
            default=False,
            help=
            'be debugger friendly by passing captured output through to the system'
        )
        parser.add_option(
            '-q',
            '--quiet',
            action='store_true',
            default=False,
            help='run quietly (errors, warnings, and progress only)')
        parser.add_option(
            '-t',
            '--timing',
            action='store_true',
            default=False,
            help='display per-test execution time (implies --verbose)')
        parser.add_option(
            '-v',
            '--verbose',
            action='count',
            default=0,
            help=
            'verbose output (specify once for individual test results, twice for debug messages)'
        )
        parser.add_option('--json',
                          action='store_true',
                          default=False,
                          help='write JSON formatted test results to stdout')

        parser.epilog = (
            '[args...] is an optional list of modules, test_classes, or individual tests. '
            'If no args are given, all the tests will be run.')

        return parser.parse_args(argv)

    def run(self):
        self._options, args = self._parse_args()
        self.printer.configure(self._options)

        self.finder.clean_trees()

        names = self.finder.find_names(args, self._options.all)
        if not names:
            _log.error('No tests to run')
            return False

        return self._run_tests(names)

    def _run_tests(self, names):
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # We autoinstall everything up front so that we can run tests concurrently
        # and not have to worry about autoinstalling packages concurrently.
        self.printer.write_update("Checking autoinstalled packages ...")
        from webkitpy.thirdparty import autoinstall_everything
        autoinstall_everything()

        if self._options.coverage:
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1

            import webkitpy.thirdparty.autoinstalled.coverage as coverage
            cov = coverage.coverage(omit=[
                "/usr/*", "*/webkitpy/thirdparty/autoinstalled/*",
                "*/webkitpy/thirdparty/BeautifulSoup.py"
            ])
            cov.start()

        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader)
        test_runner.run(parallel_tests, self._options.child_processes)
        test_runner.run(serial_tests, 1)

        self.printer.print_result(time.time() - start)

        if self._options.json:
            _print_results_as_json(
                sys.stdout, itertools.chain(parallel_tests, serial_tests),
                test_runner.failures, test_runner.errors)

        if self._options.coverage:
            cov.stop()
            cov.save()
            cov.report(show_missing=False)

        return not self.printer.num_errors and not self.printer.num_failures

    def _check_imports(self, names):
        for name in names:
            if self.finder.is_module(name):
                # if we failed to load a name and it looks like a module,
                # try importing it directly, because loadTestsFromName()
                # produces lousy error messages for bad modules.
                try:
                    __import__(name)
                except ImportError:
                    _log.fatal('Failed to import %s:' % name)
                    self._log_exception()
                    return False
        return True

    def _test_names(self, loader, names):
        parallel_test_method_prefixes = ['test_']
        serial_test_method_prefixes = ['serial_test_']
        if self._options.integration_tests:
            parallel_test_method_prefixes.append('integration_test_')
            serial_test_method_prefixes.append('serial_integration_test_')

        parallel_tests = []
        loader.test_method_prefixes = parallel_test_method_prefixes
        for name in names:
            parallel_tests.extend(
                self._all_test_names(loader.loadTestsFromName(name, None)))

        serial_tests = []
        loader.test_method_prefixes = serial_test_method_prefixes
        for name in names:
            serial_tests.extend(
                self._all_test_names(loader.loadTestsFromName(name, None)))

        # loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
        # if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
        # tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
        serial_tests = list(set(serial_tests).difference(set(parallel_tests)))

        return (parallel_tests, serial_tests)

    def _all_test_names(self, suite):
        names = []
        if hasattr(suite, '_tests'):
            for t in suite._tests:
                names.extend(self._all_test_names(t))
        else:
            names.append(unit_test_name(suite))
        return names

    def _log_exception(self):
        s = StringIO.StringIO()
        traceback.print_exc(file=s)
        for l in s.buflist:
            _log.error('  ' + l.rstrip())
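The _Loader class used from Example #6 onward is not shown on this page. Judging from how the examples assign loader.test_method_prefixes, it behaves like a unittest.TestLoader whose recognized method prefixes can be swapped at runtime; the following is only a sketch under that assumption, not the actual webkitpy implementation.

import unittest

class _Loader(unittest.TestLoader):
    # Sketch: collect every attribute whose name starts with one of the configured
    # prefixes and is callable, instead of unittest's single fixed 'test' prefix.
    test_method_prefixes = ['test_']

    def getTestCaseNames(self, testCaseClass):
        def is_test_method(attrname):
            return (any(attrname.startswith(prefix) for prefix in self.test_method_prefixes)
                    and callable(getattr(testCaseClass, attrname)))
        return sorted(name for name in dir(testCaseClass) if is_test_method(name))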
Example #7
class FinderTest(unittest.TestCase):
    def setUp(self):
        files = {
          '/foo/bar/baz.py': '',
          '/foo/bar/baz_unittest.py': '',
          '/foo2/bar2/baz2.py': '',
          '/foo2/bar2/baz2.pyc': '',
          '/foo2/bar2/baz2_integrationtest.py': '',
          '/foo2/bar2/missing.pyc': '',
          '/tmp/another_unittest.py': '',
        }
        self.fs = MockFileSystem(files)
        self.finder = Finder(self.fs)
        self.finder.add_tree('/foo', 'bar')
        self.finder.add_tree('/foo2')

        # Here we have to jump through a hoop to make sure test-webkitpy doesn't log
        # any messages from these tests :(.
        self.root_logger = logging.getLogger()
        self.log_levels = []
        self.log_handlers = self.root_logger.handlers[:]
        for handler in self.log_handlers:
            self.log_levels.append(handler.level)
            handler.level = logging.CRITICAL

    def tearDown(self):
        for handler in self.log_handlers:
            handler.level = self.log_levels.pop(0)

    def test_additional_system_paths(self):
        self.assertEqual(self.finder.additional_paths(['/usr']),
                          ['/foo', '/foo2'])

    def test_is_module(self):
        self.assertTrue(self.finder.is_module('bar.baz'))
        self.assertTrue(self.finder.is_module('bar2.baz2'))
        self.assertTrue(self.finder.is_module('bar2.baz2_integrationtest'))

        # Missing the proper namespace.
        self.assertFalse(self.finder.is_module('baz'))

    def test_to_module(self):
        self.assertEqual(self.finder.to_module('/foo/test.py'), 'test')
        self.assertEqual(self.finder.to_module('/foo/bar/test.py'), 'bar.test')
        self.assertEqual(self.finder.to_module('/foo/bar/pytest.py'), 'bar.pytest')

    def test_clean(self):
        self.assertTrue(self.fs.exists('/foo2/bar2/missing.pyc'))
        self.finder.clean_trees()
        self.assertFalse(self.fs.exists('/foo2/bar2/missing.pyc'))

    def check_names(self, names, expected_names, find_all=True):
        self.assertEqual(self.finder.find_names(names, find_all), expected_names)

    def test_default_names(self):
        self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=True)
        self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=False)

        # Should return the names given it, even if they don't exist.
        self.check_names(['foobar'], ['foobar'], find_all=False)

    def test_paths(self):
        self.fs.chdir('/foo/bar')
        self.check_names(['baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['./baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['/foo/bar/baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['.'], ['bar.baz_unittest'])
        self.check_names(['../../foo2/bar2'], ['bar2.baz2_integrationtest'])

        self.fs.chdir('/')
        self.check_names(['bar'], ['bar.baz_unittest'])
        self.check_names(['/foo/bar/'], ['bar.baz_unittest'])

        # This works 'by accident' since it maps onto a package.
        self.check_names(['bar/'], ['bar.baz_unittest'])

        # This should log an error, since it's outside the trees.
        oc = OutputCapture()
        oc.set_log_level(logging.ERROR)
        oc.capture_output()
        try:
            self.check_names(['/tmp/another_unittest.py'], [])
        finally:
            _, _, logs = oc.restore_output()
            self.assertIn('another_unittest.py', logs)

        # Paths that don't exist are errors.
        oc.capture_output()
        try:
            self.check_names(['/foo/bar/notexist_unittest.py'], [])
        finally:
            _, _, logs = oc.restore_output()
            self.assertIn('notexist_unittest.py', logs)

        # Names that don't exist are caught later, at load time.
        self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
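OutputCapture in the test above is a webkitpy helper and is not shown here. On Python 3.4+ the standard library's assertLogs offers a similar "did this emit an ERROR mentioning X" check; the sketch below demonstrates only that mechanism, with an explicit logging call standing in for the Finder work that would normally produce the message.

import logging
import unittest

class LogCaptureSketch(unittest.TestCase):
    def test_error_is_captured(self):
        # assertLogs collects records at or above the given level while the block runs.
        with self.assertLogs(level=logging.ERROR) as captured:
            logging.getLogger('finder').error('/tmp/another_unittest.py is outside the trees')
        self.assertIn('another_unittest.py', '\n'.join(captured.output))

if __name__ == '__main__':
    unittest.main()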
Example #8
def __init__(self, filesystem=None):
    self.finder = Finder(filesystem or FileSystem())
    self.printer = Printer(sys.stderr)
    self._options = None
    self.upload_style = 'release'
Example #9
class Tester(object):
    def __init__(self, filesystem=None):
        self.finder = Finder(filesystem or FileSystem())
        self.printer = Printer(sys.stderr)
        self._options = None
        self.upload_style = 'release'

    def add_tree(self, top_directory, starting_subdirectory=None):
        self.finder.add_tree(top_directory, starting_subdirectory)

    def skip(self, names, reason, bugid):
        self.finder.skip(names, reason, bugid)

    def _parse_args(self, argv=None):
        parser = optparse.OptionParser(
            usage='usage: %prog [options] [args...]')

        upload_group = optparse.OptionGroup(parser, 'Upload Options')
        upload_group.add_options(upload_options())
        parser.add_option_group(upload_group)

        parser.add_option('-a',
                          '--all',
                          action='store_true',
                          default=False,
                          help='run all the tests')
        parser.add_option(
            '-c',
            '--coverage',
            action='store_true',
            default=False,
            help=
            'generate code coverage info (requires http://pypi.python.org/pypi/coverage)'
        )
        parser.add_option('-i',
                          '--integration-tests',
                          action='store_true',
                          default=False,
                          help='run integration tests as well as unit tests'),
        parser.add_option(
            '-j',
            '--child-processes',
            action='store',
            type='int',
            default=(1 if sys.platform.startswith('win') else
                     multiprocessing.cpu_count()),
            help='number of tests to run in parallel (default=%default)')
        parser.add_option(
            '-p',
            '--pass-through',
            action='store_true',
            default=False,
            help=
            'be debugger friendly by passing captured output through to the system'
        )
        parser.add_option(
            '-q',
            '--quiet',
            action='store_true',
            default=False,
            help='run quietly (errors, warnings, and progress only)')
        parser.add_option(
            '-t',
            '--timing',
            action='store_true',
            default=False,
            help='display per-test execution time (implies --verbose)')
        parser.add_option(
            '-v',
            '--verbose',
            action='count',
            default=0,
            help=
            'verbose output (specify once for individual test results, twice for debug messages)'
        )
        # FIXME: Remove '--json' argument.
        parser.add_option('--json',
                          action='store_true',
                          default=False,
                          help='write JSON formatted test results to stdout')
        parser.add_option(
            '--json-output',
            action='store',
            type='string',
            dest='json_file_name',
            help=
            'Create a file at specified path, listing test results in JSON format.'
        )

        parser.epilog = (
            '[args...] is an optional list of modules, test_classes, or individual tests. '
            'If no args are given, all the tests will be run.')

        return parser.parse_args(argv)

    def run(self):
        self._options, args = self._parse_args()
        self.printer.configure(self._options)

        self.finder.clean_trees()

        names = self.finder.find_names(args, self._options.all)
        if not names:
            _log.error('No tests to run')
            return False

        return self._run_tests(names)

    def _run_tests(self, names):
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # We autoinstall everything up front so that we can run tests concurrently
        # and not have to worry about autoinstalling packages concurrently.
        self.printer.write_update("Checking autoinstalled packages ...")
        from webkitpy.thirdparty import autoinstall_everything
        autoinstall_everything()

        start_time = time.time()

        if getattr(self._options, 'coverage', False):
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1

            import webkitpy.thirdparty.autoinstalled.coverage as coverage
            cov = coverage.coverage(omit=[
                "/usr/*",
                "*/webkitpy/thirdparty/autoinstalled/*",
                "*/webkitpy/thirdparty/BeautifulSoup.py",
                "*/webkitpy/thirdparty/BeautifulSoup_legacy.py",
            ])
            cov.start()

        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader)
        test_runner.run(parallel_tests,
                        getattr(self._options, 'child_processes', 1))
        test_runner.run(serial_tests, 1)
        end_time = time.time()

        self.printer.print_result(time.time() - start)

        if getattr(self._options, 'json', False):
            _print_results_as_json(
                sys.stdout, itertools.chain(parallel_tests, serial_tests),
                test_runner.failures, test_runner.errors)

        if getattr(self._options, 'json_file_name', None):
            self._options.json_file_name = os.path.abspath(
                self._options.json_file_name)
            with open(self._options.json_file_name, 'w') as json_file:
                _print_results_as_json(
                    json_file, itertools.chain(parallel_tests, serial_tests),
                    test_runner.failures, test_runner.errors)

        if getattr(self._options, 'coverage', False):
            cov.stop()
            cov.save()

        failed_uploads = 0
        if getattr(self._options, 'report_urls', None):
            self.printer.meter.writeln('\n')
            self.printer.write_update('Preparing upload data ...')

            # Empty test results indicate a PASS.
            results = {test: {} for test in test_runner.tests_run}
            for test, errors in test_runner.errors:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.ERROR, log='\n'.join(errors))
            for test, failures in test_runner.failures:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.FAIL, log='\n'.join(failures))

            _host.initialize_scm()
            upload = Upload(
                suite='webkitpy-tests',
                configuration=Upload.create_configuration(
                    platform=_host.platform.os_name,
                    version=str(_host.platform.os_version),
                    version_name=_host.platform.os_version_name(),
                    style=self.upload_style,
                    sdk=_host.platform.build_version(),
                    flavor=self._options.result_report_flavor,
                ),
                details=Upload.create_details(options=self._options),
                commits=[
                    Upload.create_commit(
                        repository_id='webkit',
                        id=_host.scm().native_revision(_webkit_root),
                        branch=_host.scm().native_branch(_webkit_root),
                    )
                ],
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(test_runner.tests_run) -
                    len(parallel_tests) - len(serial_tests),
                ),
                results=results,
            )
            for url in self._options.report_urls:
                self.printer.write_update('Uploading to {} ...'.format(url))
                failed_uploads = failed_uploads if upload.upload(
                    url, log_line_func=self.printer.meter.writeln) else (
                        failed_uploads + 1)
            self.printer.meter.writeln('Uploads completed!')

        if getattr(self._options, 'coverage', False):
            cov.report(show_missing=False)

        return not self.printer.num_errors and not self.printer.num_failures and not failed_uploads

    def _check_imports(self, names):
        for name in names:
            if self.finder.is_module(name):
                # if we failed to load a name and it looks like a module,
                # try importing it directly, because loadTestsFromName()
                # produces lousy error messages for bad modules.
                try:
                    __import__(name)
                except ImportError:
                    _log.fatal('Failed to import %s:' % name)
                    self._log_exception()
                    return False
        return True

    def _test_names(self, loader, names):
        parallel_test_method_prefixes = ['test_']
        serial_test_method_prefixes = ['serial_test_']
        if getattr(self._options, 'integration_tests', None):
            parallel_test_method_prefixes.append('integration_test_')
            serial_test_method_prefixes.append('serial_integration_test_')

        parallel_tests = []
        loader.test_method_prefixes = parallel_test_method_prefixes
        for name in names:
            parallel_tests.extend(
                self._all_test_names(loader.loadTestsFromName(name, None)))

        serial_tests = []
        loader.test_method_prefixes = serial_test_method_prefixes
        for name in names:
            serial_tests.extend(
                self._all_test_names(loader.loadTestsFromName(name, None)))

        # loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
        # if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
        # tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
        serial_tests = list(set(serial_tests).difference(set(parallel_tests)))

        return (parallel_tests, serial_tests)

    def _all_test_names(self, suite):
        names = []
        if hasattr(suite, '_tests'):
            for t in suite._tests:
                names.extend(self._all_test_names(t))
        else:
            names.append(unit_test_name(suite))
        return names

    def _log_exception(self):
        s = StringIO()
        traceback.print_exc(file=s)
        for l in s.getvalue().splitlines():
            _log.error('  ' + l.rstrip())
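_print_results_as_json() is not included in these examples. From its call sites it takes a writable stream, an iterable of test names, and the runner's failures and errors, where failures and errors are lists of (test_name, message_lines) pairs, exactly as Example #9's upload loop consumes them. A minimal stand-in under those assumptions might look like the following; the JSON schema itself is invented for illustration.

import json

def _print_results_as_json(stream, all_test_names, failures, errors):
    # Sketch only: serialize the run outcome in an ad-hoc schema.
    results = {
        'tests': list(all_test_names),
        'failures': {name: list(messages) for name, messages in failures},
        'errors': {name: list(messages) for name, messages in errors},
    }
    json.dump(results, stream, indent=2)
    stream.write('\n')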
Example #10
class Tester(object):
    def __init__(self, filesystem=None, webkit_finder=None):
        self.filesystem = filesystem or FileSystem()
        self.executive = Executive()
        self.finder = Finder(self.filesystem)
        self.printer = Printer(sys.stderr)
        self.webkit_finder = webkit_finder or WebKitFinder(self.filesystem)
        self._options = None

    def add_tree(self, top_directory, starting_subdirectory=None):
        self.finder.add_tree(top_directory, starting_subdirectory)

    def skip(self, names, reason, bugid):
        self.finder.skip(names, reason, bugid)

    def _parse_args(self, argv):
        parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
        parser.add_option('-a', '--all', action='store_true', default=False,
                          help='run all the tests')
        parser.add_option('-c', '--coverage', action='store_true', default=False,
                          help='generate code coverage info')
        parser.add_option('-i', '--integration-tests', action='store_true', default=False,
                          help='run integration tests as well as unit tests'),
        parser.add_option('-j', '--child-processes', action='store', type='int', default=(1 if sys.platform == 'win32' else multiprocessing.cpu_count()),
                          help='number of tests to run in parallel (default=%default)')
        parser.add_option('-p', '--pass-through', action='store_true', default=False,
                          help='be debugger friendly by passing captured output through to the system')
        parser.add_option('-q', '--quiet', action='store_true', default=False,
                          help='run quietly (errors, warnings, and progress only)')
        parser.add_option('-t', '--timing', action='store_true', default=False,
                          help='display per-test execution time (implies --verbose)')
        parser.add_option('-v', '--verbose', action='count', default=0,
                          help='verbose output (specify once for individual test results, twice for debug messages)')

        parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
                         'If no args are given, all the tests will be run.')

        return parser.parse_args(argv)

    def run(self):
        argv = sys.argv[1:]
        self._options, args = self._parse_args(argv)

        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # FIXME: unittest2 needs to be in sys.path for its internal imports to work.
        thirdparty_path = self.webkit_finder.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'thirdparty')
        if not thirdparty_path in sys.path:
            sys.path.append(thirdparty_path)

        self.printer.configure(self._options)

        # Do this after configuring the printer, so that logging works properly.
        if self._options.coverage:
            argv = ['-j', '1'] + [arg for arg in argv if arg not in ('-c', '--coverage', '-j', '--child-processes')]
            _log.warning('Checking code coverage, so running things serially')
            return self._run_under_coverage(argv)

        self.finder.clean_trees()

        names = self.finder.find_names(args, self._options.all)
        if not names:
            _log.error('No tests to run')
            return False

        return self._run_tests(names)

    def _run_under_coverage(self, argv):
        # coverage doesn't run properly unless its parent dir is in PYTHONPATH.
        # This means we need to add that dir to the environment. Also, the
        # report output is best when the paths are relative to the Scripts dir.
        dirname = self.filesystem.dirname
        script_dir = dirname(dirname(dirname(__file__)))
        thirdparty_dir = self.filesystem.join(script_dir, 'webkitpy', 'thirdparty')

        env = os.environ.copy()
        python_path = env.get('PYTHONPATH', '')
        python_path = python_path + os.pathsep + thirdparty_dir
        env['PYTHONPATH'] = python_path

        prefix_cmd = [sys.executable, 'webkitpy/thirdparty/coverage']
        exit_code = self.executive.call(prefix_cmd + ['run', __file__] + argv, cwd=script_dir, env=env)
        if not exit_code:
            exit_code = self.executive.call(prefix_cmd + ['report', '--omit', 'webkitpy/thirdparty/*,/usr/*,/Library/*'], cwd=script_dir, env=env)
        return (exit_code == 0)

    def _run_tests(self, names):
        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader, self.webkit_finder)
        test_runner.run(parallel_tests, self._options.child_processes)
        test_runner.run(serial_tests, 1)

        self.printer.print_result(time.time() - start)

        return not self.printer.num_errors and not self.printer.num_failures

    def _check_imports(self, names):
        for name in names:
            if self.finder.is_module(name):
                # if we failed to load a name and it looks like a module,
                # try importing it directly, because loadTestsFromName()
                # produces lousy error messages for bad modules.
                try:
                    __import__(name)
                except ImportError:
                    _log.fatal('Failed to import %s:' % name)
                    self._log_exception()
                    return False
        return True

    def _test_names(self, loader, names):
        parallel_test_method_prefixes = ['test_']
        serial_test_method_prefixes = ['serial_test_']
        if self._options.integration_tests:
            parallel_test_method_prefixes.append('integration_test_')
            serial_test_method_prefixes.append('serial_integration_test_')

        parallel_tests = []
        loader.test_method_prefixes = parallel_test_method_prefixes
        for name in names:
            parallel_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))

        serial_tests = []
        loader.test_method_prefixes = serial_test_method_prefixes
        for name in names:
            serial_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))

        # loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
        # if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
        # tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
        serial_tests = list(set(serial_tests).difference(set(parallel_tests)))

        return (parallel_tests, serial_tests)

    def _all_test_names(self, suite):
        names = []
        if hasattr(suite, '_tests'):
            for t in suite._tests:
                names.extend(self._all_test_names(t))
        else:
            names.append(unit_test_name(suite))
        return names

    def _log_exception(self):
        s = StringIO.StringIO()
        traceback.print_exc(file=s)
        for l in s.buflist:
            _log.error('  ' + l.rstrip())
Example #11
class Tester(object):
    def __init__(self, filesystem=None, webkit_finder=None):
        self.filesystem = filesystem or FileSystem()
        self.executive = Executive()
        self.finder = Finder(self.filesystem)
        self.printer = Printer(sys.stderr)
        self.webkit_finder = webkit_finder or WebKitFinder(self.filesystem)
        self._options = None

    def add_tree(self, top_directory, starting_subdirectory=None):
        self.finder.add_tree(top_directory, starting_subdirectory)

    def skip(self, names, reason, bugid):
        self.finder.skip(names, reason, bugid)

    def _parse_args(self, argv):
        parser = optparse.OptionParser(
            usage='usage: %prog [options] [args...]')
        parser.add_option('-a',
                          '--all',
                          action='store_true',
                          default=False,
                          help='run all the tests')
        parser.add_option('-c',
                          '--coverage',
                          action='store_true',
                          default=False,
                          help='generate code coverage info')
        parser.add_option(
            '-j',
            '--child-processes',
            action='store',
            type='int',
            default=(1 if sys.platform == 'win32' else
                     multiprocessing.cpu_count()),
            help='number of tests to run in parallel (default=%default)')
        parser.add_option(
            '-p',
            '--pass-through',
            action='store_true',
            default=False,
            help=
            'be debugger friendly by passing captured output through to the system'
        )
        parser.add_option(
            '-q',
            '--quiet',
            action='store_true',
            default=False,
            help='run quietly (errors, warnings, and progress only)')
        parser.add_option(
            '-t',
            '--timing',
            action='store_true',
            default=False,
            help='display per-test execution time (implies --verbose)')
        parser.add_option(
            '-v',
            '--verbose',
            action='count',
            default=0,
            help=
            'verbose output (specify once for individual test results, twice for debug messages)'
        )

        parser.epilog = (
            '[args...] is an optional list of modules, test_classes, or individual tests. '
            'If no args are given, all the tests will be run.')

        return parser.parse_args(argv)

    def run(self):
        argv = sys.argv[1:]
        self._options, args = self._parse_args(argv)

        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # FIXME: coverage needs to be in sys.path for its internal imports to work.
        thirdparty_path = self.webkit_finder.path_from_webkit_base(
            'tools', 'webkitpy', 'thirdparty')
        if not thirdparty_path in sys.path:
            sys.path.append(thirdparty_path)

        self.printer.configure(self._options)

        # Do this after configuring the printer, so that logging works properly.
        if self._options.coverage:
            argv = ['-j', '1'] + [
                arg for arg in argv
                if arg not in ('-c', '--coverage', '-j', '--child-processes')
            ]
            _log.warning('Checking code coverage, so running things serially')
            return self._run_under_coverage(argv)

        self.finder.clean_trees()

        names = self.finder.find_names(args, self._options.all)
        if not names:
            _log.error('No tests to run')
            return False

        return self._run_tests(names)

    def _run_under_coverage(self, argv):
        # coverage doesn't run properly unless its parent dir is in PYTHONPATH.
        # This means we need to add that dir to the environment. Also, the
        # report output is best when the paths are relative to the Scripts dir.
        dirname = self.filesystem.dirname
        script_dir = dirname(dirname(dirname(__file__)))
        thirdparty_dir = self.filesystem.join(script_dir, 'webkitpy',
                                              'thirdparty')

        env = os.environ.copy()
        python_path = env.get('PYTHONPATH', '')
        python_path = python_path + os.pathsep + thirdparty_dir
        env['PYTHONPATH'] = python_path

        prefix_cmd = [sys.executable, 'webkitpy/thirdparty/coverage']
        exit_code = self.executive.call(prefix_cmd + ['run', __file__] + argv,
                                        cwd=script_dir,
                                        env=env)
        if not exit_code:
            exit_code = self.executive.call(prefix_cmd + [
                'report', '--omit', 'webkitpy/thirdparty/*,/usr/*,/Library/*'
            ],
                                            cwd=script_dir,
                                            env=env)
        return (exit_code == 0)

    def _run_tests(self, names):
        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = unittest.TestLoader()
        tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(tests)
        start = time.time()
        test_runner = Runner(self.printer, loader, self.webkit_finder)
        test_runner.run(tests, self._options.child_processes)

        self.printer.print_result(time.time() - start)

        return not self.printer.num_errors and not self.printer.num_failures

    def _check_imports(self, names):
        for name in names:
            if self.finder.is_module(name):
                # if we failed to load a name and it looks like a module,
                # try importing it directly, because loadTestsFromName()
                # produces lousy error messages for bad modules.
                try:
                    __import__(name)
                except ImportError:
                    _log.fatal('Failed to import %s:' % name)
                    self._log_exception()
                    return False
        return True

    def _test_names(self, loader, names):
        tests = []
        for name in names:
            tests.extend(
                self._all_test_names(loader.loadTestsFromName(name, None)))
        return tests

    def _all_test_names(self, suite):
        names = []
        if hasattr(suite, '_tests'):
            for t in suite._tests:
                names.extend(self._all_test_names(t))
        else:
            names.append(unit_test_name(suite))
        return names

    def _log_exception(self):
        s = StringIO.StringIO()
        traceback.print_exc(file=s)
        for l in s.buflist:
            _log.error('  ' + l.rstrip())
Example #12
def __init__(self, filesystem=None):
    self.finder = Finder(filesystem or FileSystem())
    self.printer = Printer(sys.stderr)
    self._options = None
Example #13
class Tester(object):
    def __init__(self, filesystem=None):
        self.finder = Finder(filesystem or FileSystem())
        self.printer = Printer(sys.stderr)
        self._options = None

    def add_tree(self, top_directory, starting_subdirectory=None):
        self.finder.add_tree(top_directory, starting_subdirectory)

    def skip(self, names, reason, bugid):
        self.finder.skip(names, reason, bugid)

    def _parse_args(self, argv=None):
        parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
        parser.add_option('-a', '--all', action='store_true', default=False,
                          help='run all the tests')
        parser.add_option('-c', '--coverage', action='store_true', default=False,
                          help='generate code coverage info (requires http://pypi.python.org/pypi/coverage)')
        parser.add_option('-i', '--integration-tests', action='store_true', default=False,
                          help='run integration tests as well as unit tests'),
        parser.add_option('-j', '--child-processes', action='store', type='int', default=(1 if sys.platform == 'win32' else multiprocessing.cpu_count()),
                          help='number of tests to run in parallel (default=%default)')
        parser.add_option('-p', '--pass-through', action='store_true', default=False,
                          help='be debugger friendly by passing captured output through to the system')
        parser.add_option('-q', '--quiet', action='store_true', default=False,
                          help='run quietly (errors, warnings, and progress only)')
        parser.add_option('-t', '--timing', action='store_true', default=False,
                          help='display per-test execution time (implies --verbose)')
        parser.add_option('-v', '--verbose', action='count', default=0,
                          help='verbose output (specify once for individual test results, twice for debug messages)')

        parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
                         'If no args are given, all the tests will be run.')

        return parser.parse_args(argv)

    def run(self):
        self._options, args = self._parse_args()
        self.printer.configure(self._options)

        self.finder.clean_trees()

        names = self.finder.find_names(args, self._options.all)
        if not names:
            _log.error('No tests to run')
            return False

        return self._run_tests(names)

    def _run_tests(self, names):
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # We autoinstall everything up front so that we can run tests concurrently
        # and not have to worry about autoinstalling packages concurrently.
        self.printer.write_update("Checking autoinstalled packages ...")
        from webkitpy.thirdparty import autoinstall_everything
        installed_something = autoinstall_everything()

        # FIXME: There appears to be a bug in Python 2.6.1 that is causing multiprocessing
        # to hang after we install the packages in a clean checkout.
        if installed_something:
            _log.warning("We installed new packages, so running things serially at first")
            self._options.child_processes = 1

        if self._options.coverage:
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1

            import webkitpy.thirdparty.autoinstalled.coverage as coverage
            cov = coverage.coverage(omit=["/usr/*", "*/webkitpy/thirdparty/autoinstalled/*", "*/webkitpy/thirdparty/BeautifulSoup.py"])
            cov.start()

        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader)
        test_runner.run(parallel_tests, self._options.child_processes)
        test_runner.run(serial_tests, 1)

        self.printer.print_result(time.time() - start)

        if self._options.coverage:
            cov.stop()
            cov.save()
            cov.report(show_missing=False)

        return not self.printer.num_errors and not self.printer.num_failures

    def _check_imports(self, names):
        for name in names:
            if self.finder.is_module(name):
                # if we failed to load a name and it looks like a module,
                # try importing it directly, because loadTestsFromName()
                # produces lousy error messages for bad modules.
                try:
                    __import__(name)
                except ImportError:
                    _log.fatal('Failed to import %s:' % name)
                    self._log_exception()
                    return False
        return True

    def _test_names(self, loader, names):
        parallel_test_method_prefixes = ['test_']
        serial_test_method_prefixes = ['serial_test_']
        if self._options.integration_tests:
            parallel_test_method_prefixes.append('integration_test_')
            serial_test_method_prefixes.append('serial_integration_test_')

        parallel_tests = []
        loader.test_method_prefixes = parallel_test_method_prefixes
        for name in names:
            parallel_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))

        serial_tests = []
        loader.test_method_prefixes = serial_test_method_prefixes
        for name in names:
            serial_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))

        # loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
        # if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
        # tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
        serial_tests = list(set(serial_tests).difference(set(parallel_tests)))

        return (parallel_tests, serial_tests)

    def _all_test_names(self, suite):
        names = []
        if hasattr(suite, '_tests'):
            for t in suite._tests:
                names.extend(self._all_test_names(t))
        else:
            names.append(unit_test_name(suite))
        return names

    def _log_exception(self):
        s = StringIO.StringIO()
        traceback.print_exc(file=s)
        for l in s.buflist:
            _log.error('  ' + l.rstrip())
Example #14
class Tester(object):
    def __init__(self, filesystem=None):
        self.finder = Finder(filesystem or FileSystem())
        self.printer = Printer(sys.stderr)
        self._options = None

    def add_tree(self, top_directory, starting_subdirectory=None):
        self.finder.add_tree(top_directory, starting_subdirectory)

    def skip(self, names, reason, bugid):
        self.finder.skip(names, reason, bugid)

    def _parse_args(self, argv=None):
        parser = optparse.OptionParser(usage="usage: %prog [options] [args...]")
        parser.add_option("-a", "--all", action="store_true", default=False, help="run all the tests")
        parser.add_option(
            "-c",
            "--coverage",
            action="store_true",
            default=False,
            help="generate code coverage info (requires http://pypi.python.org/pypi/coverage)",
        )
        parser.add_option(
            "-i",
            "--integration-tests",
            action="store_true",
            default=False,
            help="run integration tests as well as unit tests",
        ),
        parser.add_option(
            "-j",
            "--child-processes",
            action="store",
            type="int",
            default=(1 if sys.platform.startswith("win") else multiprocessing.cpu_count()),
            help="number of tests to run in parallel (default=%default)",
        )
        parser.add_option(
            "-p",
            "--pass-through",
            action="store_true",
            default=False,
            help="be debugger friendly by passing captured output through to the system",
        )
        parser.add_option(
            "-q",
            "--quiet",
            action="store_true",
            default=False,
            help="run quietly (errors, warnings, and progress only)",
        )
        parser.add_option(
            "-t",
            "--timing",
            action="store_true",
            default=False,
            help="display per-test execution time (implies --verbose)",
        )
        parser.add_option(
            "-v",
            "--verbose",
            action="count",
            default=0,
            help="verbose output (specify once for individual test results, twice for debug messages)",
        )
        parser.add_option(
            "--json", action="store_true", default=False, help="write JSON formatted test results to stdout"
        )

        parser.epilog = (
            "[args...] is an optional list of modules, test_classes, or individual tests. "
            "If no args are given, all the tests will be run."
        )

        return parser.parse_args(argv)

    def run(self):
        self._options, args = self._parse_args()
        self.printer.configure(self._options)

        self.finder.clean_trees()

        names = self.finder.find_names(args, self._options.all)
        if not names:
            _log.error("No tests to run")
            return False

        return self._run_tests(names)

    def _run_tests(self, names):
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # We autoinstall everything up front so that we can run tests concurrently
        # and not have to worry about autoinstalling packages concurrently.
        self.printer.write_update("Checking autoinstalled packages ...")
        from webkitpy.thirdparty import autoinstall_everything

        autoinstall_everything()

        if self._options.coverage:
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1

            import webkitpy.thirdparty.autoinstalled.coverage as coverage

            cov = coverage.coverage(
                omit=["/usr/*", "*/webkitpy/thirdparty/autoinstalled/*", "*/webkitpy/thirdparty/BeautifulSoup.py"]
            )
            cov.start()

        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader)
        test_runner.run(parallel_tests, self._options.child_processes)
        test_runner.run(serial_tests, 1)

        self.printer.print_result(time.time() - start)

        if self._options.json:
            _print_results_as_json(
                sys.stdout, itertools.chain(parallel_tests, serial_tests), test_runner.failures, test_runner.errors
            )

        if self._options.coverage:
            cov.stop()
            cov.save()
            cov.report(show_missing=False)

        return not self.printer.num_errors and not self.printer.num_failures

    def _check_imports(self, names):
        for name in names:
            if self.finder.is_module(name):
                # if we failed to load a name and it looks like a module,
                # try importing it directly, because loadTestsFromName()
                # produces lousy error messages for bad modules.
                try:
                    __import__(name)
                except ImportError:
                    _log.fatal("Failed to import %s:" % name)
                    self._log_exception()
                    return False
        return True

    def _test_names(self, loader, names):
        parallel_test_method_prefixes = ["test_"]
        serial_test_method_prefixes = ["serial_test_"]
        if self._options.integration_tests:
            parallel_test_method_prefixes.append("integration_test_")
            serial_test_method_prefixes.append("serial_integration_test_")

        parallel_tests = []
        loader.test_method_prefixes = parallel_test_method_prefixes
        for name in names:
            parallel_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))

        serial_tests = []
        loader.test_method_prefixes = serial_test_method_prefixes
        for name in names:
            serial_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))

        # loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
        # if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
        # tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
        serial_tests = list(set(serial_tests).difference(set(parallel_tests)))

        return (parallel_tests, serial_tests)

    def _all_test_names(self, suite):
        names = []
        if hasattr(suite, "_tests"):
            for t in suite._tests:
                names.extend(self._all_test_names(t))
        else:
            names.append(unit_test_name(suite))
        return names

    def _log_exception(self):
        s = StringIO.StringIO()
        traceback.print_exc(file=s)
        for l in s.buflist:
            _log.error("  " + l.rstrip())