# Configure a minimal Django environment, then discover and run the suite.
env = environ.Env()
settings.configure(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
        }
    },
    DATABASES={'default': env.db(default='postgres:///forkit')},
    INSTALLED_APPS=('forkit', 'forkit.tests'),
)
setup()

# Mirror the DiscoverRunner keyword arguments on the command line.
cli = argparse.ArgumentParser()
cli.add_argument('tests', type=six.text_type, nargs='*', default=[])
cli.add_argument('-v', '--verbosity', type=int, default=1)
cli.add_argument('--failfast', action='store_true')
cli.add_argument('--reverse', action='store_true')
cli.add_argument('--keepdb', action='store_true')

options = vars(cli.parse_args())
labels = options.pop('tests')

# Everything left in ``options`` maps 1:1 onto DiscoverRunner kwargs.
runner = DiscoverRunner(**options)
if runner.run_tests(labels):
    sys.exit(1)
def before_all(context):
    """Behave hook: initialize Django and a headless Chrome browser once per run."""
    django.setup()
    runner = DiscoverRunner()
    context.test_runner = runner
    runner.setup_test_environment()
    context.browser = Browser('chrome', headless=True)
def run_tests():
    """Announce the active settings module, run the suite, and exit with the failure count."""
    banner = "\nRunning spirit test suite, using settings %(settings)r\n\n" % {
        "settings": os.environ['DJANGO_SETTINGS_MODULE'],
    }
    sys.stdout.write(banner)
    # run_tests() returns the number of failures; a nonzero exit code
    # therefore signals a failing build to CI.
    sys.exit(DiscoverRunner().run_tests(["tests", ]))
def setUp(self):
    """Snapshot the DB hooks this test will monkeypatch, and build a quiet runner."""
    # Saved so tearDown can restore the originals after patching.
    self._old_db_connections = db.connections
    self._old_create_test_db = DatabaseCreation.create_test_db
    self._old_destroy_test_db = DatabaseCreation.destroy_test_db
    self.runner_instance = DiscoverRunner(verbosity=0)
def main():
    """Configure Django if needed, run this app's tests, and exit with the failure count."""
    if not settings.configured:
        settings.configure(**DEFAULT_SETTINGS)
    django.setup()
    runner = DiscoverRunner(failfast=True)
    sys.exit(runner.run_tests([f"{app_name}.tests"]))
def test_faulthandler_enabled_fileno(self, mocked_enable):
    """enable_faulthandler=True re-enables faulthandler even when stderr
    is not an actual file (captured_stderr swaps in a StringIO)."""
    with mock.patch('faulthandler.is_enabled', return_value=False), \
            captured_stderr():
        DiscoverRunner(enable_faulthandler=True)
        mocked_enable.assert_called()
def run_tests(self, test_labels, extra_tests=None, **kwargs):
    """
    Run the unit tests for all the test labels in the provided list.

    Labels must be of the form:
     - app.TestClass.test_method
         Run a single specific test method
     - app.TestClass
         Run all the test methods in a given class
     - app
         Search for doctests and unittests in the named application.

    When looking for tests, the test runner will look in the models and
    tests modules for the application.

    A list of 'extra' tests may also be provided; these tests will be
    added to the test suite.

    If the settings file has an entry for COVERAGE_MODULES or test_labels
    is true, the coverage report for modules/apps is printed.

    You can control coverage's output via the COVERAGE_REPORT_TYPE
    variable; possible values are 'console' (default, output to stdout),
    'html' (outputs html to the directory specified by
    COVERAGE_HTML_DIRECTORY) or 'xml' (outputs an xml file to
    COVERAGE_XML_FILE).

    Returns the number of tests that failed.
    """
    do_coverage = (hasattr(settings, 'COVERAGE_MODULES') or
                   hasattr(settings, 'COVERAGE_APPS') or
                   bool(test_labels))
    cov = None
    if do_coverage:
        cov = coverage()
        cov.erase()
        cov.start()

    # Imported at call time so a missing/too-old Django raises a normal
    # ImportError here instead of at module import.  (The old
    # ``DiscoverRunner = None`` / ``except ImportError: raise`` dance was a
    # no-op that could leave ``retval`` unbound.)
    from django.test.runner import DiscoverRunner

    try:
        testrunner = DiscoverRunner(
            verbosity=self.verbosity,
            interactive=self.interactive,
            failfast=self.failfast,
        )
        retval = testrunner.run_tests(test_labels, extra_tests)
    except Exception:
        # Without this, nothing is written to the terminal before the
        # exception propagates.
        print('An error occurred while attempting to run the tests:')
        traceback.print_exc()
        raise

    if do_coverage:
        cov.stop()
        covtype = getattr(settings, 'COVERAGE_REPORT_TYPE', 'console')
        cov_reporter = _cov_reporters.get(covtype)
        if not cov_reporter:
            raise RuntimeError(
                'Invalid COVERAGE_REPORT_TYPE given: %s; valid values: %s'
                % (covtype, _valid_covtypes_str))
        cov_reporter(cov, self._coverage_modules(test_labels))
    return retval

def _coverage_modules(self, test_labels):
    """Import and return the list of modules the coverage report covers."""
    modules = []
    if test_labels or hasattr(settings, 'COVERAGE_APPS'):
        # Apps entered at the command line override those in settings.
        labels = test_labels or settings.COVERAGE_APPS
        for label in labels:
            parts = label.split('.')
            if len(parts) > 1:
                # Remove the test class or test method from the label.
                label = '.'.join(parts[:-1])
            pkg = _get_app_package(label)
            modules.extend(_package_modules(*pkg))
    elif hasattr(settings, 'COVERAGE_MODULES'):
        modules = [
            __import__(module, {}, {}, [''])
            for module in settings.COVERAGE_MODULES
        ]
    if hasattr(settings, 'COVERAGE_EXCLUDE_MODULES'):
        for excluded in settings.COVERAGE_EXCLUDE_MODULES:
            # Remove the excluded module and its submodules, while keeping
            # modules whose names merely start with the same prefix.
            modules = [
                m for m in modules
                if m.__name__ != excluded
                and not m.__name__.startswith(excluded + '.')
            ]
    return modules
def test_dotted_test_method_django_testcase(self):
    """A fully dotted path down to a test method selects exactly that one test."""
    suite = DiscoverRunner(verbosity=0).build_suite(
        ['test_runner_apps.sample.tests_sample.TestDjangoTestCase.test_sample'],
    )
    self.assertEqual(suite.countTestCases(), 1)
def test_empty_test_case(self):
    """A TestCase subclass with no test methods yields an empty suite."""
    suite = DiscoverRunner(verbosity=0).build_suite(
        ['test_runner_apps.sample.tests_sample.EmptyTestCase'],
    )
    self.assertEqual(suite.countTestCases(), 0)
def test_dotted_test_module(self):
    """Naming a test module collects every test defined inside it."""
    suite = DiscoverRunner(verbosity=0).build_suite(
        ['test_runner_apps.sample.tests_sample'],
    )
    self.assertEqual(suite.countTestCases(), 4)
def test_dotted_test_class_vanilla_unittest(self):
    """A plain unittest.TestCase subclass is discoverable by dotted class path."""
    suite = DiscoverRunner(verbosity=0).build_suite(
        ['test_runner_apps.sample.tests_sample.TestVanillaUnittest'],
    )
    self.assertEqual(suite.countTestCases(), 1)
def test_setup_shuffler_no_shuffle_argument(self):
    """Without a shuffle argument, setup_shuffler() leaves shuffling off."""
    test_runner = DiscoverRunner()
    self.assertIs(test_runner.shuffle, False)
    test_runner.setup_shuffler()
    self.assertIsNone(test_runner.shuffle_seed)
def test_init_debug_mode(self):
    """debug_mode defaults to False on a freshly constructed runner."""
    self.assertFalse(DiscoverRunner().debug_mode)
def runtests(*test_args):
    """Set up the test environment and run the wakawaka suite, exiting on failure."""
    setup()
    failures = DiscoverRunner(verbosity=1).run_tests(['wakawaka'])
    if failures:
        sys.exit(failures)
def test_pdb_with_parallel(self):
    """--pdb combined with parallelism greater than 1 is rejected at construction."""
    msg = (
        'You cannot use --pdb with parallel tests; pass --parallel=1 to '
        'use it.')
    with self.assertRaisesMessage(ValueError, msg):
        DiscoverRunner(pdb=True, parallel=2)
def test_discovery_on_package(self):
    """Discovery on a package label finds the tests inside that package."""
    suite = DiscoverRunner(verbosity=0).build_suite(
        ['test_runner_apps.sample.tests'],
    )
    self.assertEqual(suite.countTestCases(), 1)
def test_number_of_parallel_workers(self):
    """Number of processes doesn't exceed the number of TestCases."""
    suite = DiscoverRunner(parallel=5, verbosity=0).build_suite(
        ['test_runner_apps.tagged'])
    self.assertEqual(suite.processes, len(suite.subsuites))
def test_overridable_get_test_runner_kwargs(self):
    """get_test_runner_kwargs() returns a plain dict subclasses may extend."""
    kwargs = DiscoverRunner().get_test_runner_kwargs()
    self.assertIsInstance(kwargs, dict)
def test_faulthandler_disabled(self, mocked_enable):
    """enable_faulthandler=False must never turn faulthandler on."""
    with mock.patch('faulthandler.is_enabled', return_value=False):
        DiscoverRunner(enable_faulthandler=False)
        mocked_enable.assert_not_called()
def test_overridable_test_suite(self):
    """The suite class is exposed as an attribute so subclasses can swap it."""
    self.assertEqual(DiscoverRunner().test_suite, TestSuite)
'pathology', )) import django django.setup() from opal.core import application class Application(application.OpalApplication): pass try: sys.argv.remove('--failfast') failfast = True except ValueError: failfast = False from django.test.runner import DiscoverRunner test_runner = DiscoverRunner(verbosity=1, failfast=failfast) if len(sys.argv) == 2: failures = test_runner.run_tests([ sys.argv[-1], ]) else: failures = test_runner.run_tests([ 'pathology', ]) if failures: sys.exit(failures)
def test_overridable_test_runner(self):
    """The low-level runner class is exposed so subclasses can replace it."""
    self.assertEqual(DiscoverRunner().test_runner, TextTestRunner)
def before_all(context):
    """Behave hook: initialize Django and a visible Firefox browser once per run."""
    django.setup()
    runner = DiscoverRunner()
    context.test_runner = runner
    runner.setup_test_environment()
    context.browser = Browser('firefox', headless=False)
def test_overridable_test_loader(self):
    """The loader is exposed as an attribute so subclasses can replace it."""
    self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader)
def setUp(self):
    """Give each test its own silent DiscoverRunner instance."""
    self.runner_instance = DiscoverRunner(verbosity=0)
def count_tests(**kwargs):
    """Build the tag-inheritance suite with the given runner kwargs and count its tests."""
    kwargs.setdefault('verbosity', 0)
    runner = DiscoverRunner(**kwargs)
    suite = runner.build_suite(['test_runner_apps.tagged.tests_inheritance'])
    return suite.countTestCases()
'auth': lambda u: u.is_authenticated, 'allowed_types': ['video/mp4'], }, 'cached': { 'key': 'uploads/vids', 'auth': lambda u: True, 'allowed_types': '*', 'acl': 'authenticated-read', 'bucket': 'astoragebucketname', 'cache_control': 'max-age=2592000', 'content_disposition': 'attachment', 'server_side_encryption': 'AES256', } } ) if hasattr(django, 'setup'): django.setup() if django.get_version() < StrictVersion('1.6'): from django.test.simple import DjangoTestSuiteRunner test_runner = DjangoTestSuiteRunner(verbosity=1) else: from django.test.runner import DiscoverRunner test_runner = DiscoverRunner(verbosity=1) failures = test_runner.run_tests(['s3upload', ]) if failures: sys.exit(failures)
def test_excluded_tags_displayed(self):
    """At verbosity 3, excluded tags are listed (sorted) while building the suite."""
    runner = DiscoverRunner(exclude_tags=['foo', 'bar'], verbosity=3)
    with captured_stdout() as stdout:
        runner.build_suite(['test_runner_apps.tagged.tests'])
    self.assertIn('Excluding test tag(s): bar, foo.\n', stdout.getvalue())
def test_file_path(self):
    """A filesystem path (relative to the changed cwd) is a valid test label."""
    with change_cwd(".."):
        count = DiscoverRunner(verbosity=0).build_suite(
            ['test_runner_apps/sample/'],
        ).countTestCases()
    self.assertEqual(count, 5)
#!/usr/bin/env python
# NOTE(review): the original first line was "# /usr/bin/python" — a comment,
# not a shebang (space after '#', missing '!'), so the script was not
# directly executable; fixed to a portable shebang.
"""Minimal standalone test runner for the ajax_cbv app."""
import sys

import django
from django.conf import settings
from django.test.runner import DiscoverRunner

# Configured at import time so the settings exist before django.setup().
settings.configure(INSTALLED_APPS=('ajax_cbv', ), )

if __name__ == "__main__":
    django.setup()
    runner = DiscoverRunner()
    failures = runner.run_tests(['ajax_cbv'])
    if failures:
        sys.exit(failures)