def setup_django(self):
    """Prepare the Django test environment and test database(s)."""
    from django.conf import settings

    if self.legacy_django:
        # Django < 1.2: no suite runner exists; drive the connection directly.
        from django.db import connection
        from django.test.utils import setup_test_environment

        setup_test_environment()
        # Remember the real database name so it can be restored later.
        self.old_database_name = settings.DATABASE_NAME
        connection.creation.create_test_db(self.verbosity, autoclobber=True)
    else:
        # Django >= 1.2: DjangoTestSuiteRunner handles environment + databases.
        from django.test.simple import DjangoTestSuiteRunner

        suite_runner = DjangoTestSuiteRunner(
            interactive=self.interactive, verbosity=self.verbosity)
        suite_runner.setup_test_environment()
        self.old_config = suite_runner.setup_databases()
        self.runner = suite_runner
def run(self):
    """Run the test suite, optionally inside a Django test database.

    Restores ``sys.stdout``/``sys.stderr`` afterwards and exits the
    process with the runner's requested exit code, if any.
    """
    catchbreak = self.unit_test.pop("catchbreak")
    if catchbreak:
        from unittest.signals import installHandler
        installHandler()
    # Before use the output, we save the original for reset in the end
    org_out = sys.stdout
    org_err = sys.stderr
    # Bug fix: ``test_runner`` was referenced after the try block even when
    # ``TestRunner(...)`` itself raised, producing a NameError that masked
    # the original error. Pre-bind it to None and guard the final check.
    test_runner = None
    try:
        test_runner = TestRunner(**self.__dict__)
        if test_runner.exit_request is None:
            if self.is_django:
                from django.test.simple import DjangoTestSuiteRunner
                dtsr = DjangoTestSuiteRunner(**self.unit_test)
                dtsr.setup_test_environment()
                old_config = dtsr.setup_databases()
                test_runner.run(self.test_list)
                dtsr.teardown_databases(old_config)
                dtsr.teardown_test_environment()
            else:
                test_runner.run(self.test_list)
    except Exception:
        print_exception()
    finally:
        # Return the output
        sys.stdout = org_out
        sys.stderr = org_err
    if test_runner is not None and isinstance(test_runner.exit_request, int):
        sys.exit(test_runner.exit_request)
class DBTestCase(TestCase, AssertQueriesCountMixin):
    """TestCase that builds a full test database for every test via
    DjangoTestSuiteRunner, with overridable hooks around each setup phase."""

    # Kept as class attributes so tearDown has something to reference
    # even if setUp only partially completed.
    test_runner, old_config = None, None

    def setUp(self):
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner()
        self.before_environment_setup()  # HOOK
        self.test_runner.setup_test_environment()
        self.before_database_setup()  # HOOK
        # setup_databases() returns the state teardown_databases() needs.
        self.old_config = self.test_runner.setup_databases()
        self.setup()

    def tearDown(self):
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.teardown()

    # just to make it look like pep8
    def before_environment_setup(self):
        """Hook: runs before the test environment is set up."""
        pass

    def before_database_setup(self):
        """Hook: runs after environment setup, before database creation."""
        pass

    def setup(self):
        """Hook: per-test setup, runs after databases exist."""
        pass

    def teardown(self):
        """Hook: per-test teardown, runs after databases are destroyed."""
        pass
def get_runner(config):
    """Return a DjangoTestSuiteRunner configured for --no-db / --reuse-db."""
    suite_runner = DjangoTestSuiteRunner(interactive=False)

    if config.option.no_db:
        def _forbid_cursor(*args, **kwargs):
            raise RuntimeError('No database access is allowed since --no-db was used!')

        def _patched_setup():
            # Monkey patch CursorWrapper so any DB access fails loudly.
            django.db.backends.util.CursorWrapper = _forbid_cursor

        def _patched_teardown(db_config):
            pass

        suite_runner.setup_databases = _patched_setup
        suite_runner.teardown_databases = _patched_teardown
    elif config.option.reuse_db:
        if not config.option.create_db:
            monkey_patch_creation_for_db_reuse()
        # Keep the database around for the next test run.
        suite_runner.teardown_databases = lambda db_config: None

    return suite_runner
def setUpClass(cls):
    """Create the test databases once per class and load the fixture."""
    runner = DjangoTestSuiteRunner(interactive=False, verbosity=1)
    db_state = runner.setup_databases()
    load_fixture()
    # Plain attribute assignment is equivalent to the setattr() calls.
    cls.test_runner = runner
    cls.test_db = db_state
def handle(self, *args, **options):
    """Management-command entry point: build a South-aware test database,
    then launch the lettuce HTTP server (Python 2 code)."""
    USE_SOUTH = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
    try:
        if USE_SOUTH:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
    except:
        # NOTE(review): bare except silently disables South on *any* error,
        # not just a missing import — consider narrowing to ImportError.
        USE_SOUTH = False
    self._test_runner = DjangoTestSuiteRunner(interactive=False)
    # Runner methods are called unbound, passing the instance explicitly.
    DjangoTestSuiteRunner.setup_test_environment(self._test_runner)
    self._created_db = DjangoTestSuiteRunner.setup_databases(self._test_runner)
    call_command('syncdb', verbosity=0, interactive=False,)
    if USE_SOUTH:
        call_command('migrate', verbosity=0, interactive=False,)
    settings.DEBUG = options.get('debug', False)
    verbosity = int(options.get('verbosity', 4))
    apps_to_run = tuple(options.get('apps', '').split(","))
    apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
    run_server = not options.get('no_server', False)
    tags = options.get('tags', None)
    server = Server(port=options['port'])
    paths = self.get_paths(args, apps_to_run, apps_to_avoid)
    if run_server:
        try:
            server.start()
        except LettuceServerException, e:  # Python 2 except syntax
            raise SystemExit(e)
def test_setup_aliased_databases(self):
    """Two connections sharing one NAME must create/destroy the test DB once."""
    from django.db.backends.dummy.base import DatabaseCreation
    runner = DjangoTestSuiteRunner(verbosity=0)
    # Save everything we monkeypatch so finally can restore it.
    old_db_connections = db.connections
    old_destroy_test_db = DatabaseCreation.destroy_test_db
    old_create_test_db = DatabaseCreation.create_test_db
    try:
        destroyed_names = []
        # Stub out creation/destruction; record which names get destroyed.
        DatabaseCreation.destroy_test_db = lambda self, old_database_name, verbosity=1: destroyed_names.append(old_database_name)
        DatabaseCreation.create_test_db = lambda self, verbosity=1, autoclobber=False: self._get_test_db_name()
        db.connections = db.ConnectionHandler({
            'default': {
                'ENGINE': 'django.db.backends.dummy',
                'NAME': 'dbname',
            },
            'other': {
                'ENGINE': 'django.db.backends.dummy',
                'NAME': 'dbname',
            }
        })
        old_config = runner.setup_databases()
        runner.teardown_databases(old_config)
        # The aliased database must be torn down exactly once, not twice.
        self.assertEqual(destroyed_names.count('dbname'), 1)
    finally:
        # Restore patched class methods and the real connection handler.
        DatabaseCreation.create_test_db = old_create_test_db
        DatabaseCreation.destroy_test_db = old_destroy_test_db
        db.connections = old_db_connections
def setup():
    """Create the Django test environment and test databases.

    Stores the runner and its teardown state in module globals so a
    matching teardown can clean up later.
    """
    global test_runner
    global old_config

    from django.test.simple import DjangoTestSuiteRunner

    runner = DjangoTestSuiteRunner()
    runner.setup_test_environment()
    config = runner.setup_databases()
    test_runner, old_config = runner, config
class LemonwiseReviewsTestSetup(Plugin):
    """ Nose plugin that sets up the environment needed to test """

    enabled = True
    name = 'testsetup-lemonwise'
    old_config = None
    runner = None

    def __init__(self, *args, **kwargs):
        # Import client here to patch Django's own version
        from lemonwise.utils.testharness import client
        super(LemonwiseReviewsTestSetup, self).__init__(*args, **kwargs)

    def options(self, parser, env):
        super(LemonwiseReviewsTestSetup, self).options(parser, env)

    def begin(self):
        """Nose hook: build the test environment and databases."""
        from django.test.simple import DjangoTestSuiteRunner

        setup_environ(settings)
        suite_runner = DjangoTestSuiteRunner()
        self.runner = suite_runner
        suite_runner.setup_test_environment()
        self.old_config = suite_runner.setup_databases()

    def finalize(self, result):
        """Nose hook: tear down databases, then the environment."""
        self.runner.teardown_databases(self.old_config)
        self.runner.teardown_test_environment()
def setup_django_test_database():
    """Set up a South-patched Django test database.

    Returns a ``(runner, old_config)`` pair for later teardown.
    """
    from django.test.simple import DjangoTestSuiteRunner
    from django.test.utils import setup_test_environment
    from south.management.commands import patch_for_test_db_setup

    suite_runner = DjangoTestSuiteRunner(verbosity=0, failfast=False)
    patch_for_test_db_setup()
    setup_test_environment()
    db_state = suite_runner.setup_databases()
    return suite_runner, db_state
def setup():
    """Build the Django test databases, then register ella modules."""
    global test_runner
    global old_config

    from django.test.simple import DjangoTestSuiteRunner
    from ella.utils.installedapps import call_modules

    runner = DjangoTestSuiteRunner()
    runner.setup_test_environment()
    config = runner.setup_databases()
    test_runner, old_config = runner, config
    call_modules(('register', ))
def setUpModule():
    """Skip the module when Django is missing; otherwise create test DBs."""
    if django is None:  # pragma: no cover
        raise unittest.SkipTest("Django not installed")
    django_test_utils.setup_test_environment()
    suite_runner = DjangoTestSuiteRunner()
    db_state = suite_runner.setup_databases()
    # Stash the runner and its teardown state for tearDownModule.
    test_state['runner'] = suite_runner
    test_state['runner_state'] = db_state
def setup_database(real_server):
    """Create the Django test database and sync the schema.

    Stores the runner and created-DB state on ``world`` for later teardown.
    """
    logger.info("Setting up test database.")
    world.test_runner = DjangoTestSuiteRunner(interactive=False)
    # Bug fix: this previously read ``djangoTestSuiteRunner`` (lower-case
    # ``d``) — an undefined name that raised NameError at runtime.
    DjangoTestSuiteRunner.setup_test_environment(world.test_runner)
    world.created_db = DjangoTestSuiteRunner.setup_databases(world.test_runner)
    call_command('syncdb', interactive=False, verbosity=0)
def setup():
    """Build the Django test DBs, register ella modules, and silence
    legacy comment signals."""
    global test_runner
    global old_config

    from django.test.simple import DjangoTestSuiteRunner
    from ella.utils.installedapps import call_modules

    runner = DjangoTestSuiteRunner()
    runner.setup_test_environment()
    config = runner.setup_databases()
    test_runner, old_config = runner, config
    call_modules(('register', ))

    from ella_flatcomments.utils import disconnect_legacy_signals
    disconnect_legacy_signals()
def setup_database(actual_server):
    """Create and sync the test database before the test Django server
    is set up. (South migrations would run here if the patch call below
    were uncommented.)"""
    logger.info("Setting up a test database...")

    # Uncomment if you are using South
    # patch_for_test_db_setup()

    runner = DjangoTestSuiteRunner(interactive=False)
    world.test_runner = runner
    # Runner helpers are invoked unbound with the instance passed explicitly.
    DjangoTestSuiteRunner.setup_test_environment(runner)
    world.created_db = DjangoTestSuiteRunner.setup_databases(runner)
    call_command('syncdb', interactive=False, verbosity=0)
def setup_databases(self, *args, **kwargs):
    '''
    Wrap sys.stdout to filter the noisy output while the base class
    implementation sets up the test database.
    '''
    saved_stdout = sys.stdout
    sys.stdout = FilteredStream(saved_stdout)
    try:
        # Returning from inside try still runs the finally restore.
        return DjangoTestSuiteRunner.setup_databases(self, *args, **kwargs)
    finally:
        sys.stdout = saved_stdout
def setup():
    """Initialise the test environment using whichever runner this
    Django version provides."""
    global test_runner
    global old_config

    try:
        from django.test.simple import DjangoTestSuiteRunner as TestSuiteRunner
    except ImportError:
        # DjangoTestSuiteRunner was deprecated in django 1.8:
        # https://docs.djangoproject.com/en/1.8/internals/deprecation/#deprecation-removed-in-1-8
        from django.test.runner import DiscoverRunner as TestSuiteRunner

    runner = TestSuiteRunner()
    test_runner = runner
    runner.setup_test_environment()
    old_config = runner.setup_databases()
def test_setup_databases(self):
    """
    Test that setup_databases() doesn't fail with dummy database backend.
    """
    runner = DjangoTestSuiteRunner(verbosity=0)
    old_db_connections = db.connections
    try:
        # Empty handler: every connection falls back to the dummy backend.
        db.connections = db.ConnectionHandler({})
        old_config = runner.setup_databases()
        runner.teardown_databases(old_config)
    except Exception as e:
        self.fail("setup_databases/teardown_databases unexpectedly raised "
                  "an error: %s" % e)
    finally:
        # Always restore the real connection handler.
        db.connections = old_db_connections
def setup_database(actual_server):
    '''
    Create and sync the test database before the Test Django server is
    set up. South migrations would also run here if the patch call
    below were enabled.
    '''
    logger.info("Setting up a test database...")

    # Uncomment if you are using South
    # patch_for_test_db_setup()

    runner = DjangoTestSuiteRunner(interactive=False)
    world.test_runner = runner
    DjangoTestSuiteRunner.setup_test_environment(runner)
    world.created_db = DjangoTestSuiteRunner.setup_databases(runner)
    call_command('syncdb', interactive=False, verbosity=0)
class DjangoConfig(Plugin):
    """Nose2 plugin that initialises the Django test environment and
    re-orders the test suite the way Django's own runner would."""

    configSection = 'django-runner'
    commandLineSwitch = (None, 'django-runner', 'Initialises the django test environment and re-orders the test suite')

    def startTestRun(self, event):
        """Nose2 hook for the beginning of test running.

        Init the django environ and re-order the tests according to the
        django documented test runner behaviour.
        """
        try:
            # Django >= 1.6
            from django.test.runner import reorder_suite
        except ImportError:
            # Django < 1.6
            from django.test.simple import reorder_suite
        from django.test.simple import DjangoTestSuiteRunner
        from django.test.utils import setup_test_environment
        # Init the django default runner so we can call it's functions as needed
        self.dtsr = DjangoTestSuiteRunner()
        setup_test_environment()
        event.suite = reorder_suite(event.suite, (unittest.TestCase,))
        self.old_config = self.dtsr.setup_databases()
        if self.session.verbosity > 1:
            # ensure that deprecation warnings are displayed during testing
            # the following state is assumed:
            # logging.capturewarnings is true
            # a "default" level warnings filter has been added for
            # DeprecationWarning. See django.conf.LazySettings._configure_logging
            self.logger = logging.getLogger('py.warnings')
            # Bug fix: keep a reference to the handler we add so that
            # afterTestRun removes *this* handler. The old code removed a
            # brand-new (never-added) StreamHandler, leaking the real one.
            self.warnings_handler = logging.StreamHandler()
            self.logger.addHandler(self.warnings_handler)

    def afterTestRun(self, event):
        """Nose2 hook for the end of the test run"""
        from django.test.utils import teardown_test_environment
        if self.session.verbosity > 1:
            # remove the testing-specific handler added in startTestRun
            self.logger.removeHandler(self.warnings_handler)
        self.dtsr.teardown_databases(self.old_config)
        teardown_test_environment()
def setup():
    """
    Setup the environment for Django (create databases, turn on DEBUG, etc).

    :returns: teardown function
    """
    # Use Django's test suite runner, as it sets up test databases nicely
    suite_runner = DjangoTestSuiteRunner()
    suite_runner.setup_test_environment()
    db_state = suite_runner.setup_databases()

    def teardown():
        # Undo in reverse order of setup.
        suite_runner.teardown_databases(db_state)
        suite_runner.teardown_test_environment()

    return teardown
def setup():
    """Best-effort test setup: build test databases and activate the Czech
    locale. Errors are reported but swallowed, preserving the original
    intent of not aborting the run on setup failure."""
    try:
        global test_runner
        global old_config
        from django.test.simple import DjangoTestSuiteRunner
        test_runner = DjangoTestSuiteRunner()
        test_runner.setup_test_environment()
        old_config = test_runner.setup_databases()
        from django.utils.translation import activate
        activate('cs')
    except Exception:
        # Bug fix: the old code did ``pprint.pprint(traceback.print_exc())``,
        # which prints the traceback and then pretty-prints the call's return
        # value — ``None``. Print the traceback once, properly.
        import traceback
        traceback.print_exc()
class Command(BaseCommand):
    """Dump a generated fixture: find an ``@fixture_gen`` function, run it
    (and its requirements) against freshly created test databases, then
    dumpdata the resulting models for every connection alias."""

    # Reuse dumpdata's options, minus --database/--exclude which we control.
    option_list = tuple(
        opt for opt in DumpDataCommand.option_list
        if "--database" not in opt._long_opts and "--exclude" not in opt._long_opts
    )
    args = "app_label.fixture"

    def handle(self, fixture, **options):
        # Discover every @fixture_gen function in each app's fixture_gen module.
        available_fixtures = {}
        for app in settings.INSTALLED_APPS:
            try:
                fixture_gen = import_module(".fixture_gen", app)
            except ImportError:
                # Re-raise only when the submodule exists but is itself broken;
                # apps without a fixture_gen module are simply skipped.
                if module_has_submodule(import_module(app), "fixture_gen"):
                    raise
                continue
            for obj in fixture_gen.__dict__.values():
                if getattr(obj, "__fixture_gen__", False):
                    available_fixtures[(app.rsplit(".", 1)[-1], obj.__name__)] = obj

        app_label, fixture_name = fixture.rsplit(".", 1)
        try:
            fixture = available_fixtures[(app_label, fixture_name)]
        except KeyError:
            available = ", ".join(
                "%s.%s" % (app_label, fixture_name)
                for app_label, fixture_name in available_fixtures
            )
            raise CommandError("Fixture generator '%s' not found, available "
                "choices: %s" % (fixture, available))

        # Resolve dependency order for the requested fixture generator.
        requirements, models = linearize_requirements(available_fixtures, fixture)

        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(verbosity=0)
        self.old_config = self.test_runner.setup_databases()
        try:
            for fixture_func in requirements:
                fixture_func()
            for alias in connections._connections:
                call_command("dumpdata",
                    *["%s.%s" % (m._meta.app_label, m._meta.object_name)
                      for m in models],
                    **dict(options, verbosity=0, database=alias)
                )
        finally:
            # Always destroy the temporary test databases.
            self.test_runner.teardown_databases(self.old_config)
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using DjangoTestSuiteRunner
    as test runner. Also runs PEP8 and Coverage checks.
    """
    def __init__(self, *args, **kwargs):
        self.configure()
        # Start coverage collection before application code is imported.
        self.cov = coverage()
        self.cov.start()
        self.packages = self.resolve_packages()
        super(SetupTestSuite, self).__init__(tests=self.build_tests(),
            *args, **kwargs)
        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=1,
            interactive=True,
            failfast=False
        )
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.db.models import get_app
        from django.test.simple import build_suite
        tests = []
        for package in self.packages:
            try:
                app_name = package.rsplit('.')[-1]
                app = get_app(app_name, emptyOK=True)
                tests.append(build_suite(app))
            except ImproperlyConfigured, e:  # Python 2 except syntax
                # NOTE(review): ``raise log.info(...)`` raises log.info's
                # return value (None) — a TypeError at runtime. Probably a
                # collapse of a bare ``raise``; left untouched in this
                # doc-only pass. Compare the sibling suite's handler which
                # only logs.
                raise log.info("Warning: %s" % e)
            except ImportError, e:
                raise log.info("Warning: %s" % e)
def get_runner(config):
    """Return a DjangoTestSuiteRunner; with --no-db, stub out database
    setup and make any DB access raise immediately."""
    runner = DjangoTestSuiteRunner(interactive=False)
    if config.option.no_db:
        def cursor_wrapper_exception(*args, **kwargs):
            raise RuntimeError('No database access is allowed since --no-db was used!')

        def setup_databases():
            # Monkey patch CursorWrapper to warn against database usage
            django.db.backends.util.CursorWrapper = cursor_wrapper_exception

        def teardown_databases(db_config):
            pass

        runner.setup_databases = setup_databases
        runner.teardown_databases = teardown_databases
    return runner
def handle(self, *args, **options):
    """Management-command entry point: create a South-aware test database,
    then launch the lettuce HTTP server (Python 2 code)."""
    USE_SOUTH = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
    try:
        if USE_SOUTH:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
    except:
        # NOTE(review): bare except disables South on *any* failure,
        # not just a missing import — consider narrowing to ImportError.
        USE_SOUTH = False
    self._test_runner = DjangoTestSuiteRunner(interactive=False)
    # Runner methods are called unbound, passing the instance explicitly.
    DjangoTestSuiteRunner.setup_test_environment(self._test_runner)
    self._created_db = DjangoTestSuiteRunner.setup_databases(
        self._test_runner)
    call_command(
        'syncdb',
        verbosity=0,
        interactive=False,
    )
    if USE_SOUTH:
        call_command(
            'migrate',
            verbosity=0,
            interactive=False,
        )
    settings.DEBUG = options.get('debug', False)
    verbosity = int(options.get('verbosity', 4))
    apps_to_run = tuple(options.get('apps', '').split(","))
    apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
    run_server = not options.get('no_server', False)
    tags = options.get('tags', None)
    server = Server(port=options['port'])
    paths = self.get_paths(args, apps_to_run, apps_to_avoid)
    if run_server:
        try:
            server.start()
        except LettuceServerException, e:  # Python 2 except syntax
            raise SystemExit(e)
def _django_runner(request):
    """Pytest fixture helper: a DjangoTestSuiteRunner with the environment
    and databases set up, torn down via request finalizers (LIFO order:
    databases first, then the environment)."""
    if not is_configured():
        return

    from django.test.simple import DjangoTestSuiteRunner
    import django

    if hasattr(django, 'setup'):
        django.setup()

    suite_runner = DjangoTestSuiteRunner(interactive=False)
    suite_runner.setup_test_environment()
    request.addfinalizer(suite_runner.teardown_test_environment)

    db_state = suite_runner.setup_databases()
    request.addfinalizer(lambda: suite_runner.teardown_databases(db_state))

    return suite_runner
def test_setup_aliased_default_database(self):
    """
    Test that setup_databases() doesn't fail when 'default' is aliased
    """
    runner = DjangoTestSuiteRunner(verbosity=0)
    old_db_connections = db.connections
    try:
        # Two aliases pointing at the same underlying database name.
        db.connections = db.ConnectionHandler({
            'default': {
                'NAME': 'dummy'
            },
            'aliased': {
                'NAME': 'dummy'
            }
        })
        old_config = runner.setup_databases()
        runner.teardown_databases(old_config)
    except Exception as e:
        self.fail("setup_databases/teardown_databases unexpectedly raised "
                  "an error: %s" % e)
    finally:
        # Always restore the real connection handler.
        db.connections = old_db_connections
def setup_django(self):
    """Prepare the Django test environment and databases, preserving the
    caller's ``settings.DEBUG`` value across environment setup."""
    from django.conf import settings

    if self.legacy_django:
        # Django < 1.2: drive the connection machinery directly.
        from django.db import connection
        from django.test.utils import setup_test_environment

        setup_test_environment()
        self.old_database_name = settings.DATABASE_NAME
        connection.creation.create_test_db(self.verbosity, autoclobber=True)
    else:
        # Django >= 1.2: use the suite runner.
        from django.test.simple import DjangoTestSuiteRunner

        suite_runner = DjangoTestSuiteRunner(
            interactive=self.interactive, verbosity=self.verbosity)
        # The runner forces DEBUG to False while setting up the test
        # environment, so remember the real value and restore it after.
        real_debug = settings.DEBUG
        suite_runner.setup_test_environment()
        if settings.DEBUG != real_debug:
            settings.DEBUG = real_debug
        self.old_config = suite_runner.setup_databases()
        self.runner = suite_runner
class Command(BaseCommand):
    """Run lettuce features across installed apps, optionally serving
    them over django's builtin HTTP server (Python 2 code)."""

    help = u'Run lettuce tests all along installed apps'
    args = '[PATH to feature file or folder]'
    requires_model_validation = False

    # BaseCommand's own --verbosity option is replaced by the
    # choice-typed variant below (hence the [1:] slice).
    option_list = BaseCommand.option_list[1:] + (
        make_option('-v', '--verbosity', action='store', dest='verbosity',
                    default='4', type='choice', choices=map(str, range(5)),
                    help='Verbosity level; 0=no output, 1=only dots, 2=only scenario names, 3=colorless output, 4=normal output (colorful)'),
        make_option('-a', '--apps', action='store', dest='apps', default='',
                    help='Run ONLY the django apps that are listed here. Comma separated'),
        make_option('-A', '--avoid-apps', action='store', dest='avoid_apps',
                    default='',
                    help='AVOID running the django apps that are listed here. Comma separated'),
        make_option('-S', '--no-server', action='store_true',
                    dest='no_server', default=False,
                    help="will not run django's builtin HTTP server"),
        make_option('-T', '--test-server', action='store_true',
                    dest='test_database', default=False,
                    help="will run django's builtin HTTP server using the test databases"),
        make_option('-P', '--port', type='int', dest='port',
                    help="the port in which the HTTP server will run at"),
        make_option('-d', '--debug-mode', action='store_true', dest='debug',
                    default=False,
                    help="when put together with builtin HTTP server, forces django to run with settings.DEBUG=True"),
        make_option('-s', '--scenarios', action='store', dest='scenarios',
                    default=None,
                    help='Comma separated list of scenarios to run'),
        make_option("-t", "--tag", dest="tags", type="str", action='append',
                    default=None,
                    help='Tells lettuce to run the specified tags only; '
                    'can be used multiple times to define more tags'
                    '(prefixing tags with "-" will exclude them and '
                    'prefixing with "~" will match approximate words)'),
        make_option('--with-xunit', action='store_true', dest='enable_xunit',
                    default=False,
                    help='Output JUnit XML test results to a file'),
        make_option('--xunit-file', action='store', dest='xunit_file',
                    default=None,
                    help='Write JUnit XML to this file. Defaults to lettucetests.xml'),
        make_option("--failfast", dest="failfast", default=False,
                    action="store_true",
                    help='Stop running in the first failure'),
        make_option("--pdb", dest="auto_pdb", default=False,
                    action="store_true",
                    help='Launches an interactive debugger upon error'),
    )

    def stopserver(self, failed=False):
        """Exit the process; non-zero status when any feature failed."""
        raise SystemExit(int(failed))

    def get_paths(self, args, apps_to_run, apps_to_avoid):
        """Resolve feature paths from CLI args or by harvesting apps."""
        if args:
            for path, exists in zip(args, map(os.path.exists, args)):
                if not exists:
                    sys.stderr.write(
                        "You passed the path '%s', but it does not exist.\n" % path)
                    sys.exit(1)
            else:
                paths = args
        else:
            paths = harvest_lettuces(
                apps_to_run, apps_to_avoid)  # list of tuples with (path, app_module)
        return paths

    def handle(self, *args, **options):
        setup_test_environment()
        settings.DEBUG = options.get('debug', False)
        verbosity = int(options.get('verbosity', 4))
        apps_to_run = tuple(options.get('apps', '').split(","))
        apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
        run_server = not options.get('no_server', False)
        test_database = options.get('test_database', False)
        tags = options.get('tags', None)
        failfast = options.get('failfast', False)
        auto_pdb = options.get('auto_pdb', False)
        if test_database:
            migrate_south = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except:
                # NOTE(review): bare except — disables South migrations
                # on *any* error, not just a missing import.
                migrate_south = False
                pass
            from django.test.simple import DjangoTestSuiteRunner
            self._testrunner = DjangoTestSuiteRunner()
            self._testrunner.setup_test_environment()
            self._old_db_config = self._testrunner.setup_databases()
            call_command(
                'syncdb',
                verbosity=0,
                interactive=False,
            )
            if migrate_south:
                call_command(
                    'migrate',
                    verbosity=0,
                    interactive=False,
                )
        server = Server(port=options['port'])
        paths = self.get_paths(args, apps_to_run, apps_to_avoid)
        if run_server:
            try:
                server.start()
            except LettuceServerException, e:  # Python 2 except syntax
                raise SystemExit(e)
        # Expose the server address to the harvested features.
        os.environ['SERVER_NAME'] = server.address
        os.environ['SERVER_PORT'] = str(server.port)
        failed = False
        registry.call_hook('before', 'harvest', locals())
        results = []
        try:
            for path in paths:
                app_module = None
                # NOTE(review): ``len(path) is 2`` relies on CPython
                # small-int identity; should be ``== 2``. Left untouched
                # in this doc-only pass.
                if isinstance(path, tuple) and len(path) is 2:
                    path, app_module = path
                if app_module is not None:
                    registry.call_hook('before_each', 'app', app_module)
                runner = Runner(path, options.get('scenarios'), verbosity,
                                enable_xunit=options.get('enable_xunit'),
                                xunit_filename=options.get('xunit_file'),
                                tags=tags, failfast=failfast,
                                auto_pdb=auto_pdb)
                result = runner.run()
                if app_module is not None:
                    registry.call_hook('after_each', 'app', app_module, result)
                results.append(result)
                # A feature run fails when any step did not pass.
                if not result or result.steps != result.steps_passed:
                    failed = True
        except SystemExit, e:
            failed = e.code
class DjangoManager(object):
    """
    A Django plugin for py.test that handles creating and destroying the
    test environment and test database. Similar to Django's
    TransactionTestCase, a transaction is started and rolled back for each
    test. Additionally, the settings are copied before each test and
    restored at the end of the test, so it is safe to modify settings
    within tests.
    """

    def __init__(self, verbosity=0, noinput=False):
        self.verbosity = verbosity
        self.noinput = noinput
        self._old_database_name = None
        self._old_settings = []
        self._old_urlconf = None
        self.suite_runner = None
        self.old_db_config = None
        self.testcase = None

    def pytest_sessionstart(self, session):
        """py.test hook: build the test environment and databases once."""
        #capture = py.io.StdCapture()
        # make sure the normal django syncdb command is run (do not run migrations for tests)
        # this is faster and less error prone
        management.get_commands()  # load commands dict
        management._commands['syncdb'] = 'django.core'  # make sure `south` migrations are disabled
        self.suite_runner = DjangoTestSuiteRunner()
        self.suite_runner.setup_test_environment()
        self.old_db_config = self.suite_runner.setup_databases()
        settings.DATABASE_SUPPORTS_TRANSACTIONS = True
        #unused_out, err = capture.reset()
        #srsys.stderr.write(err)

    def pytest_sessionfinish(self, session, exitstatus):
        """py.test hook: tear everything down, forwarding captured stderr."""
        capture = py.io.StdCapture()
        self.suite_runner.teardown_test_environment()
        self.suite_runner.teardown_databases(self.old_db_config)
        unused_out, err = capture.reset()
        sys.stderr.write(err)

    def pytest_itemstart(self, item):
        # This lets us control the order of the setup/teardown
        # Yuck.
        if _is_unittest(self._get_item_obj(item)):
            item.setup = lambda: None
            item.teardown = lambda: None

    def pytest_runtest_setup(self, item):
        # Set the URLs if the py.test.urls() decorator has been applied
        if hasattr(item.obj, 'urls'):
            self._old_urlconf = settings.ROOT_URLCONF
            settings.ROOT_URLCONF = item.obj.urls
            clear_url_caches()
        item_obj = self._get_item_obj(item)
        testcase = _get_testcase(item_obj)
        # We have to run these here since py.test's unittest plugin skips
        # __call__()
        testcase.client = Client()
        testcase._pre_setup()
        testcase.setUp()

    def pytest_runtest_teardown(self, item):
        item_obj = self._get_item_obj(item)
        testcase = _get_testcase(item_obj)
        testcase.tearDown()
        if not isinstance(item_obj, TestCase):
            testcase._post_teardown()
        # Restore the URLconf swapped in by pytest_runtest_setup.
        if hasattr(item, 'urls') and self._old_urlconf is not None:
            settings.ROOT_URLCONF = self._old_urlconf
            self._old_urlconf = None

    def _get_item_obj(self, item):
        # ``im_self`` is the Python 2 bound-method instance attribute.
        try:
            return item.obj.im_self
        except AttributeError:
            return None

    def pytest_namespace(self):
        """
        Sets up the py.test.params decorator.
        """
        def params(funcarglist):
            """
            A decorator to make parametrised tests easy. Takes a list of
            dictionaries of keyword arguments for the function. A test is
            created for each dictionary.

            Example:

                @py.test.params([dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)])
                def test_equals(a, b):
                    assert a == b
            """
            def wrapper(function):
                function.funcarglist = funcarglist
                return function
            return wrapper

        def load_fixture(fixture):
            """
            Loads a fixture, useful for loading fixtures in funcargs.

            Example:

                def pytest_funcarg__articles(request):
                    py.test.load_fixture('test_articles')
                    return Article.objects.all()
            """
            call_command('loaddata', fixture, **{
                'verbosity': self.verbosity + 1,
                'commit': not settings.DATABASE_SUPPORTS_TRANSACTIONS
            })

        def urls(urlconf):
            """
            A decorator to change the URLconf for a particular test, similar
            to the `urls` attribute on Django's `TestCase`.

            Example:

                @py.test.urls('myapp.test_urls')
                def test_something(client):
                    assert 'Success!' in client.get('/some_path/')
            """
            def wrapper(function):
                function.urls = urlconf
                # NOTE(review): unlike ``params`` above, this wrapper does
                # not ``return function`` — the decorated test becomes
                # None. Looks like a missing return; left untouched in
                # this doc-only pass. TODO confirm against upstream.
            return wrapper

        return {'params': params, 'load_fixture': load_fixture, 'urls': urls}

    def pytest_generate_tests(self, metafunc):
        """
        Generates parametrised tests if the py.test.params decorator has
        been used.
        """
        for funcargs in getattr(metafunc.function, 'funcarglist', ()):
            metafunc.addcall(funcargs=funcargs)
class SetupTesting(TestSuite):
    """
    Test Suite configuring Django settings and using DjangoTestSuiteRunner
    as test runner. Also runs PEP8 and Coverage checks.
    """
    def __init__(self, *args, **kwargs):
        self.configure()
        # Start coverage collection before application code is imported.
        self.coverage = coverage()
        self.coverage.start()
        self.packages = get_packages(
            path=BASEDIR, exclude_packages=exclude_packages)
        self.options = {
            'failfast': '',
            'autoreload': '',
            'label': ['testing'],
        }
        super(SetupTesting, self).__init__(tests=self.build_tests(),
                                           *args, **kwargs)
        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=2,
            interactive=False,
            failfast=True
        )
        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        try:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
        except ImportError:
            pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def flake8_report(self):
        """
        Outputs flake8 report.
        """
        log.info("\n\nFlake8 Report:")
        base = get_path([BASEDIR, 'tribus'])
        pys = find_files(path=base, pattern='*.py')
        flake8_style = get_style_guide()
        report = flake8_style.check_files(pys)
        # NOTE(review): exit_code is computed but never used or returned.
        exit_code = print_report(report, flake8_style)

    def pep257_report(self):
        """
        Outputs PEP257 report.
        """
        log.info("\n\nPEP257 Report:")
        base = get_path([BASEDIR, 'tribus'])
        pys = find_files(path=base, pattern='*.py')
        report = pep257.check_files(pys)
        if len(report) > 0:
            for r in report:
                log.info(r)
        else:
            log.info("\nNo errors found!")

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        include = ['%s*' % package for package in self.packages]
        omit = ['*testing*']
        log.info("\n\nCoverage Report:")
        try:
            self.coverage.stop()
            self.coverage.report(include=include, omit=omit)
        except CoverageException as e:
            log.info("Coverage Exception: %s" % e)
        # On CI, also push the results to coveralls.io.
        if os.environ.get('TRAVIS'):
            log.info("Submitting coverage to coveralls.io...")
            try:
                result = Coveralls()
                result.wear()
            except CoverallsException as e:
                log.error("Coveralls Exception: %s" % e)

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.

        TODO: Cleanup/simplify this method, flow too complex, too much
        duplication.
        """
        from django.db.models import get_app
        from django.test.simple import build_suite
        tests = []
        app = get_app(self.options['label'][0])
        tests.append(build_suite(app))
        return tests

    def configure(self):
        """
        Configures Django settings.
        """
        from django.conf import settings
        from django.utils.importlib import import_module
        try:
            test_settings = import_module('tribus.config.testing')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)
        setting_attrs = {}
        # Copy every public attribute of the settings module.
        for attr in dir(test_settings):
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)
        if not settings.configured:
            settings.configure(**setting_attrs)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTesting, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        #self.coverage_report()
        #self.flake8_report()
        #self.pep257_report()
        return result
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner.
    Also runs PEP8 and Coverage checks.
    """
    # NOTE(review): another class with this same name is defined later in
    # this module; if both are in the same namespace the later one shadows
    # this definition — confirm which is intended to be importable.

    def __init__(self, *args, **kwargs):
        # Configure settings and start coverage before any test module is
        # imported, so import-time statements are measured.
        self.configure()
        self.cov = coverage()
        self.cov.start()
        self.packages = self.resolve_packages()
        # Flags are parsed from argv[2:] (everything after the command name)
        # and then stripped from sys.argv so downstream unittest machinery
        # does not choke on them.
        parser = argparse.ArgumentParser()
        parser.add_argument('-a', '--autoreload',
                            dest='autoreload',
                            action='store_const',
                            const=True,
                            default=False,)
        parser.add_argument('-f', '--failfast',
                            dest='failfast',
                            action='store_const',
                            const=True,
                            default=False,)
        parser.add_argument('-l', '--label', dest='label')
        self.options = vars(parser.parse_args(sys.argv[2:]))
        sys.argv = sys.argv[:2]
        super(SetupTestSuite, self).__init__(tests=self.build_tests(),
                                             *args, **kwargs)

        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=1,
            interactive=True,
            failfast=False
        )
        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        try:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
        except ImportError:
            pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def handle_label_exception(self, exception):
        """
        Check whether or not the exception was caused due to a bad label
        being provided. If so raise LabelException which will cause an exit,
        otherwise continue.

        The check looks for particular error messages, which obviously
        sucks. TODO: Implement a cleaner test.
        """
        markers = [
            'no such test method',
            'should be of the form app.TestCase or app.TestCase.test_method',
            'App with label',
            'does not refer to a test',
        ]
        # NOTE(review): `exception.message` is Python 2 only (removed by
        # PEP 352); this would raise AttributeError under Python 3.
        if any(marker in exception.message for marker in markers):
            log.info(exception)
            raise LabelException(exception)
        else:
            raise exception

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        TODO: Cleanup/simplify this method, flow too complex,
        too much duplication.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.db.models import get_app
        from django.test.simple import build_suite, build_test
        tests = []
        # A single explicit label, if given, replaces the resolved packages.
        packages = [self.options['label'], ] if \
            self.options['label'] else self.packages
        for package in packages:
            try:
                if not self.options['autoreload']:
                    if self.options['label']:
                        try:
                            tests.append(build_test(package))
                        except (ImproperlyConfigured, ValueError) as e:
                            self.handle_label_exception(e)
                    else:
                        app = get_app(package)
                        tests.append(build_suite(app))
                else:
                    # Wait for exceptions to be resolved.
                    # In autoreload mode, keep retrying every second until
                    # the test module imports cleanly; print each *new*
                    # traceback once (dedup by message) to avoid log spam.
                    exception = None
                    while True:
                        try:
                            if self.options['label']:
                                try:
                                    tests.append(build_test(package))
                                except (ImproperlyConfigured, ValueError) as e:
                                    self.handle_label_exception(e)
                            else:
                                app = get_app(package)
                                tests.append(build_suite(app))
                            break
                        except LabelException:
                            raise
                        except Exception as e:
                            if exception != str(e):
                                traceback.print_exc()
                            exception = str(e)
                            time.sleep(1)
            except ImproperlyConfigured as e:
                log.info("Warning: %s" % e)
            except ImportError as e:
                log.info("Warning: %s" % e)
        return tests

    def configure(self):
        """
        Configures Django settings from a ``test_settings`` module.

        Exits the process if that module cannot be imported.
        """
        from django.conf import settings
        from django.utils.importlib import import_module
        try:
            test_settings = import_module('test_settings')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)
        setting_attrs = {}
        for attr in dir(test_settings):
            # Skip dunder attributes; copy everything else verbatim.
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)
        if not settings.configured:
            settings.configure(**setting_attrs)

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        verbose = '--quiet' not in sys.argv
        self.cov.stop()
        if verbose:
            log.info("\nCoverage Report:")
            try:
                include = ['%s*' % package for package in self.packages]
                omit = ['*tests*']
                self.cov.report(include=include, omit=omit)
                self.cov.xml_report(include=include, omit=omit)
            except misc.CoverageException as e:
                log.info("Coverage Exception: %s" % e)

    def resolve_packages(self):
        """
        Frame hack to determine packages contained in module for testing.
        We ignore submodules (those containing '.')
        """
        # Walk up the call stack looking for a frame whose `self` carries
        # distutils-style `py_modules`/`packages` attributes (i.e. the
        # setup() invocation that launched this suite).
        # NOTE(review): implicitly returns None when no such frame exists —
        # callers appear to assume a list; verify against the entry point.
        f = sys._getframe()
        while f:
            if 'self' in f.f_locals:
                locals_self = f.f_locals['self']
                py_modules = getattr(locals_self, 'py_modules', None)
                packages = getattr(locals_self, 'packages', None)
                top_packages = []
                if py_modules or packages:
                    if py_modules:
                        for module in py_modules:
                            if '.' not in module:
                                top_packages.append(module)
                    if packages:
                        for package in packages:
                            if '.' not in package:
                                top_packages.append(package)
                    return list(set(top_packages))
            f = f.f_back

    def pep8_report(self):
        """
        Outputs PEP8 report to screen and pep8.txt.
        """
        verbose = '--quiet' not in sys.argv
        if verbose:
            # Hook into stdout.
            old_stdout = sys.stdout
            sys.stdout = mystdout = StringIO()

            # Run Pep8 checks, excluding South migrations.
            pep8_style = pep8.StyleGuide()
            pep8_style.options.exclude.append('migrations')
            pep8_style.check_files(self.packages)

            # Restore stdout.
            sys.stdout = old_stdout

            # Save result to pep8.txt.
            result = mystdout.getvalue()
            output = open('pep8.txt', 'w')
            output.write(result)
            output.close()

            # Return Pep8 result
            if result:
                log.info("\nPEP8 Report:")
                log.info(result)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTestSuite, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.coverage_report()
        self.pep8_report()
        return result
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DiscoverRunner or DjangoTestSuiteRunner as the test runner.
    Also runs PEP8 and Coverage checks.
    """

    def __init__(self, *args, **kwargs):
        # Start coverage before settings/tests are imported so their
        # import-time statements are measured.
        self.cov = coverage()
        self.cov.start()
        self.configure()
        self.packages = self.resolve_packages()
        # Flags are parsed from argv[2:] and then stripped from sys.argv so
        # downstream unittest machinery does not see them.
        parser = argparse.ArgumentParser()
        parser.add_argument('-a', '--autoreload',
                            dest='autoreload',
                            action='store_const',
                            const=True,
                            default=False,)
        parser.add_argument('-f', '--failfast',
                            dest='failfast',
                            action='store_const',
                            const=True,
                            default=False,)
        parser.add_argument('-l', '--label', dest='label')
        self.options = vars(parser.parse_args(sys.argv[2:]))
        sys.argv = sys.argv[:2]

        runner_options = {
            'verbosity': 1,
            'interactive': True,
            'failfast': False,
        }
        # django.test.simple was removed in Django 1.8; use DiscoverRunner
        # (and its own suite building) from 1.8 on.
        if django.VERSION >= (1, 8):
            from django.test.runner import DiscoverRunner
            self.test_runner = DiscoverRunner(**runner_options)
            tests = self.test_runner.build_suite()
        else:
            from django.test.simple import DjangoTestSuiteRunner
            self.test_runner = DjangoTestSuiteRunner(**runner_options)
            tests = self.build_tests()
        super(SetupTestSuite, self).__init__(tests=tests, *args, **kwargs)

        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is
        # installed. South only supports Django < 1.7.
        if django.VERSION < (1, 7):
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except ImportError:
                pass

        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def handle_label_exception(self, exception):
        """
        Check whether or not the exception was caused due to a bad label
        being provided. If so raise LabelException which will cause an exit,
        otherwise continue.

        The check looks for particular error messages, which obviously
        sucks. TODO: Implement a cleaner test.
        """
        markers = [
            'no such test method',
            'should be of the form app.TestCase or app.TestCase.test_method',
            'App with label',
            'does not refer to a test',
        ]
        # str(exception) instead of exception.message: the `message`
        # attribute was removed in Python 3 (PEP 352).
        if any(marker in str(exception) for marker in markers):
            log.info(exception)
            raise LabelException(exception)
        else:
            raise exception

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages for <= 1.8
        TODO: Cleanup/simplify this method, flow too complex,
        too much duplication.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.test.simple import build_suite, build_test
        try:
            # Django >= 1.7 app registry.
            from django.apps import apps
            get_app = apps.get_app_config
        except ImportError:
            from django.db.models import get_app
        tests = []
        # A single explicit label, if given, replaces the resolved packages.
        packages = [self.options['label'], ] if \
            self.options['label'] else self.packages
        for package in packages:
            try:
                if not self.options['autoreload']:
                    if self.options['label']:
                        try:
                            tests.append(build_test(package))
                        except (ImproperlyConfigured, ValueError) as e:
                            self.handle_label_exception(e)
                    else:
                        app = get_app(package)
                        tests.append(build_suite(app))
                else:
                    # Wait for exceptions to be resolved.
                    # In autoreload mode, retry every second until the test
                    # module imports cleanly; print each *new* traceback
                    # once (dedup by message) to avoid log spam.
                    exception = None
                    while True:
                        try:
                            if self.options['label']:
                                try:
                                    tests.append(build_test(package))
                                except (ImproperlyConfigured, ValueError) as e:
                                    self.handle_label_exception(e)
                            else:
                                app = get_app(package)
                                tests.append(build_suite(app))
                            break
                        except LabelException:
                            raise
                        except Exception as e:
                            if exception != str(e):
                                traceback.print_exc()
                            exception = str(e)
                            time.sleep(1)
            except ImproperlyConfigured:
                log.info("Warning: %s" % traceback.format_exc())
            except ImportError:
                log.info("Warning: %s" % traceback.format_exc())
        return tests

    def configure(self):
        """
        Configures Django settings from a ``test_settings`` module.

        Exits the process if that module cannot be imported.
        """
        import django
        from django.conf import settings
        try:
            # django.utils.importlib was removed in Django 1.9.
            from django.utils.importlib import import_module
        except ImportError:
            from importlib import import_module
        try:
            test_settings = import_module('test_settings')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)
        setting_attrs = {}
        for attr in dir(test_settings):
            # Skip dunder attributes; copy everything else verbatim.
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)
        if not settings.configured:
            settings.configure(**setting_attrs)
        # Django >= 1.7 requires an explicit setup() to populate the app
        # registry after settings are configured.
        if hasattr(django, 'setup'):
            django.setup()

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        verbose = '--quiet' not in sys.argv
        self.cov.stop()
        if verbose:
            log.info("\nCoverage Report:")
            try:
                include = ['%s*' % package for package in self.packages]
                omit = ['*tests*']
                self.cov.report(include=include, omit=omit)
                self.cov.save()
                self.cov.xml_report(include=include, omit=omit)
            except misc.CoverageException as e:
                log.info("Coverage Exception: %s" % e)

    def resolve_packages(self):
        """
        Frame hack to determine packages contained in module for testing.
        We ignore submodules (those containing '.')
        """
        # Walk up the call stack looking for a frame whose `self` carries
        # distutils-style `py_modules`/`packages` attributes (i.e. the
        # setup() invocation that launched this suite).
        # NOTE(review): implicitly returns None when no such frame exists.
        f = sys._getframe()
        while f:
            if 'self' in f.f_locals:
                locals_self = f.f_locals['self']
                py_modules = getattr(locals_self, 'py_modules', None)
                packages = getattr(locals_self, 'packages', None)
                top_packages = []
                if py_modules or packages:
                    if py_modules:
                        for module in py_modules:
                            if '.' not in module:
                                top_packages.append(module)
                    if packages:
                        for package in packages:
                            if '.' not in package:
                                top_packages.append(package)
                    return list(set(top_packages))
            f = f.f_back

    def pep8_report(self):
        """
        Outputs PEP8 report to screen and pep8.txt.
        """
        verbose = '--quiet' not in sys.argv
        if verbose:
            # Hook into stdout.
            old_stdout = sys.stdout
            sys.stdout = mystdout = StringIO()

            # Run Pep8 checks, excluding South migrations.
            pep8_style = pep8.StyleGuide()
            pep8_style.options.exclude.append('migrations')
            pep8_style.options.exclude.append('south_migrations')
            pep8_style.check_files(self.packages)

            # Restore stdout.
            sys.stdout = old_stdout

            # Save result to pep8.txt.
            result = mystdout.getvalue()
            output = open('pep8.txt', 'w')
            output.write(result)
            output.close()

            # Return Pep8 result
            if result:
                log.info("\nPEP8 Report:")
                log.info(result)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTestSuite, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.coverage_report()
        self.pep8_report()
        return result
class Command(BaseCommand): help = u'Run lettuce tests all along installed apps' args = '[PATH to feature file or folder]' requires_model_validation = False option_list = BaseCommand.option_list[1:] + ( make_option('-v', '--verbosity', action='store', dest='verbosity', default='4', type='choice', choices=map(str, range(5)), help='Verbosity level; 0=no output, 1=only dots, 2=only scenario names, 3=colorless output, 4=normal output (colorful)'), make_option('-a', '--apps', action='store', dest='apps', default='', help='Run ONLY the django apps that are listed here. Comma separated'), make_option('-A', '--avoid-apps', action='store', dest='avoid_apps', default='', help='AVOID running the django apps that are listed here. Comma separated'), make_option('-S', '--no-server', action='store_true', dest='no_server', default=False, help="will not run django's builtin HTTP server"), make_option('-T', '--test-server', action='store_true', dest='test_database', default=False, help="will run django's builtin HTTP server using the test databases"), make_option('-P', '--port', type='int', dest='port', help="the port in which the HTTP server will run at"), make_option('-d', '--debug-mode', action='store_true', dest='debug', default=False, help="when put together with builtin HTTP server, forces django to run with settings.DEBUG=True"), make_option('-s', '--scenarios', action='store', dest='scenarios', default=None, help='Comma separated list of scenarios to run'), make_option("-t", "--tag", dest="tags", type="str", action='append', default=None, help='Tells lettuce to run the specified tags only; ' 'can be used multiple times to define more tags' '(prefixing tags with "-" will exclude them and ' 'prefixing with "~" will match approximate words)'), make_option('--with-xunit', action='store_true', dest='enable_xunit', default=False, help='Output JUnit XML test results to a file'), make_option('--xunit-file', action='store', dest='xunit_file', default=None, help='Write JUnit XML to 
this file. Defaults to lettucetests.xml'), make_option("--failfast", dest="failfast", default=False, action="store_true", help='Stop running in the first failure'), make_option("--pdb", dest="auto_pdb", default=False, action="store_true", help='Launches an interactive debugger upon error'), ) def stopserver(self, failed=False): raise SystemExit(int(failed)) def get_paths(self, args, apps_to_run, apps_to_avoid): if args: for path, exists in zip(args, map(os.path.exists, args)): if not exists: sys.stderr.write("You passed the path '%s', but it does not exist.\n" % path) sys.exit(1) else: paths = args else: paths = harvest_lettuces(apps_to_run, apps_to_avoid) # list of tuples with (path, app_module) return paths def handle(self, *args, **options): setup_test_environment() verbosity = int(options.get('verbosity', 4)) apps_to_run = tuple(options.get('apps', '').split(",")) apps_to_avoid = tuple(options.get('avoid_apps', '').split(",")) run_server = not options.get('no_server', False) test_database = options.get('test_database', False) tags = options.get('tags', None) failfast = options.get('failfast', False) auto_pdb = options.get('auto_pdb', False) if test_database: migrate_south = getattr(settings, "SOUTH_TESTS_MIGRATE", True) try: from south.management.commands import patch_for_test_db_setup patch_for_test_db_setup() except: migrate_south = False pass from django.test.simple import DjangoTestSuiteRunner self._testrunner = DjangoTestSuiteRunner() self._testrunner.setup_test_environment() self._old_db_config = self._testrunner.setup_databases() call_command('syncdb', verbosity=0, interactive=False,) if migrate_south: call_command('migrate', verbosity=0, interactive=False,) settings.DEBUG = options.get('debug', False) server = Server(port=options['port']) paths = self.get_paths(args, apps_to_run, apps_to_avoid) if run_server: try: server.start() except LettuceServerException, e: raise SystemExit(e) os.environ['SERVER_NAME'] = str(server.address) 
os.environ['SERVER_PORT'] = str(server.port) failed = False registry.call_hook('before', 'harvest', locals()) results = [] try: for path in paths: app_module = None if isinstance(path, tuple) and len(path) is 2: path, app_module = path if app_module is not None: registry.call_hook('before_each', 'app', app_module) runner = Runner(path, options.get('scenarios'), verbosity, enable_xunit=options.get('enable_xunit'), xunit_filename=options.get('xunit_file'), tags=tags, failfast=failfast, auto_pdb=auto_pdb) result = runner.run() if app_module is not None: registry.call_hook('after_each', 'app', app_module, result) results.append(result) if not result or result.steps != result.steps_passed: failed = True except SystemExit, e: failed = e.code
class Benchmark(GenericBenchmark): def __init__(self, *args, **kwargs): global options options = LazyStruct(**kwargs) self.test_runner = DjangoTestSuiteRunner() self.prepare() def prepare_oss_list(self): return [oss for oss in [OssGenerator(idx=idx, fs=self.fs_entity) for idx in range(0, options.oss)]] def prepare_mds_list(self): return [MdsGenerator(fs=self.fs_entity)] def step_stats(self): """Generate stats for all servers in a single step""" update_servers = [] for server in self.server_list(): stats = {'node': {}, 'lustre': {'target': {}}} for node_stat in server.stats.keys(): stats['node'][node_stat] = server.stats[node_stat] # make this match up with what comes in from an update scan stats['lustre']['lnet'] = stats['node']['lnet'] for target in server.target_list: stats['lustre']['target'][target.name] = {} for target_stat in target.stats.keys(): stats['lustre']['target'][target.name][target_stat] = target.stats[target_stat] update_servers.append([server.entity, stats]) return update_servers def precreate_stats(self): self.stats_list = [] steps = range(0, options.duration, options.frequency) for idx, v in enumerate(steps): sys.stderr.write("\rPrecreating stats... (%d/%d)" % (idx, len(steps))) self.stats_list.append(self.step_stats()) sys.stderr.write("\rPrecreating stats... Done. 
\n") def prepare(self): from south.management.commands import patch_for_test_db_setup self.test_runner.setup_test_environment() # This is necessary to ensure that we use django.core.syncdb() # instead of south's hacked syncdb() patch_for_test_db_setup() self.old_db_config = self.test_runner.setup_databases() mgs_host = ManagedHost.objects.create( address="mgs", fqdn="mgs", nodename="mgs") mgs_vol = Volume.objects.create(label="mgs") VolumeNode.objects.create(host = mgs_host, path = uuid.uuid4(), primary = True, use = True, volume = mgs_vol) self.mgs, mounts = ManagedMgs.create_for_volume(mgs_vol.pk, name="MGS") self.fs_entity = ManagedFilesystem.objects.create(name=options.fsname, mgs=self.mgs) self.oss_list = self.prepare_oss_list() self.mds_list = self.prepare_mds_list() if not options.no_precreate: self.precreate_stats() def get_stats_size(self): stats_size = LazyStruct() from django.db import connection cursor = connection.cursor() if 'postgres' in connection.settings_dict['ENGINE']: stats_size.row_count = stats_size.data = stats_size.index = 0 for model in Stats: cursor.execute("select count(id) as rows, pg_relation_size('{0}') as data_length, pg_total_relation_size('{0}') - pg_relation_size('{0}') as index_length from {0}".format(model._meta.db_table)) rows, data, index = cursor.fetchone() stats_size.row_count += rows stats_size.data += data stats_size.index += index else: raise RuntimeError("Unsupported DB: %s" % connection.settings_dict['ENGINE']) return stats_size def server_list(self): return self.mds_list + self.oss_list def store_metrics(self, scan): return scan.store_metrics() def run(self): def t2s(t): return time.strftime("%H:%M:%S", time.localtime(t)) def s2s(s): if s > 600: from datetime import timedelta, datetime d = timedelta(seconds=int(s)) + datetime(1, 1, 1) return "%.2d:%.2d:%.2d:%.2d" % (d.day - 1, d.hour, d.minute, d.second) else: return "%d" % s stats_size_start = self.get_stats_size() scan = UpdateScan() run_start = time.time() run_count 
= 0 create_interval = 0 create_count = 0 start_la = os.getloadavg() last_width = 0 print "window start: %s, window stop: %s" % (t2s(run_start), t2s(run_start + options.duration)) update_times = range(int(run_start), int(run_start + options.duration), options.frequency) for stats_idx, update_time in enumerate(update_times): new_timing_line = "\r%s" % t2s(update_time) sys.stderr.write(new_timing_line) store_start = time.time() count = 0 if options.no_precreate: step_stats_list = self.step_stats() else: step_stats_list = self.stats_list[stats_idx] server_stats_count = 0 for step_stats in step_stats_list: scan.host = step_stats[0] scan.host_data = {'metrics': {'raw': step_stats[1]}} scan.update_time = update_time count += self.store_metrics(scan) # Since we've hard-coded the server stats, we need to record # the actual number to make the reporting accurate. if options.server_stats == 0: for key in ['meminfo', 'lnet', 'cpustats']: server_stats_count += len(step_stats[1]['node'][key]) # Terrible hack to make reporting accurate. 
if options.server_stats == 0: options.server_stats = server_stats_count run_count += count store_end = time.time() interval = store_end - store_start rate = count / interval meter = "+" if interval < options.frequency else "-" seconds_left = (len(update_times) - stats_idx) * interval timing_stats = ": inserted %d stats (rate: %lf stats/sec, complete in: %s) %s" % (count, rate, s2s(seconds_left), meter) current_line_width = len(new_timing_line + timing_stats) if current_line_width < last_width: sys.stderr.write(new_timing_line + timing_stats + " " * (last_width - current_line_width)) else: sys.stderr.write(timing_stats) last_width = current_line_width if not options.include_create and update_time == int(run_start): create_interval = interval create_count = count run_end = time.time() end_la = os.getloadavg() stats_size_end = self.get_stats_size() run_info = LazyStruct() run_info.step_count = options.duration / options.frequency run_info.run_count = run_count run_info.run_interval = run_end - run_start - create_interval run_info.run_rate = (run_count - create_count) / run_info.run_interval run_info.create_interval = create_interval run_info.create_count = create_count run_info.start_load_avg = start_la run_info.end_load_avg = end_la run_info.stats_data_used = stats_size_end.data - stats_size_start.data run_info.stats_index_used = stats_size_end.index - stats_size_start.index run_info.stats_rows_used = stats_size_end.row_count - stats_size_start.row_count self.print_report(run_info) def profile_system(self): def _read_lines(filename): fh = open(filename) try: return [line.rstrip("\n") for line in fh.readlines()] finally: fh.close() def _cpu_info(): count = 0 speed = 0 for line in _read_lines("/proc/cpuinfo"): if 'processor' in line: count += 1 continue if 'cpu MHz' in line: speed = float(line.split()[3]) continue return {'count': count, 'speed': speed} def _mem_info(): mem_info = {} for line in _read_lines("/proc/meminfo"): for query in ["MemTotal", "MemFree", 
"SwapTotal", "SwapFree"]: if query in line: mem_info[query] = float(line.split()[1]) break mem_info['pct_mem_used'] = ((mem_info['MemTotal'] - mem_info['MemFree']) / mem_info['MemTotal']) * 100 try: mem_info['pct_swap_used'] = ((mem_info['SwapTotal'] - mem_info['SwapFree']) / mem_info['SwapTotal']) * 100 except ZeroDivisionError: mem_info['pct_swap_used'] = 0.0 return mem_info profile = LazyStruct() cpu_info = _cpu_info() profile.cpu_count = cpu_info['count'] profile.cpu_speed = cpu_info['speed'] mem_info = _mem_info() profile.mem_total = mem_info['MemTotal'] profile.mem_pct_used = mem_info['pct_mem_used'] profile.swap_total = mem_info['SwapTotal'] profile.swap_pct_used = mem_info['pct_swap_used'] return profile # TODO: Customizable output formats (csv, tsv, etc.) def print_report(self, run_info): print "\n" try: profile = self.profile_system() print "CPUs: %d @ %.2f GHz, Mem: %d MB real (%.2f%% used) / %d MB swap (%.2f%% used)" % (profile.cpu_count, (profile.cpu_speed / 1000), (profile.mem_total / 1000), profile.mem_pct_used, (profile.swap_total / 1000), profile.swap_pct_used) except IOError: print "No system profile available (on a mac?)" print "Load averages (1/5/15): start: %.2f/%.2f/%.2f, end: %.2f/%.2f/%.2f" % (run_info.start_load_avg + run_info.end_load_avg) print "counts: OSS: %d, OSTs/OSS: %d (%d total); stats-per: OSS: %d, MDS: %d" % (options.oss, options.ost, (options.oss * options.ost), ((options.ost * options.ost_stats) + options.server_stats), (options.mdt_stats + options.server_stats)) print "run count (%d stats) / run time (%.2f sec) = run rate (%.2f stats/sec)" % (run_info.run_count, run_info.run_interval, run_info.run_rate) print "%d steps, %d stats/step, duration %d" % (run_info.step_count, run_info.run_count / run_info.step_count, options.duration) def _to_mb(in_bytes): return in_bytes * 1.0 / (1024 * 1024) stats_total_used = run_info.stats_data_used + run_info.stats_index_used print "stats rows: %d, space used: %.2f MB (%.2f MB data, %.2f MB 
index)" % (run_info.stats_rows_used, _to_mb(stats_total_used), _to_mb(run_info.stats_data_used), _to_mb(run_info.stats_index_used)) def cleanup(self): self.test_runner.teardown_databases(self.old_db_config) self.test_runner.teardown_test_environment()
class PinDbTestCase(TransactionTestCase):
    """Transaction test case that rebuilds pindb/django DB state so each
    test sees the effective (possibly overridden) database settings."""

    multi_db = True

    def _pre_setup(self):
        """Munge DB infrastructure before the superclass gets a chance
        to set up the DBs."""
        # clear all module state
        pindb._init_state()
        # patch up the db system to use the effective router settings
        # (see override_settings)
        # we can't just reconstruct objects here because
        # lots of places do from foo import baz, so they have
        # a local reference to an object we can't replace.
        # so reach in and (gulp) mash the object's state.
        if dj_VERSION < (1, 4):
            for conn in dj_db.connections._connections.values():
                conn.close()
            dj_db.connections._connections = {}
        else:
            # Django >= 1.4 keeps connections in a thread-local.
            for conn in dj_db.connections.all():
                conn.close()
            dj_db.connections._connections = local()
        dj_db.connections.databases = settings.DATABASES

        def make_router(import_path):
            # Instantiate a router class from its dotted import path.
            module_path, class_name = import_path.rsplit('.', 1)
            mod = importlib.import_module(module_path)
            return getattr(mod, class_name)()

        dj_db.router.routers = [
            make_router(import_path)
            for import_path in settings.DATABASE_ROUTERS]

        dj_db.connection = dj_db.connections[dj_db.DEFAULT_DB_ALIAS]
        dj_db.backend = dj_db.load_backend(
            dj_db.connection.settings_dict['ENGINE'])

        # Use a shim runner to create the test databases ourselves.
        self.shim_runner = DjangoTestSuiteRunner()
        self.setup_databases()

        super(PinDbTestCase, self)._pre_setup()

    def _post_teardown(self):
        """Delete the databases after the superclass' method has closed
        the connections. We must do the DB deletion in post-teardown,
        because that's when the superclass closes its connection, which
        inadvertantly re-creates the sqlite file if we had previously
        tried to delete it (like in a plain old tearDown method).
        """
        super(PinDbTestCase, self)._post_teardown()
        pindb.unpin_all()
        self.teardown_databases(self.old_config)

    def setup_databases(self, **kwargs):
        # Remember the runner's old config so teardown can restore it.
        self.old_config = self.shim_runner.setup_databases(**kwargs)

    def teardown_databases(self, old_config, **kwargs):
        # Fixed: honor the `old_config` argument and forward **kwargs
        # (previously both were ignored in favor of self.old_config).
        self.shim_runner.teardown_databases(old_config, **kwargs)

    def _get_response_cookie(self, url):
        """POST to `url` and return the decoded, sorted pinning cookie."""
        response = self.client.post(url)
        self.assertTrue(middleware.PINNING_COOKIE in response.cookies)
        return sorted(
            anyjson.loads(response.cookies[middleware.PINNING_COOKIE].value)
        )