Example #1
def setup():
    global test_runner
    global old_config
    from django.test.simple import DjangoTestSuiteRunner
    test_runner = DjangoTestSuiteRunner()
    test_runner.setup_test_environment()
    old_config = test_runner.setup_databases()
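The module-level setup() above has no matching teardown in this snippet. A hedged sketch of the counterpart, mirroring the pattern used in the later examples and assuming setup() has already populated the two globals:

def teardown():
    # Drop the test databases created by setup_databases() and restore
    # the pre-test environment (email backend, DEBUG handling, etc.).
    test_runner.teardown_databases(old_config)
    test_runner.teardown_test_environment()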
Example #2
def _django_runner(request):
    """Create the django runner, internal to pytest-django

    This does important things like setting up the local memory email
    backend etc.

    XXX It is a little dodgy that this is an autouse fixture.  Perhaps
        an email fixture should be requested in order to be able to
        use the Django email machinery just like you need to request a
        db fixture for access to the Django database, etc.  But
        without duplicating a lot more of Django's test support code
        we need to follow this model.
    """
    if django_settings_is_configured():
        import django
        # Django >= 1.7: Call django.setup() to initialize Django
        setup = getattr(django, 'setup', lambda: None)
        setup()

        from django.test.simple import DjangoTestSuiteRunner

        runner = DjangoTestSuiteRunner(interactive=False)
        runner.setup_test_environment()
        request.addfinalizer(runner.teardown_test_environment)
        return runner
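The docstring above refers to the in-memory email backend that setup_test_environment() installs. A minimal illustrative test (not part of pytest-django; the test name and addresses are made up) showing what that provides, assuming the autouse fixture above has already run: outgoing mail is captured in django.core.mail.outbox instead of being sent.

from django.core import mail

def test_mail_is_captured():
    mail.send_mail('Subject', 'Body', 'from@example.com', ['to@example.com'])
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == 'Subject'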
Example #3
class DBTestCase(TestCase, AssertQueriesCountMixin):
    test_runner, old_config = None, None

    def setUp(self):
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner()
        self.before_environment_setup()                       # HOOK
        self.test_runner.setup_test_environment()
        self.before_database_setup()                          # HOOK
        self.old_config = self.test_runner.setup_databases()
        self.setup()

    def tearDown(self):
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.teardown()  # just to make it look like pep8

    def before_environment_setup(self):
        pass

    def before_database_setup(self):
        pass

    def setup(self):
        pass

    def teardown(self):
        pass
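A hypothetical subclass (not from the original project) illustrating how the hooks above are intended to be used: before_environment_setup() runs before setup_test_environment(), before_database_setup() runs before the test databases are created, and setup()/teardown() wrap each individual test.

from django.conf import settings

class ExampleDBTestCase(DBTestCase):
    def before_environment_setup(self):
        # Adjust settings before the test environment is installed.
        settings.DEBUG = False

    def setup(self):
        # The test databases exist at this point; create per-test data here.
        pass

    def teardown(self):
        # Clean up whatever setup() created.
        pass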
Example #4
class DBTestCase(TestCase, AssertQueriesCountMixin):
    test_runner, old_config = None, None

    def setUp(self):
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner()
        self.before_environment_setup()  # HOOK
        self.test_runner.setup_test_environment()
        self.before_database_setup()  # HOOK
        self.old_config = self.test_runner.setup_databases()
        self.setup()

    def tearDown(self):
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.teardown()  # just to make it look like pep8

    def before_environment_setup(self):
        pass

    def before_database_setup(self):
        pass

    def setup(self):
        pass

    def teardown(self):
        pass
Example #5
    def run(self):
        catchbreak = self.unit_test.pop("catchbreak")
        if catchbreak:
            from unittest.signals import installHandler
            installHandler()

        # Before using the output, save the original streams so they can be restored at the end
        org_out = sys.stdout
        org_err = sys.stderr
        try:
            test_runner = TestRunner(**self.__dict__)
            if test_runner.exit_request is None:
                if self.is_django:
                    from django.test.simple import DjangoTestSuiteRunner
                    dtsr = DjangoTestSuiteRunner(**self.unit_test)
                    dtsr.setup_test_environment()
                    old_config = dtsr.setup_databases()
                    test_runner.run(self.test_list)
                    dtsr.teardown_databases(old_config)
                    dtsr.teardown_test_environment()
                else:
                    test_runner.run(self.test_list)
        except Exception:
            print_exception()
        finally:
            # Restore the original output streams
            sys.stdout = org_out
            sys.stderr = org_err
            if isinstance(test_runner.exit_request, int):
                sys.exit(test_runner.exit_request)
Example #6
def _django_runner(request):
    """Create the django runner, internal to pytest-django

    This does important things like setting up the local memory email
    backend etc.

    XXX It is a little dodgy that this is an autouse fixture.  Perhaps
        an email fixture should be requested in order to be able to
        use the Django email machinery just like you need to request a
        db fixture for access to the Django database, etc.  But
        without duplicating a lot more of Django's test support code
        we need to follow this model.
    """
    if django_settings_is_configured():
        import django
        # Django >= 1.7: Call django.setup() to initialize Django
        setup = getattr(django, 'setup', lambda: None)
        setup()

        from django.test.simple import DjangoTestSuiteRunner

        runner = DjangoTestSuiteRunner(interactive=False)
        runner.setup_test_environment()
        request.addfinalizer(runner.teardown_test_environment)
        return runner
Example #7
    def handle(self, *args, **options):
        USE_SOUTH = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
        try:
            if USE_SOUTH:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
        except:
            USE_SOUTH = False

        self._test_runner = DjangoTestSuiteRunner(interactive=False)
        DjangoTestSuiteRunner.setup_test_environment(self._test_runner)
        self._created_db = DjangoTestSuiteRunner.setup_databases(self._test_runner)
        call_command('syncdb', verbosity=0, interactive=False,)

        if USE_SOUTH:
            call_command('migrate', verbosity=0, interactive=False,)

        settings.DEBUG = options.get('debug', False)

        verbosity = int(options.get('verbosity', 4))
        apps_to_run = tuple(options.get('apps', '').split(","))
        apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
        run_server = not options.get('no_server', False)
        tags = options.get('tags', None)
        server = Server(port=options['port'])

        paths = self.get_paths(args, apps_to_run, apps_to_avoid)
        if run_server:
            try:
                server.start()
            except LettuceServerException as e:
                raise SystemExit(e)
Example #8
def setup():
    global test_runner
    global old_config
    from django.test.simple import DjangoTestSuiteRunner
    test_runner = DjangoTestSuiteRunner()
    test_runner.setup_test_environment()
    old_config = test_runner.setup_databases()
Example #9
class LemonwiseReviewsTestSetup(Plugin):
    """
    Nose plugin that sets up the environment needed to test
    """
    enabled = True
    name = 'testsetup-lemonwise'

    old_config = None
    runner = None

    def __init__(self, *args, **kwargs):
        # Import client here to patch Django's own version
        from lemonwise.utils.testharness import client

        super(LemonwiseReviewsTestSetup, self).__init__(*args, **kwargs)

    def options(self, parser, env):
        super(LemonwiseReviewsTestSetup, self).options(parser, env)

    def begin(self):
        from django.test.simple import DjangoTestSuiteRunner

        setup_environ(settings)

        self.runner = DjangoTestSuiteRunner()
        self.runner.setup_test_environment()
        self.old_config = self.runner.setup_databases()

    def finalize(self, result):
        self.runner.teardown_databases(self.old_config)
        self.runner.teardown_test_environment()
Example #10
    def setup_django(self):
        from django.conf import settings

        # If Django < 1.2
        if self.legacy_django:
            from django.db import connection
            from django.test.utils import setup_test_environment

            # Setup Django test environment
            setup_test_environment()

            # Create Django test database
            self.old_database_name = settings.DATABASE_NAME
            connection.creation.create_test_db(self.verbosity,
                                               autoclobber=True)
        # If Django >= 1.2
        else:
            from django.test.simple import DjangoTestSuiteRunner

            # Initialize Django tests runner
            runner = DjangoTestSuiteRunner(interactive=self.interactive,
                                           verbosity=self.verbosity)

            # Setup test environment
            runner.setup_test_environment()

            # Setup test databases
            self.old_config = runner.setup_databases()
            self.runner = runner
Example #11
def setup():
    global test_runner
    global old_config
    from django.test.simple import DjangoTestSuiteRunner
    from ella.utils.installedapps import call_modules
    test_runner = DjangoTestSuiteRunner()
    test_runner.setup_test_environment()
    old_config = test_runner.setup_databases()
    call_modules(('register', ))
Example #12
def setup_database(actual_server):
    world.test_runner = DjangoTestSuiteRunner(interactive=False)
    DjangoTestSuiteRunner.setup_test_environment(world.test_runner)
    settings.DEBUG = True
    #world.created_db = DjangoTestSuiteRunner.setup_databases(world.test_runner)
    call_command('syncdb', settings=tsune.settings.ci, interactive=False, verbosity=0)
    #call_command('flush', interactive=False)
    call_command('migrate', settings=tsune.settings.ci, interactive=False, verbosity=0)
    call_command('loaddata', 'LettuceFixtures.json', verbosity=0)
Example #13
def setup():
    global test_runner
    global old_config
    from django.test.simple import DjangoTestSuiteRunner
    from ella.utils.installedapps import call_modules
    test_runner = DjangoTestSuiteRunner()
    test_runner.setup_test_environment()
    old_config = test_runner.setup_databases()
    call_modules(('register', ))
Example #14
def setup_database(actual_server):
	# This will set up your database, sync it, and run migrations if you are using South.
	# It does this before the Test Django server is set up.
	logger.info("Setting up a test database...")
 
	# Uncomment if you are using South
	# patch_for_test_db_setup()
 
	world.test_runner = DjangoTestSuiteRunner(interactive=False)
	DjangoTestSuiteRunner.setup_test_environment(world.test_runner)
	world.created_db = DjangoTestSuiteRunner.setup_databases(world.test_runner)
 
	call_command('syncdb', interactive=False, verbosity=0)
Example #15
def setup():
    global test_runner
    global old_config

    try:
        from django.test.simple import DjangoTestSuiteRunner as TestSuiteRunner
    except ImportError:
        # DjangoTestSuiteRunner was deprecated in django 1.8:
        # https://docs.djangoproject.com/en/1.8/internals/deprecation/#deprecation-removed-in-1-8
        from django.test.runner import DiscoverRunner as TestSuiteRunner

    test_runner = TestSuiteRunner()
    test_runner.setup_test_environment()
    old_config = test_runner.setup_databases()
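As the comment above notes, DjangoTestSuiteRunner was removed in Django 1.8. A sketch of the same flow written directly against DiscoverRunner (assumes settings are configured and that django.setup() has been called on Django 1.7+):

from django.test.runner import DiscoverRunner

def setup():
    global test_runner
    global old_config

    test_runner = DiscoverRunner(interactive=False, verbosity=0)
    test_runner.setup_test_environment()
    old_config = test_runner.setup_databases()

def teardown():
    test_runner.teardown_databases(old_config)
    test_runner.teardown_test_environment()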
Example #16
def setup():
    global test_runner
    global old_config

    try:
        from django.test.simple import DjangoTestSuiteRunner as TestSuiteRunner
    except ImportError:
        # DjangoTestSuiteRunner was deprecated in django 1.8:
        # https://docs.djangoproject.com/en/1.8/internals/deprecation/#deprecation-removed-in-1-8
        from django.test.runner import DiscoverRunner as TestSuiteRunner

    test_runner = TestSuiteRunner()
    test_runner.setup_test_environment()
    old_config = test_runner.setup_databases()
Example #17
def setup_database(actual_server):
	'''
	This will set up your database, sync it, and run migrations if you are using South.
	It does this before the Test Django server is set up.
	'''
	logger.info("Setting up a test database...")
 
	# Uncomment if you are using South
	# patch_for_test_db_setup()
 
	world.test_runner = DjangoTestSuiteRunner(interactive=False)
	DjangoTestSuiteRunner.setup_test_environment(world.test_runner)
	world.created_db = DjangoTestSuiteRunner.setup_databases(world.test_runner)
 
	call_command('syncdb', interactive=False, verbosity=0)
Example #18
def setup():
    try:
        global test_runner
        global old_config

        from django.test.simple import DjangoTestSuiteRunner
        test_runner = DjangoTestSuiteRunner()
        test_runner.setup_test_environment()
        old_config = test_runner.setup_databases()

        from django.utils.translation import activate
        activate('cs')

    except Exception as e:
        import traceback
        traceback.print_exc()
Example #19
def setup():
    """
    Setup the environment for Django (create databases, turn on DEBUG, etc).

    :returns: teardown function
    """
    # Use Django's test suite runner, as it sets up test databases nicely
    runner = DjangoTestSuiteRunner()
    runner.setup_test_environment()
    old_config = runner.setup_databases()

    def teardown():
        runner.teardown_databases(old_config)
        runner.teardown_test_environment()

    return teardown
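Usage sketch for the helper above (the test-running call is a placeholder): pair setup() with the teardown callable it returns so the test databases are always cleaned up.

teardown = setup()
try:
    pass  # run the test suite here
finally:
    teardown()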
Example #20
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner.
    Also runs PEP8 and Coverage checks.
    """
    def __init__(self, *args, **kwargs):
        self.configure()
        self.cov = coverage()
        self.cov.start()
        self.packages = self.resolve_packages()

        super(SetupTestSuite, self).__init__(tests=self.build_tests(), \
                *args, **kwargs)

        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=1,
            interactive=True,
            failfast=False
        )
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.db.models import get_app
        from django.test.simple import build_suite

        tests = []
        for package in self.packages:
            try:
                app_name = package.rsplit('.')[-1]
                app = get_app(app_name, emptyOK=True)
                tests.append(build_suite(app))
            except ImproperlyConfigured as e:
                log.info("Warning: %s" % e)
                raise
            except ImportError as e:
                log.info("Warning: %s" % e)
                raise
Example #21
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner.
    Also runs PEP8 and Coverage checks.
    """
    def __init__(self, *args, **kwargs):
        self.configure()
        self.cov = coverage()
        self.cov.start()
        self.packages = self.resolve_packages()

        super(SetupTestSuite, self).__init__(tests=self.build_tests(), \
                *args, **kwargs)

        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=1,
            interactive=True,
            failfast=False
        )
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.db.models import get_app
        from django.test.simple import build_suite

        tests = []
        for package in self.packages:
            try:
                app_name = package.rsplit('.')[-1]
                app = get_app(app_name, emptyOK=True)
                tests.append(build_suite(app))
            except ImproperlyConfigured as e:
                log.info("Warning: %s" % e)
                raise
            except ImportError as e:
                log.info("Warning: %s" % e)
                raise
Example #22
    def handle(self, *args, **options):
        USE_SOUTH = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
        try:
            if USE_SOUTH:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
        except:
            USE_SOUTH = False

        self._test_runner = DjangoTestSuiteRunner(interactive=False)
        DjangoTestSuiteRunner.setup_test_environment(self._test_runner)
        self._created_db = DjangoTestSuiteRunner.setup_databases(
            self._test_runner)
        call_command(
            'syncdb',
            verbosity=0,
            interactive=False,
        )

        if USE_SOUTH:
            call_command(
                'migrate',
                verbosity=0,
                interactive=False,
            )

        settings.DEBUG = options.get('debug', False)

        verbosity = int(options.get('verbosity', 4))
        apps_to_run = tuple(options.get('apps', '').split(","))
        apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
        run_server = not options.get('no_server', False)
        tags = options.get('tags', None)
        server = Server(port=options['port'])

        paths = self.get_paths(args, apps_to_run, apps_to_avoid)
        if run_server:
            try:
                server.start()
            except LettuceServerException as e:
                raise SystemExit(e)
Example #23
def _django_runner(request):
    if not is_configured():
        return

    from django.test.simple import DjangoTestSuiteRunner

    import django
    if hasattr(django, 'setup'):
        django.setup()

    runner = DjangoTestSuiteRunner(interactive=False)
    runner.setup_test_environment()
    request.addfinalizer(runner.teardown_test_environment)

    config = runner.setup_databases()

    def teardown_database():
        runner.teardown_databases(config)
    request.addfinalizer(teardown_database)

    return runner
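A note on ordering in the fixture above: pytest runs finalizers last-in-first-out, so the database teardown registered second runs before teardown_test_environment, which is the usual teardown order. A tiny self-contained sketch of that LIFO behaviour (FakeRequest is made up for illustration):

class FakeRequest:
    def __init__(self):
        self._finalizers = []

    def addfinalizer(self, fn):
        self._finalizers.append(fn)

    def finish(self):
        # Pop in reverse registration order, mirroring pytest's behaviour.
        while self._finalizers:
            self._finalizers.pop()()

calls = []
req = FakeRequest()
req.addfinalizer(lambda: calls.append('teardown_test_environment'))
req.addfinalizer(lambda: calls.append('teardown_databases'))
req.finish()
assert calls == ['teardown_databases', 'teardown_test_environment']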
Example #24
    def setup_test_environment(self, **kwargs):
        DjangoTestSuiteRunner.setup_test_environment(self, **kwargs)
        settings.ECILOP_PORT = paths.ECILOP_PORT = 9865
        paths.ROOT = paths.RootPath(TEST_ROOT_PATH)
        paths.create_paths(True)
        tablespace_paths = []
        for i in range(TEST_DRAFT_COUNT):
            draft_path = paths.ROOT.drafts[str(i)]
            os.makedirs(draft_path.tablespace)
            tablespace_paths.append(draft_path.tablespace)
            os.mkdir(draft_path.apps)
            os.mkdir(draft_path.libs)
            os.mkdir(draft_path.grantors)
            write_file(draft_path.config, '{}')
            write_file(draft_path.rsa_pub, 'public key')
        Popen(['sudo', 'chown', 'postgres'] + tablespace_paths).wait()
        os.symlink('0', paths.ROOT.drafts.curr)
        self._ecilop_process = Popen(
            [paths.ECILOP_EXE_PATH, '--config', paths.ROOT.etc.ecilop_conf],
            stdout=PIPE)
        self._ecilop_process.stdout.readline()
        self._ecilop_process.stdout.readline()
Example #25
    def setup_test_environment(self, **kwargs):
        DjangoTestSuiteRunner.setup_test_environment(self, **kwargs)
        settings.ECILOP_PORT = paths.ECILOP_PORT = 9865
        paths.ROOT = paths.RootPath(TEST_ROOT_PATH)
        paths.create_paths(True)
        tablespace_paths = []
        for i in range(TEST_DRAFT_COUNT):
            draft_path = paths.ROOT.drafts[str(i)]
            os.makedirs(draft_path.tablespace)
            tablespace_paths.append(draft_path.tablespace)
            os.mkdir(draft_path.apps)
            os.mkdir(draft_path.libs)
            os.mkdir(draft_path.grantors)
            write_file(draft_path.config, '{}')
            write_file(draft_path.rsa_pub, 'public key')
        Popen(['sudo', 'chown', 'postgres'] + tablespace_paths).wait()
        os.symlink('0', paths.ROOT.drafts.curr)
        self._ecilop_process = Popen(
            [paths.ECILOP_EXE_PATH, '--config', paths.ROOT.etc.ecilop_conf],
            stdout=PIPE)
        self._ecilop_process.stdout.readline()
        self._ecilop_process.stdout.readline()
Example #26
def _django_runner(request):
    """Create the django runner, internal to pytest-django

    This does important things like setting up the local memory email
    backend etc.

    XXX It is a little dodgy that this is an autouse fixture.  Perhaps
        an email fixture should be requested in order to be able to
        use the Django email machinery just like you need to request a
        db fixture for access to the Django database, etc.  But
        without duplicating a lot more of Django's test support code
        we need to follow this model.
    """
    if request.config.option.ds:

        import django.conf
        from django.test.simple import DjangoTestSuiteRunner

        runner = DjangoTestSuiteRunner(interactive=False)
        runner.setup_test_environment()
        django.conf.settings.DEBUG_PROPAGATE_EXCEPTIONS = True
        request.addfinalizer(runner.teardown_test_environment)
        return runner
Example #27
    def setup_django(self):
        from django.conf import settings

        # If Django < 1.2
        if self.legacy_django:
            from django.db import connection
            from django.test.utils import setup_test_environment

            # Setup Django test environment
            setup_test_environment()

            # Create Django test database
            self.old_database_name = settings.DATABASE_NAME
            connection.creation.create_test_db(self.verbosity,
                                               autoclobber=True)
        # If Django >= 1.2
        else:
            from django.test.simple import DjangoTestSuiteRunner

            # Initialize Django tests runner
            runner = DjangoTestSuiteRunner(interactive=self.interactive,
                                           verbosity=self.verbosity)

            # The newer Django test runner sets ``DEBUG`` to False when setting
            # up the test environment, so we need to store the real ``DEBUG`` value
            DEBUG = settings.DEBUG

            # Setup test environment
            runner.setup_test_environment()

            # And restore it to real value if needed
            if settings.DEBUG != DEBUG:
                settings.DEBUG = DEBUG

            # Setup test databases
            self.old_config = runner.setup_databases()
            self.runner = runner
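A hedged counterpart to setup_django() above (not part of the original snippet): the teardown method that would mirror both branches, using destroy_test_db() for the legacy path and the runner's teardown methods otherwise.

    def teardown_django(self):
        from django.test.utils import teardown_test_environment

        # If Django < 1.2
        if self.legacy_django:
            from django.db import connection

            # Mirror create_test_db(): drop the test database, restore the
            # original database name, then undo setup_test_environment()
            connection.creation.destroy_test_db(self.old_database_name,
                                                self.verbosity)
            teardown_test_environment()
        # If Django >= 1.2
        else:
            self.runner.teardown_databases(self.old_config)
            self.runner.teardown_test_environment()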
Example #28
    def setup_test_environment(self, **kwargs):
        DjangoTestSuiteRunner.setup_test_environment(self, **kwargs)

        # Directories for media and processing files that are
        # used or generated during unit tests.
        settings.MEDIA_ROOT = settings.TEST_MEDIA_ROOT
        settings.PROCESSING_ROOT = settings.TEST_PROCESSING_ROOT

        # The Celery daemon uses the regular Django database, while
        # the testing framework uses a separate database.  Therefore,
        # we can't get task results from the daemon during a test.
        #
        # The best solution for now is to not use the daemon, and
        # simply block and wait for the result as the task runs.
        # More info: http://docs.celeryproject.org/en/latest/django/unit-testing.html
        settings.CELERY_ALWAYS_EAGER = True

        # To test functionality of sending emails to the admins,
        # settings.ADMINS must be set. It might not be set for
        # development machines.
        settings.ADMINS = (
            ('Admin One', '*****@*****.**'),
            ('Admin Two', '*****@*****.**'),
        )
Example #29
class SetupTesting(TestSuite):

    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner.
    Also runs PEP8 and Coverage checks.
    """

    def __init__(self, *args, **kwargs):
        self.configure()
        self.coverage = coverage()
        self.coverage.start()
        self.packages = get_packages(
            path=BASEDIR,
            exclude_packages=exclude_packages)
        self.options = {
            'failfast': '',
            'autoreload': '',
            'label': ['testing'],
        }

        super(SetupTesting, self).__init__(tests=self.build_tests(),
                                           *args, **kwargs)

        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=2,
            interactive=False,
            failfast=True
        )
        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        try:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
        except ImportError:
            pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()


    def flake8_report(self):
        """
        Outputs flake8 report.
        """
        log.info("\n\nFlake8 Report:")
        base = get_path([BASEDIR, 'tribus'])
        pys = find_files(path=base, pattern='*.py')
        flake8_style = get_style_guide()
        report = flake8_style.check_files(pys)
        exit_code = print_report(report, flake8_style)


    def pep257_report(self):
        """
        Outputs PEP257 report.
        """
        log.info("\n\nPEP257 Report:")
        base = get_path([BASEDIR, 'tribus'])
        pys = find_files(path=base, pattern='*.py')
        report = pep257.check_files(pys)

        if len(report) > 0:
            for r in report:
                log.info(r)
        else:
            log.info("\nNo errors found!")


    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """

        include = ['%s*' % package for package in self.packages]
        omit = ['*testing*']

        log.info("\n\nCoverage Report:")
        try:
            self.coverage.stop()
            self.coverage.report(include=include, omit=omit)
        except CoverageException as e:
            log.info("Coverage Exception: %s" % e)

        if os.environ.get('TRAVIS'):
            log.info("Submitting coverage to coveralls.io...")
            try:
                result = Coveralls()
                result.wear()
            except CoverallsException as e:
                log.error("Coveralls Exception: %s" % e)


    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        TODO: Cleanup/simplify this method, flow too complex,
        too much duplication.
        """
        from django.db.models import get_app
        from django.test.simple import build_suite

        tests = []
        app = get_app(self.options['label'][0])
        tests.append(build_suite(app))

        return tests


    def configure(self):
        """
        Configures Django settings.
        """
        from django.conf import settings
        from django.utils.importlib import import_module

        try:
            test_settings = import_module('tribus.config.testing')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)

        setting_attrs = {}
        for attr in dir(test_settings):
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)

        if not settings.configured:
            settings.configure(**setting_attrs)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTesting, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        #self.coverage_report()
        #self.flake8_report()
        #self.pep257_report()
        return result
Example #30
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DiscoverRunner or DjangoTestSuiteRunner as the test runner.
    Also runs PEP8 and Coverage checks.
    """
    def __init__(self, *args, **kwargs):
        self.cov = coverage()
        self.cov.start()
        self.configure()
        self.packages = self.resolve_packages()

        parser = argparse.ArgumentParser()
        parser.add_argument('-a', '--autoreload', dest='autoreload',
                action='store_const', const=True, default=False,)
        parser.add_argument('-f', '--failfast', dest='failfast',
                action='store_const', const=True, default=False,)
        parser.add_argument('-l', '--label', dest='label')
        self.options = vars(parser.parse_args(sys.argv[2:]))
        sys.argv = sys.argv[:2]

        runner_options = {
            'verbosity': 1,
            'interactive': True,
            'failfast': False,
        }

        if django.VERSION >= (1, 8):
            from django.test.runner import DiscoverRunner
            self.test_runner = DiscoverRunner(**runner_options)
            tests = self.test_runner.build_suite()
        else:
            from django.test.simple import DjangoTestSuiteRunner
            self.test_runner = DjangoTestSuiteRunner(**runner_options)
            tests = self.build_tests()

        super(SetupTestSuite, self).__init__(tests=tests, *args, **kwargs)

        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        if django.VERSION < (1, 7):
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except ImportError:
                pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def handle_label_exception(self, exception):
        """
        Check whether or not the exception was caused due to a bad label
        being provided. If so raise LabelException which will cause an exit,
        otherwise continue.

        The check looks for particular error messages, which obviously sucks.
        TODO: Implement a cleaner test.
        """
        markers = [
            'no such test method',
            'should be of the form app.TestCase or app.TestCase.test_method',
            'App with label',
            'does not refer to a test',
        ]
        if any(marker in exception.message for marker in markers):
            log.info(exception)
            raise LabelException(exception)
        else:
            raise exception

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages for Django < 1.8.
        TODO: Cleanup/simplify this method, flow too complex,
        too much duplication.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.test.simple import build_suite, build_test
        try:
            from django.apps import apps
            get_app = apps.get_app_config
        except ImportError:
            from django.db.models import get_app
        tests = []
        packages = [self.options['label'], ] if \
                self.options['label'] else self.packages
        for package in packages:
            try:
                if not self.options['autoreload']:
                    if self.options['label']:
                        try:
                            tests.append(build_test(package))
                        except (ImproperlyConfigured, ValueError) as e:
                            self.handle_label_exception(e)
                    else:
                        app = get_app(package)
                        tests.append(build_suite(app))
                else:
                    # Wait for exceptions to be resolved.
                    exception = None
                    while True:
                        try:
                            if self.options['label']:
                                try:
                                    tests.append(build_test(package))
                                except (ImproperlyConfigured, ValueError) as e:
                                    self.handle_label_exception(e)
                            else:
                                app = get_app(package)
                                tests.append(build_suite(app))
                            break
                        except LabelException:
                            raise
                        except Exception as e:
                            if exception != str(e):
                                traceback.print_exc()
                            exception = str(e)
                            time.sleep(1)
            except ImproperlyConfigured as e:
                log.info("Warning: %s" % traceback.format_exc())
            except ImportError as e:
                log.info("Warning: %s" % traceback.format_exc())

        return tests

    def configure(self):
        """
        Configures Django settings.
        """

        import django
        from django.conf import settings
        try:
            from django.utils.importlib import import_module
        except ImportError:
            from importlib import import_module
        try:
            test_settings = import_module('test_settings')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)

        setting_attrs = {}
        for attr in dir(test_settings):
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)

        if not settings.configured:
            settings.configure(**setting_attrs)

        if hasattr(django, 'setup'):
            django.setup()

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        verbose = '--quiet' not in sys.argv
        self.cov.stop()
        if verbose:
            log.info("\nCoverage Report:")
            try:
                include = ['%s*' % package for package in self.packages]
                omit = ['*tests*']
                self.cov.report(include=include, omit=omit)
                self.cov.save()
                self.cov.xml_report(include=include, omit=omit)
            except misc.CoverageException as e:
                log.info("Coverage Exception: %s" % e)

    def resolve_packages(self):
        """
        Frame hack to determine packages contained in module for testing.
        We ignore submodules (those containing '.')
        """
        f = sys._getframe()
        while f:
            if 'self' in f.f_locals:
                locals_self = f.f_locals['self']
                py_modules = getattr(locals_self, 'py_modules', None)
                packages = getattr(locals_self, 'packages', None)

                top_packages = []
                if py_modules or packages:
                    if py_modules:
                        for module in py_modules:
                            if '.' not in module:
                                top_packages.append(module)
                    if packages:
                        for package in packages:
                            if '.' not in package:
                                top_packages.append(package)

                    return list(set(top_packages))
            f = f.f_back

    def pep8_report(self):
        """
        Outputs PEP8 report to screen and pep8.txt.
        """
        verbose = '--quiet' not in sys.argv
        if verbose:
            # Hook into stdout.
            old_stdout = sys.stdout
            sys.stdout = mystdout = StringIO()

            # Run Pep8 checks, excluding South migrations.
            pep8_style = pep8.StyleGuide()
            pep8_style.options.exclude.append('migrations')
            pep8_style.options.exclude.append('south_migrations')
            pep8_style.check_files(self.packages)

            # Restore stdout.
            sys.stdout = old_stdout

            # Save result to pep8.txt.
            result = mystdout.getvalue()
            output = open('pep8.txt', 'w')
            output.write(result)
            output.close()

            # Return Pep8 result
            if result:
                log.info("\nPEP8 Report:")
                log.info(result)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTestSuite, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.coverage_report()
        self.pep8_report()
        return result
Example #31
class Command(BaseCommand):
    help = u'Run lettuce tests all along installed apps'
    args = '[PATH to feature file or folder]'
    requires_model_validation = False

    option_list = BaseCommand.option_list[1:] + (
        make_option('-v', '--verbosity', action='store', dest='verbosity', default='4',
            type='choice', choices=map(str, range(5)),
            help='Verbosity level; 0=no output, 1=only dots, 2=only scenario names, 3=colorless output, 4=normal output (colorful)'),

        make_option('-a', '--apps', action='store', dest='apps', default='',
            help='Run ONLY the django apps that are listed here. Comma separated'),

        make_option('-A', '--avoid-apps', action='store', dest='avoid_apps', default='',
            help='AVOID running the django apps that are listed here. Comma separated'),

        make_option('-S', '--no-server', action='store_true', dest='no_server', default=False,
            help="will not run django's builtin HTTP server"),

        make_option('-T', '--test-server', action='store_true', dest='test_database', default=False,
            help="will run django's builtin HTTP server using the test databases"),

        make_option('-P', '--port', type='int', dest='port',
            help="the port in which the HTTP server will run at"),

        make_option('-d', '--debug-mode', action='store_true', dest='debug', default=False,
            help="when put together with builtin HTTP server, forces django to run with settings.DEBUG=True"),

        make_option('-s', '--scenarios', action='store', dest='scenarios', default=None,
            help='Comma separated list of scenarios to run'),

        make_option("-t", "--tag",
                    dest="tags",
                    type="str",
                    action='append',
                    default=None,
                    help='Tells lettuce to run the specified tags only; '
                    'can be used multiple times to define more tags '
                    '(prefixing tags with "-" will exclude them and '
                    'prefixing with "~" will match approximate words)'),

        make_option('--with-xunit', action='store_true', dest='enable_xunit', default=False,
            help='Output JUnit XML test results to a file'),

        make_option('--xunit-file', action='store', dest='xunit_file', default=None,
            help='Write JUnit XML to this file. Defaults to lettucetests.xml'),

        make_option("--failfast", dest="failfast", default=False,
                    action="store_true", help='Stop running in the first failure'),

        make_option("--pdb", dest="auto_pdb", default=False,
                    action="store_true", help='Launches an interactive debugger upon error'),
    )

    def stopserver(self, failed=False):
        raise SystemExit(int(failed))

    def get_paths(self, args, apps_to_run, apps_to_avoid):
        if args:
            for path, exists in zip(args, map(os.path.exists, args)):
                if not exists:
                    sys.stderr.write("You passed the path '%s', but it does not exist.\n" % path)
                    sys.exit(1)
            else:
                paths = args
        else:
            paths = harvest_lettuces(apps_to_run, apps_to_avoid)  # list of tuples with (path, app_module)

        return paths

    def handle(self, *args, **options):
        setup_test_environment()

        verbosity = int(options.get('verbosity', 4))
        apps_to_run = tuple(options.get('apps', '').split(","))
        apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
        run_server = not options.get('no_server', False)
        test_database = options.get('test_database', False)
        tags = options.get('tags', None)
        failfast = options.get('failfast', False)
        auto_pdb = options.get('auto_pdb', False)

        if test_database:
            migrate_south = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except:
                migrate_south = False
                pass

            from django.test.simple import DjangoTestSuiteRunner
            self._testrunner = DjangoTestSuiteRunner()
            self._testrunner.setup_test_environment()
            self._old_db_config = self._testrunner.setup_databases()

            call_command('syncdb', verbosity=0, interactive=False,)
            if migrate_south:
                call_command('migrate', verbosity=0, interactive=False,)

        settings.DEBUG = options.get('debug', False)

        server = Server(port=options['port'])

        paths = self.get_paths(args, apps_to_run, apps_to_avoid)
        if run_server:
            try:
                server.start()
            except LettuceServerException as e:
                raise SystemExit(e)

        os.environ['SERVER_NAME'] = str(server.address)
        os.environ['SERVER_PORT'] = str(server.port)

        failed = False

        registry.call_hook('before', 'harvest', locals())
        results = []
        try:
            for path in paths:
                app_module = None
                if isinstance(path, tuple) and len(path) == 2:
                    path, app_module = path

                if app_module is not None:
                    registry.call_hook('before_each', 'app', app_module)

                runner = Runner(path, options.get('scenarios'), verbosity,
                                enable_xunit=options.get('enable_xunit'),
                                xunit_filename=options.get('xunit_file'),
                                tags=tags, failfast=failfast, auto_pdb=auto_pdb)

                result = runner.run()
                if app_module is not None:
                    registry.call_hook('after_each', 'app', app_module, result)

                results.append(result)
                if not result or result.steps != result.steps_passed:
                    failed = True
        except SystemExit as e:
            failed = e.code
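Hedged addition (the snippet is cut off above): when the test databases were set up in handle(), the mirror-image teardown would normally run once the harvest loop finishes.

        if test_database:
            self._testrunner.teardown_databases(self._old_db_config)
            self._testrunner.teardown_test_environment()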
Example #32
class DjangoManager(object):
    """
    A Django plugin for py.test that handles creating and destroying the
    test environment and test database.
    
    Similar to Django's TransactionTestCase, a transaction is started and
    rolled back for each test. Additionally, the settings are copied before
    each test and restored at the end of the test, so it is safe to modify
    settings within tests.
    """
    
    def __init__(self, verbosity=0, noinput=False):
        self.verbosity = verbosity
        self.noinput = noinput
        
        self._old_database_name = None
        self._old_settings = []
        self._old_urlconf = None
        
        self.suite_runner = None
        self.old_db_config = None
        self.testcase = None
        
    def pytest_sessionstart(self, session):
        #capture = py.io.StdCapture()
        # make sure the normal django syncdb command is run (do not run migrations for tests)
        # this is faster and less error prone
        management.get_commands()  # load commands dict
        management._commands['syncdb'] = 'django.core'  # make sure `south` migrations are disabled
        self.suite_runner = DjangoTestSuiteRunner()
        self.suite_runner.setup_test_environment()
        self.old_db_config = self.suite_runner.setup_databases()
        settings.DATABASE_SUPPORTS_TRANSACTIONS = True
        #unused_out, err = capture.reset()
        #sys.stderr.write(err)
 
    def pytest_sessionfinish(self, session, exitstatus):
        capture = py.io.StdCapture()
        self.suite_runner.teardown_test_environment()
        self.suite_runner.teardown_databases(self.old_db_config)
        unused_out, err = capture.reset()
        sys.stderr.write(err)
    
    def pytest_itemstart(self, item):
        # This lets us control the order of the setup/teardown
        # Yuck.
        if _is_unittest(self._get_item_obj(item)):
            item.setup = lambda: None
            item.teardown = lambda: None
    
    def pytest_runtest_setup(self, item):
        # Set the URLs if the py.test.urls() decorator has been applied
        if hasattr(item.obj, 'urls'):
            self._old_urlconf = settings.ROOT_URLCONF
            settings.ROOT_URLCONF = item.obj.urls
            clear_url_caches()
            
        item_obj = self._get_item_obj(item)
        testcase = _get_testcase(item_obj)
        # We have to run these here since py.test's unittest plugin skips
        # __call__()
        testcase.client = Client()
        testcase._pre_setup()
        testcase.setUp()
        
    def pytest_runtest_teardown(self, item):
        item_obj = self._get_item_obj(item)
        
        testcase = _get_testcase(item_obj)
        testcase.tearDown()
        if not isinstance(item_obj, TestCase):
            testcase._post_teardown()
            
        if hasattr(item, 'urls') and self._old_urlconf is not None:
            settings.ROOT_URLCONF = self._old_urlconf
            self._old_urlconf = None
        
    def _get_item_obj(self, item):
        try:
            return item.obj.im_self
        except AttributeError:
            return None
       
    def pytest_namespace(self):
        """
        Sets up the py.test.params decorator.
        """
        def params(funcarglist):
            """
            A decorator to make parametrised tests easy. Takes a list of 
            dictionaries of keyword arguments for the function. A test is 
            created for each dictionary.

            Example:

                @py.test.params([dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)])  
                def test_equals(a, b):
                    assert a == b
            """
            def wrapper(function):  
                function.funcarglist = funcarglist  
                return function  
            return wrapper

        def load_fixture(fixture):
            """
            Loads a fixture, useful for loading fixtures in funcargs.

            Example:

                def pytest_funcarg__articles(request):
                    py.test.load_fixture('test_articles')
                    return Article.objects.all()
            """
            call_command('loaddata', fixture, **{
                'verbosity': self.verbosity + 1,
                'commit': not settings.DATABASE_SUPPORTS_TRANSACTIONS
            })
        
        def urls(urlconf):
            """
            A decorator to change the URLconf for a particular test, similar 
            to the `urls` attribute on Django's `TestCase`.
            
            Example:
            
                @py.test.urls('myapp.test_urls')
                def test_something(client):
                    assert 'Success!' in client.get('/some_path/')
            """
            def wrapper(function):
                function.urls = urlconf
                return function
            return wrapper
        
        return {'params': params, 'load_fixture': load_fixture, 'urls': urls}

    def pytest_generate_tests(self, metafunc):
        """
        Generates parametrised tests if the py.test.params decorator has been 
        used.
        """
        for funcargs in getattr(metafunc.function, 'funcarglist', ()):  
            metafunc.addcall(funcargs=funcargs)
Example #33
class Command(BaseCommand):
    help = u"Run lettuce tests all along installed apps"
    args = "[PATH to feature file or folder]"
    requires_model_validation = False

    option_list = BaseCommand.option_list[1:] + (
        make_option(
            "-v",
            "--verbosity",
            action="store",
            dest="verbosity",
            default="4",
            type="choice",
            choices=map(str, range(5)),
            help="Verbosity level; 0=no output, 1=only dots, 2=only scenario names, 3=colorless output, 4=normal output (colorful)",
        ),
        make_option(
            "-a",
            "--apps",
            action="store",
            dest="apps",
            default="",
            help="Run ONLY the django apps that are listed here. Comma separated",
        ),
        make_option(
            "-A",
            "--avoid-apps",
            action="store",
            dest="avoid_apps",
            default="",
            help="AVOID running the django apps that are listed here. Comma separated",
        ),
        make_option(
            "-S",
            "--no-server",
            action="store_true",
            dest="no_server",
            default=False,
            help="will not run django's builtin HTTP server",
        ),
        make_option(
            "-T",
            "--test-server",
            action="store_true",
            dest="test_database",
            default=False,
            help="will run django's builtin HTTP server using the test databases",
        ),
        make_option("-P", "--port", type="int", dest="port", help="the port in which the HTTP server will run at"),
        make_option(
            "-d",
            "--debug-mode",
            action="store_true",
            dest="debug",
            default=False,
            help="when put together with builtin HTTP server, forces django to run with settings.DEBUG=True",
        ),
        make_option(
            "-s",
            "--scenarios",
            action="store",
            dest="scenarios",
            default=None,
            help="Comma separated list of scenarios to run",
        ),
        make_option(
            "-t",
            "--tag",
            dest="tags",
            type="str",
            action="append",
            default=None,
            help="Tells lettuce to run the specified tags only; "
            "can be used multiple times to define more tags"
            '(prefixing tags with "-" will exclude them and '
            'prefixing with "~" will match approximate words)',
        ),
        make_option(
            "--with-xunit",
            action="store_true",
            dest="enable_xunit",
            default=False,
            help="Output JUnit XML test results to a file",
        ),
        make_option(
            "--xunit-file",
            action="store",
            dest="xunit_file",
            default=None,
            help="Write JUnit XML to this file. Defaults to lettucetests.xml",
        ),
        make_option(
            "--failfast", dest="failfast", default=False, action="store_true", help="Stop running in the first failure"
        ),
        make_option(
            "--pdb",
            dest="auto_pdb",
            default=False,
            action="store_true",
            help="Launches an interactive debugger upon error",
        ),
    )

    def stopserver(self, failed=False):
        raise SystemExit(int(failed))

    def get_paths(self, args, apps_to_run, apps_to_avoid):
        if args:
            for path, exists in zip(args, map(os.path.exists, args)):
                if not exists:
                    sys.stderr.write("You passed the path '%s', but it does not exist.\n" % path)
                    sys.exit(1)
            else:
                paths = args
        else:
            paths = harvest_lettuces(apps_to_run, apps_to_avoid)  # list of tuples with (path, app_module)

        return paths

    def handle(self, *args, **options):
        setup_test_environment()

        verbosity = int(options.get("verbosity", 4))
        apps_to_run = tuple(options.get("apps", "").split(","))
        apps_to_avoid = tuple(options.get("avoid_apps", "").split(","))
        run_server = not options.get("no_server", False)
        test_database = options.get("test_database", False)
        tags = options.get("tags", None)
        failfast = options.get("failfast", False)
        auto_pdb = options.get("auto_pdb", False)

        if test_database:
            migrate_south = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
            try:
                from south.management.commands import patch_for_test_db_setup

                patch_for_test_db_setup()
            except:
                migrate_south = False
                pass

            from django.test.simple import DjangoTestSuiteRunner

            self._testrunner = DjangoTestSuiteRunner()
            self._testrunner.setup_test_environment()
            self._old_db_config = self._testrunner.setup_databases()

            call_command("syncdb", verbosity=0, interactive=False)
            if migrate_south:
                call_command("migrate", verbosity=0, interactive=False)

        settings.DEBUG = options.get("debug", False)

        server = Server(port=options["port"])

        paths = self.get_paths(args, apps_to_run, apps_to_avoid)
        if run_server:
            try:
                server.start()
            except LettuceServerException as e:
                raise SystemExit(e)

        os.environ["SERVER_NAME"] = server.address
        os.environ["SERVER_PORT"] = str(server.port)

        failed = False

        registry.call_hook("before", "harvest", locals())
        results = []
        try:
            for path in paths:
                app_module = None
                if isinstance(path, tuple) and len(path) == 2:
                    path, app_module = path

                if app_module is not None:
                    registry.call_hook("before_each", "app", app_module)

                runner = Runner(
                    path,
                    options.get("scenarios"),
                    verbosity,
                    enable_xunit=options.get("enable_xunit"),
                    xunit_filename=options.get("xunit_file"),
                    tags=tags,
                    failfast=failfast,
                    auto_pdb=auto_pdb,
                )

                result = runner.run()
                if app_module is not None:
                    registry.call_hook("after_each", "app", app_module, result)

                results.append(result)
                if not result or result.steps != result.steps_passed:
                    failed = True
        except SystemExit as e:
            failed = e.code
Example #34
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner.
    Also runs PEP8 and Coverage checks.
    """
    def __init__(self, *args, **kwargs):
        self.configure()
        self.cov = coverage()
        self.cov.start()
        self.packages = self.resolve_packages()

        parser = argparse.ArgumentParser()
        parser.add_argument('-a', '--autoreload', dest='autoreload',
                action='store_const', const=True, default=False,)
        parser.add_argument('-f', '--failfast', dest='failfast',
                action='store_const', const=True, default=False,)
        parser.add_argument('-l', '--label', dest='label')
        self.options = vars(parser.parse_args(sys.argv[2:]))
        sys.argv = sys.argv[:2]

        super(SetupTestSuite, self).__init__(tests=self.build_tests(),
                *args, **kwargs)

        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=1,
            interactive=True,
            failfast=False
        )
        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        try:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
        except ImportError:
            pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def handle_label_exception(self, exception):
        """
        Check whether the exception was caused by a bad label being
        provided. If so, raise LabelException (which will force an exit);
        otherwise re-raise the original exception.

        The check looks for particular error messages, which obviously sucks.
        TODO: Implement a cleaner test.
        """
        markers = [
            'no such test method',
            'should be of the form app.TestCase or app.TestCase.test_method',
            'App with label',
            'does not refer to a test',
        ]
        if any(marker in exception.message for marker in markers):
            log.info(exception)
            raise LabelException(exception)
        else:
            raise exception

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        TODO: Cleanup/simplify this method, flow too complex,
        too much duplication.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.db.models import get_app
        from django.test.simple import build_suite, build_test

        tests = []
        packages = [self.options['label'], ] if \
                self.options['label'] else self.packages
        for package in packages:
            try:
                if not self.options['autoreload']:
                    if self.options['label']:
                        try:
                            tests.append(build_test(package))
                        except (ImproperlyConfigured, ValueError) as e:
                            self.handle_label_exception(e)
                    else:
                        app = get_app(package)
                        tests.append(build_suite(app))
                else:
                    # Wait for exceptions to be resolved.
                    exception = None
                    while True:
                        try:
                            if self.options['label']:
                                try:
                                    tests.append(build_test(package))
                                except (ImproperlyConfigured, ValueError) as e:
                                    self.handle_label_exception(e)
                            else:
                                app = get_app(package)
                                tests.append(build_suite(app))
                            break
                        except LabelException:
                            raise
                        except Exception as e:
                            if exception != str(e):
                                traceback.print_exc()
                            exception = str(e)
                            time.sleep(1)
            except ImproperlyConfigured as e:
                log.info("Warning: %s" % e)
            except ImportError as e:
                log.info("Warning: %s" % e)

        return tests

    def configure(self):
        """
        Configures Django settings.
        """
        from django.conf import settings
        from django.utils.importlib import import_module
        try:
            test_settings = import_module('test_settings')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)

        setting_attrs = {}
        for attr in dir(test_settings):
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)

        if not settings.configured:
            settings.configure(**setting_attrs)

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        verbose = '--quiet' not in sys.argv
        self.cov.stop()
        if verbose:
            log.info("\nCoverage Report:")
            try:
                include = ['%s*' % package for package in self.packages]
                omit = ['*tests*']
                self.cov.report(include=include, omit=omit)
                self.cov.xml_report(include=include, omit=omit)
            except misc.CoverageException as e:
                log.info("Coverage Exception: %s" % e)

    def resolve_packages(self):
        """
        Frame hack to determine packages contained in module for testing.
        We ignore submodules (those containing '.')
        """
        f = sys._getframe()
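        # Walk up the call stack for an object (typically the setup.py
        # Distribution/command) that carries `packages` / `py_modules`.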
        while f:
            if 'self' in f.f_locals:
                locals_self = f.f_locals['self']
                py_modules = getattr(locals_self, 'py_modules', None)
                packages = getattr(locals_self, 'packages', None)

                top_packages = []
                if py_modules or packages:
                    if py_modules:
                        for module in py_modules:
                            if '.' not in module:
                                top_packages.append(module)
                    if packages:
                        for package in packages:
                            if '.' not in package:
                                top_packages.append(package)

                    return list(set(top_packages))
            f = f.f_back

    def pep8_report(self):
        """
        Outputs PEP8 report to screen and pep8.txt.
        """
        verbose = '--quiet' not in sys.argv
        if verbose:
            # Hook into stdout.
            old_stdout = sys.stdout
            sys.stdout = mystdout = StringIO()

            # Run Pep8 checks, excluding South migrations.
            pep8_style = pep8.StyleGuide()
            pep8_style.options.exclude.append('migrations')
            pep8_style.check_files(self.packages)

            # Restore stdout.
            sys.stdout = old_stdout

            # Save result to pep8.txt.
            result = mystdout.getvalue()
            output = open('pep8.txt', 'w')
            output.write(result)
            output.close()

            # Return Pep8 result
            if result:
                log.info("\nPEP8 Report:")
                log.info(result)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTestSuite, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.coverage_report()
        self.pep8_report()
        return result
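
A suite like SetupTestSuite is normally wired in as the setuptools test_suite, which is why __init__ reads its options from sys.argv[2:] (everything after "setup.py test") and then truncates sys.argv. A minimal sketch, assuming the class lives in a top-level tests module (the module name and project metadata are illustrative):

# setup.py (sketch)
from setuptools import setup, find_packages

setup(
    name='myproject',            # illustrative
    version='0.1',
    packages=find_packages(),
    # "python setup.py test -l app.SomeTestCase --failfast" reaches
    # SetupTestSuite.__init__ via sys.argv[2:].
    test_suite='tests.SetupTestSuite',
)
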
Example #35
0
class Command(BaseCommand):
    help = u'Run lettuce tests across all installed apps'
    args = '[PATH to feature file or folder]'
    requires_model_validation = False

    option_list = BaseCommand.option_list[1:] + (
        make_option(
            '-v',
            '--verbosity',
            action='store',
            dest='verbosity',
            default='4',
            type='choice',
            choices=map(str, range(5)),
            help=
            'Verbosity level; 0=no output, 1=only dots, 2=only scenario names, 3=colorless output, 4=normal output (colorful)'
        ),
        make_option(
            '-a',
            '--apps',
            action='store',
            dest='apps',
            default='',
            help=
            'Run ONLY the django apps that are listed here. Comma separated'),
        make_option(
            '-A',
            '--avoid-apps',
            action='store',
            dest='avoid_apps',
            default='',
            help=
            'AVOID running the django apps that are listed here. Comma separated'
        ),
        make_option('-S',
                    '--no-server',
                    action='store_true',
                    dest='no_server',
                    default=False,
                    help="will not run django's builtin HTTP server"),
        make_option(
            '-T',
            '--test-server',
            action='store_true',
            dest='test_database',
            default=False,
            help=
            "will run django's builtin HTTP server using the test databases"),
        make_option('-P',
                    '--port',
                    type='int',
                    dest='port',
                    help="the port on which the HTTP server will run"),
        make_option(
            '-d',
            '--debug-mode',
            action='store_true',
            dest='debug',
            default=False,
            help=
            "when put together with builtin HTTP server, forces django to run with settings.DEBUG=True"
        ),
        make_option('-s',
                    '--scenarios',
                    action='store',
                    dest='scenarios',
                    default=None,
                    help='Comma separated list of scenarios to run'),
        make_option("-t",
                    "--tag",
                    dest="tags",
                    type="str",
                    action='append',
                    default=None,
                    help='Tells lettuce to run the specified tags only; '
                    'can be used multiple times to define more tags '
                    '(prefixing tags with "-" will exclude them and '
                    'prefixing with "~" will match approximate words)'),
        make_option('--with-xunit',
                    action='store_true',
                    dest='enable_xunit',
                    default=False,
                    help='Output JUnit XML test results to a file'),
        make_option(
            '--xunit-file',
            action='store',
            dest='xunit_file',
            default=None,
            help='Write JUnit XML to this file. Defaults to lettucetests.xml'),
        make_option("--failfast",
                    dest="failfast",
                    default=False,
                    action="store_true",
                    help='Stop running at the first failure'),
        make_option("--pdb",
                    dest="auto_pdb",
                    default=False,
                    action="store_true",
                    help='Launches an interactive debugger upon error'),
    )

    def stopserver(self, failed=False):
        raise SystemExit(int(failed))

    def get_paths(self, args, apps_to_run, apps_to_avoid):
        if args:
            for path, exists in zip(args, map(os.path.exists, args)):
                if not exists:
                    sys.stderr.write(
                        "You passed the path '%s', but it does not exist.\n" %
                        path)
                    sys.exit(1)
            else:
                paths = args
        else:
            paths = harvest_lettuces(
                apps_to_run,
                apps_to_avoid)  # list of tuples with (path, app_module)

        return paths

    def handle(self, *args, **options):
        setup_test_environment()

        settings.DEBUG = options.get('debug', False)

        verbosity = int(options.get('verbosity', 4))
        apps_to_run = tuple(options.get('apps', '').split(","))
        apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
        run_server = not options.get('no_server', False)
        test_database = options.get('test_database', False)
        tags = options.get('tags', None)
        failfast = options.get('failfast', False)
        auto_pdb = options.get('auto_pdb', False)

        if test_database:
            migrate_south = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except ImportError:
                # South is not installed, so there is nothing to migrate.
                migrate_south = False

            from django.test.simple import DjangoTestSuiteRunner
            self._testrunner = DjangoTestSuiteRunner()
            self._testrunner.setup_test_environment()
            self._old_db_config = self._testrunner.setup_databases()

            call_command(
                'syncdb',
                verbosity=0,
                interactive=False,
            )
            if migrate_south:
                call_command(
                    'migrate',
                    verbosity=0,
                    interactive=False,
                )

        server = Server(port=options['port'])

        paths = self.get_paths(args, apps_to_run, apps_to_avoid)
        if run_server:
            try:
                server.start()
            except LettuceServerException, e:
                raise SystemExit(e)

        os.environ['SERVER_NAME'] = server.address
        os.environ['SERVER_PORT'] = str(server.port)

        failed = False

        registry.call_hook('before', 'harvest', locals())
        results = []
        try:
            for path in paths:
                app_module = None
                if isinstance(path, tuple) and len(path) == 2:
                    path, app_module = path

                if app_module is not None:
                    registry.call_hook('before_each', 'app', app_module)

                runner = Runner(path,
                                options.get('scenarios'),
                                verbosity,
                                enable_xunit=options.get('enable_xunit'),
                                xunit_filename=options.get('xunit_file'),
                                tags=tags,
                                failfast=failfast,
                                auto_pdb=auto_pdb)

                result = runner.run()
                if app_module is not None:
                    registry.call_hook('after_each', 'app', app_module, result)

                results.append(result)
                if not result or result.steps != result.steps_passed:
                    failed = True
        except SystemExit, e:
            failed = e.code
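
The registry.call_hook('before'/'after', 'harvest', ...) and ('before_each'/'after_each', 'app', ...) calls in handle() are the attachment points for lettuce terrain hooks. A sketch of a terrain.py that uses them, assuming lettuce's documented Django hook decorators are available (the hook bodies are purely illustrative):

# terrain.py (sketch) -- loaded by lettuce when the harvest command runs
from lettuce import before, after


@before.harvest
def announce(variables):
    # Receives the locals() dict passed to call_hook('before', 'harvest', ...).
    print "starting harvest"


@before.each_app
def prepare_app(app_module):
    print "preparing %s" % app_module.__name__


@after.each_app
def report_app(app_module, result):
    # `result` is the same object handle() checks for steps/steps_passed.
    print "%s: %d/%d steps passed" % (
        app_module.__name__, result.steps_passed, result.steps)
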
Example #36
0
class Benchmark(GenericBenchmark):
    def __init__(self, *args, **kwargs):
        global options
        options = LazyStruct(**kwargs)
        self.test_runner = DjangoTestSuiteRunner()
        self.prepare()

    def prepare_oss_list(self):
        return [OssGenerator(idx=idx, fs=self.fs_entity)
                for idx in range(0, options.oss)]

    def prepare_mds_list(self):
        return [MdsGenerator(fs=self.fs_entity)]

    def step_stats(self):
        """Generate stats for all servers in a single step"""
        update_servers = []
        for server in self.server_list():
            stats = {'node': {},
                    'lustre': {'target': {}}}
            for node_stat in server.stats.keys():
                stats['node'][node_stat] = server.stats[node_stat]

            # make this match up with what comes in from an update scan
            stats['lustre']['lnet'] = stats['node']['lnet']

            for target in server.target_list:
                stats['lustre']['target'][target.name] = {}
                for target_stat in target.stats.keys():
                    stats['lustre']['target'][target.name][target_stat] = target.stats[target_stat]
            update_servers.append([server.entity, stats])

        return update_servers

    def precreate_stats(self):
        self.stats_list = []

        steps = range(0, options.duration, options.frequency)
        for idx, v in enumerate(steps):
            sys.stderr.write("\rPrecreating stats... (%d/%d)" % (idx, len(steps)))
            self.stats_list.append(self.step_stats())

        sys.stderr.write("\rPrecreating stats... Done.        \n")

    def prepare(self):
        from south.management.commands import patch_for_test_db_setup

        self.test_runner.setup_test_environment()
        # This is necessary to ensure that we use django.core.syncdb()
        # instead of south's hacked syncdb()
        patch_for_test_db_setup()
        self.old_db_config = self.test_runner.setup_databases()

        mgs_host = ManagedHost.objects.create(
                address="mgs",
                fqdn="mgs",
                nodename="mgs")
        mgs_vol = Volume.objects.create(label="mgs")
        VolumeNode.objects.create(host = mgs_host,
                                  path = uuid.uuid4(),
                                  primary = True,
                                  use = True,
                                  volume = mgs_vol)
        self.mgs, mounts = ManagedMgs.create_for_volume(mgs_vol.pk, name="MGS")
        self.fs_entity = ManagedFilesystem.objects.create(name=options.fsname,
                                                          mgs=self.mgs)
        self.oss_list = self.prepare_oss_list()
        self.mds_list = self.prepare_mds_list()

        if not options.no_precreate:
            self.precreate_stats()

    def get_stats_size(self):
        stats_size = LazyStruct()
        from django.db import connection
        cursor = connection.cursor()
        if 'postgres' in connection.settings_dict['ENGINE']:
            stats_size.row_count = stats_size.data = stats_size.index = 0

            for model in Stats:
                cursor.execute("select count(id) as rows, pg_relation_size('{0}') as data_length, pg_total_relation_size('{0}') - pg_relation_size('{0}') as index_length from {0}".format(model._meta.db_table))
                rows, data, index = cursor.fetchone()
                stats_size.row_count += rows
                stats_size.data += data
                stats_size.index += index
        else:
            raise RuntimeError("Unsupported DB: %s" % connection.settings_dict['ENGINE'])
        return stats_size

    def server_list(self):
        return self.mds_list + self.oss_list

    def store_metrics(self, scan):
        return scan.store_metrics()

    def run(self):
        def t2s(t):
            return time.strftime("%H:%M:%S", time.localtime(t))

        def s2s(s):
            if s > 600:
                from datetime import timedelta, datetime
                d = timedelta(seconds=int(s)) + datetime(1, 1, 1)
                return "%.2d:%.2d:%.2d:%.2d" % (d.day - 1, d.hour, d.minute, d.second)
            else:
                return "%d" % s

        stats_size_start = self.get_stats_size()

        scan = UpdateScan()
        run_start = time.time()
        run_count = 0
        create_interval = 0
        create_count = 0
        start_la = os.getloadavg()
        last_width = 0
        print "window start: %s, window stop: %s" % (t2s(run_start),
                                       t2s(run_start + options.duration))
        update_times = range(int(run_start),
                             int(run_start + options.duration),
                             options.frequency)
        for stats_idx, update_time in enumerate(update_times):
            new_timing_line = "\r%s" % t2s(update_time)
            sys.stderr.write(new_timing_line)
            store_start = time.time()
            count = 0

            if options.no_precreate:
                step_stats_list = self.step_stats()
            else:
                step_stats_list = self.stats_list[stats_idx]

            server_stats_count = 0
            for step_stats in step_stats_list:
                scan.host = step_stats[0]
                scan.host_data = {'metrics': {'raw': step_stats[1]}}
                scan.update_time = update_time
                count += self.store_metrics(scan)
                # Since we've hard-coded the server stats, we need to record
                # the actual number to make the reporting accurate.
                if options.server_stats == 0:
                    for key in ['meminfo', 'lnet', 'cpustats']:
                        server_stats_count += len(step_stats[1]['node'][key])

            # Terrible hack to make reporting accurate.
            if options.server_stats == 0:
                options.server_stats = server_stats_count

            run_count += count
            store_end = time.time()
            interval = store_end - store_start
            rate = count / interval
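            # "+" means the step finished within one sampling interval, "-" that it fell behind.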
            meter = "+" if interval < options.frequency else "-"
            seconds_left = (len(update_times) - stats_idx) * interval
            timing_stats = ": inserted %d stats (rate: %lf stats/sec, complete in: %s) %s" % (count, rate, s2s(seconds_left), meter)
            current_line_width = len(new_timing_line + timing_stats)
            if current_line_width < last_width:
                sys.stderr.write(new_timing_line + timing_stats + " " * (last_width - current_line_width))
            else:
                sys.stderr.write(timing_stats)
            last_width = current_line_width

            # Record the cost of the very first step (which includes creating the
            # stats series) so it can be excluded from the steady-state rate below.
            if not options.include_create and update_time == int(run_start):
                create_interval = interval
                create_count = count

        run_end = time.time()
        end_la = os.getloadavg()

        stats_size_end = self.get_stats_size()

        run_info = LazyStruct()
        run_info.step_count = options.duration / options.frequency
        run_info.run_count = run_count
        run_info.run_interval = run_end - run_start - create_interval
        run_info.run_rate = (run_count - create_count) / run_info.run_interval
        run_info.create_interval = create_interval
        run_info.create_count = create_count
        run_info.start_load_avg = start_la
        run_info.end_load_avg = end_la
        run_info.stats_data_used = stats_size_end.data - stats_size_start.data
        run_info.stats_index_used = stats_size_end.index - stats_size_start.index
        run_info.stats_rows_used = stats_size_end.row_count - stats_size_start.row_count

        self.print_report(run_info)

    def profile_system(self):
        def _read_lines(filename):
            fh = open(filename)
            try:
                return [line.rstrip("\n") for line in fh.readlines()]
            finally:
                fh.close()

        def _cpu_info():
            count = 0
            speed = 0
            for line in _read_lines("/proc/cpuinfo"):
                if 'processor' in line:
                    count += 1
                    continue

                if 'cpu MHz' in line:
                    speed = float(line.split()[3])
                    continue

            return {'count': count, 'speed': speed}

        def _mem_info():
            mem_info = {}
            for line in _read_lines("/proc/meminfo"):
                for query in ["MemTotal", "MemFree", "SwapTotal", "SwapFree"]:
                    if query in line:
                        mem_info[query] = float(line.split()[1])
                        break

            mem_info['pct_mem_used'] = ((mem_info['MemTotal'] - mem_info['MemFree']) / mem_info['MemTotal']) * 100
            try:
                mem_info['pct_swap_used'] = ((mem_info['SwapTotal'] - mem_info['SwapFree']) / mem_info['SwapTotal']) * 100
            except ZeroDivisionError:
                mem_info['pct_swap_used'] = 0.0
            return mem_info

        profile = LazyStruct()
        cpu_info = _cpu_info()
        profile.cpu_count = cpu_info['count']
        profile.cpu_speed = cpu_info['speed']
        mem_info = _mem_info()
        profile.mem_total = mem_info['MemTotal']
        profile.mem_pct_used = mem_info['pct_mem_used']
        profile.swap_total = mem_info['SwapTotal']
        profile.swap_pct_used = mem_info['pct_swap_used']

        return profile

    # TODO: Customizable output formats (csv, tsv, etc.)
    def print_report(self, run_info):
        print "\n"
        try:
            profile = self.profile_system()
            print "CPUs: %d @ %.2f GHz, Mem: %d MB real (%.2f%% used) / %d MB swap (%.2f%% used)" % (profile.cpu_count, (profile.cpu_speed / 1000), (profile.mem_total / 1000), profile.mem_pct_used, (profile.swap_total / 1000), profile.swap_pct_used)
        except IOError:
            print "No system profile available (on a mac?)"
        print "Load averages (1/5/15): start: %.2f/%.2f/%.2f, end: %.2f/%.2f/%.2f" % (run_info.start_load_avg + run_info.end_load_avg)
        print "counts: OSS: %d, OSTs/OSS: %d (%d total); stats-per: OSS: %d, MDS: %d" % (options.oss, options.ost, (options.oss * options.ost), ((options.ost * options.ost_stats) + options.server_stats), (options.mdt_stats + options.server_stats))
        print "run count (%d stats) / run time (%.2f sec) = run rate (%.2f stats/sec)" % (run_info.run_count, run_info.run_interval, run_info.run_rate)
        print "%d steps, %d stats/step, duration %d" % (run_info.step_count, run_info.run_count / run_info.step_count, options.duration)

        def _to_mb(in_bytes):
            return in_bytes * 1.0 / (1024 * 1024)

        stats_total_used = run_info.stats_data_used + run_info.stats_index_used
        print "stats rows: %d, space used: %.2f MB (%.2f MB data, %.2f MB index)" % (run_info.stats_rows_used, _to_mb(stats_total_used), _to_mb(run_info.stats_data_used), _to_mb(run_info.stats_index_used))

    def cleanup(self):
        self.test_runner.teardown_databases(self.old_db_config)
        self.test_runner.teardown_test_environment()
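
Tying Example #36 together: the intended call sequence is construct, run, cleanup, and the keyword arguments become the module-level `options` LazyStruct that the methods read. A hedged driver sketch (the concrete values are illustrative, and it assumes OssGenerator/MdsGenerator need nothing beyond these options):

def main():
    # Keyword names mirror the options.<name> attributes the class reads.
    bench = Benchmark(fsname="benchfs", oss=4, ost=2,
                      oss_stats=20, mdt_stats=20, server_stats=0,
                      duration=600, frequency=10,
                      no_precreate=False, include_create=False)
    try:
        bench.run()
    finally:
        # Mirror prepare(): drop the test databases and restore the environment.
        bench.cleanup()


if __name__ == '__main__':
    main()
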
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner.
    Also runs PEP8 and Coverage checks.
    """
    def __init__(self, *args, **kwargs):
        self.configure()
        self.cov = coverage()
        self.cov.start()
        self.packages = self.resolve_packages()

        parser = argparse.ArgumentParser()
        parser.add_argument('-a', '--autoreload', dest='autoreload',
                action='store_const', const=True, default=False,)
        parser.add_argument('-f', '--failfast', dest='failfast',
                action='store_const', const=True, default=False,)
        parser.add_argument('-l', '--label', dest='label')
        self.options = vars(parser.parse_args(sys.argv[2:]))
        sys.argv = sys.argv[:2]

        super(SetupTestSuite, self).__init__(tests=self.build_tests(),
                *args, **kwargs)

        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=1,
            interactive=True,
            failfast=False
        )
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def handle_label_exception(self, exception):
        """
        Check whether the exception was caused by a bad label being
        provided. If so, raise LabelException (which will force an exit);
        otherwise re-raise the original exception.

        The check looks for particular error messages, which obviously sucks.
        TODO: Implement a cleaner test.
        """
        markers = [
            'no such test method',
            'should be of the form app.TestCase or app.TestCase.test_method',
            'App with label',
            'does not refer to a test',
        ]
        if any(marker in exception.message for marker in markers):
            log.info(exception)
            raise LabelException(exception)
        else:
            raise exception

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        TODO: Cleanup/simplify this method, flow too complex,
        too much duplication.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.db.models import get_app
        from django.test.simple import build_suite, build_test

        tests = []
        packages = [self.options['label'], ] if \
                self.options['label'] else self.packages
        for package in packages:
            try:
                if not self.options['autoreload']:
                    if self.options['label']:
                        try:
                            tests.append(build_test(package))
                        except (ImproperlyConfigured, ValueError), e:
                            self.handle_label_exception(e)
                    else:
                        app = get_app(package)
                        tests.append(build_suite(app))
                else:
                    # Wait for exceptions to be resolved.
                    exception = None
                    while True:
                        try:
                            if self.options['label']:
                                try:
                                    tests.append(build_test(package))
                                except (ImproperlyConfigured, ValueError), e:
                                    self.handle_label_exception(e)
                            else:
                                app = get_app(package)
                                tests.append(build_suite(app))
                            break
                        except LabelException:
                            raise
                        except Exception, e:
                            if exception != str(e):
                                traceback.print_exc()
                            exception = str(e)
                            time.sleep(1)