Example #1
def get_runner(config):
    runner = DjangoTestSuiteRunner(interactive=False)

    if config.option.no_db:

        def cursor_wrapper_exception(*args, **kwargs):
            raise RuntimeError(
                'No database access is allowed since --no-db was used!')

        def setup_databases():
            # Monkey patch CursorWrapper so any database access raises
            django.db.backends.util.CursorWrapper = cursor_wrapper_exception

        def teardown_databases(db_config):
            pass

        runner.setup_databases = setup_databases
        runner.teardown_databases = teardown_databases

    elif config.option.reuse_db:

        if not config.option.create_db:
            monkey_patch_creation_for_db_reuse()

        # Leave the database for the next test run
        runner.teardown_databases = lambda db_config: None

    return runner
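
For context, a minimal sketch of how a runner factory like this could be wired into a pytest plugin. The pytest_addoption/pytest_configure/pytest_unconfigure hooks are real pytest APIs, but the option wiring and session-level bookkeeping below are illustrative assumptions, not part of the original snippet.

# conftest.py -- illustrative sketch only.
def pytest_addoption(parser):
    # Option names mirror the config.option attributes read by get_runner().
    parser.addoption('--no-db', action='store_true', dest='no_db',
                     default=False, help='forbid database access')
    parser.addoption('--reuse-db', action='store_true', dest='reuse_db',
                     default=False, help='keep the test database between runs')
    parser.addoption('--create-db', action='store_true', dest='create_db',
                     default=False, help='force re-creation of the test database')


def pytest_configure(config):
    # Hypothetical session setup: build the runner once, create databases.
    runner = get_runner(config)
    runner.setup_test_environment()
    config._runner = runner
    config._db_config = runner.setup_databases()


def pytest_unconfigure(config):
    runner = getattr(config, '_runner', None)
    if runner is not None:
        runner.teardown_databases(config._db_config)
        runner.teardown_test_environment()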
Example #2
def teardown_database(actual_server):
    '''
    This will destroy your test database after all of your tests have executed.
    '''
    logger.info("Destroying the test database ...")

    DjangoTestSuiteRunner.teardown_databases(world.test_runner, world.created_db)
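
The world.test_runner and world.created_db attributes used above have to be populated by a matching setup hook. A minimal lettuce terrain sketch under that assumption; before.all and world are lettuce's real API, while the attribute names simply follow the snippet above.

# terrain.py -- illustrative counterpart to teardown_database().
from lettuce import before, world
from django.test.simple import DjangoTestSuiteRunner


@before.all
def setup_database():
    # Create the test database before any scenario runs.
    world.test_runner = DjangoTestSuiteRunner(interactive=False)
    world.test_runner.setup_test_environment()
    world.created_db = world.test_runner.setup_databases()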
Example #3
class DBTestCase(TestCase, AssertQueriesCountMixin):
    test_runner, old_config = None, None

    def setUp(self):
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner()
        self.before_environment_setup()  # HOOK
        self.test_runner.setup_test_environment()
        self.before_database_setup()  # HOOK
        self.old_config = self.test_runner.setup_databases()
        self.setup()

    def tearDown(self):
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.teardown()  # just to make it look like pep8

    def before_environment_setup(self):
        pass

    def before_database_setup(self):
        pass

    def setup(self):
        pass

    def teardown(self):
        pass
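
A short usage sketch for the hooks above; the app, model, and field names are hypothetical.

# Illustrative subclass -- myapp.models.MyModel does not exist in the source.
class MyModelTestCase(DBTestCase):

    def before_database_setup(self):
        # Runs after the test environment is ready, before the test
        # database is created.
        pass

    def setup(self):
        # Runs once the test database exists, so fixtures are safe here.
        from myapp.models import MyModel  # hypothetical
        self.obj = MyModel.objects.create(name='example')

    def test_object_created(self):
        self.assertEqual(self.obj.name, 'example')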
Example #4
    def test_setup_aliased_databases(self):
        """
        Two connections aliasing the same database name should only be
        destroyed once during teardown.
        """
        from django.db.backends.dummy.base import DatabaseCreation

        runner = DjangoTestSuiteRunner(verbosity=0)
        old_db_connections = db.connections
        old_destroy_test_db = DatabaseCreation.destroy_test_db
        old_create_test_db = DatabaseCreation.create_test_db
        try:
            destroyed_names = []
            DatabaseCreation.destroy_test_db = (
                lambda self, old_database_name, verbosity=1:
                    destroyed_names.append(old_database_name))
            DatabaseCreation.create_test_db = (
                lambda self, verbosity=1, autoclobber=False:
                    self._get_test_db_name())

            db.connections = db.ConnectionHandler({
                'default': {
                    'ENGINE': 'django.db.backends.dummy',
                    'NAME': 'dbname',
                },
                'other': {
                    'ENGINE': 'django.db.backends.dummy',
                    'NAME': 'dbname',
                }
            })

            old_config = runner.setup_databases()
            runner.teardown_databases(old_config)

            self.assertEqual(destroyed_names.count('dbname'), 1)
        finally:
            DatabaseCreation.create_test_db = old_create_test_db
            DatabaseCreation.destroy_test_db = old_destroy_test_db
            db.connections = old_db_connections
Example #5
    def test_setup_databases(self):
        """
        Test that setup_databases() doesn't fail with a dummy database
        backend.
        """
        runner = DjangoTestSuiteRunner(verbosity=0)
        old_db_connections = db.connections
        try:
            db.connections = db.ConnectionHandler({})
            old_config = runner.setup_databases()
            runner.teardown_databases(old_config)
        except Exception as e:
            self.fail("setup_databases/teardown_databases unexpectedly raised "
                      "an error: %s" % e)
        finally:
            db.connections = old_db_connections
Example #6
    def test_setup_aliased_default_database(self):
        """
        Test that setup_databases() doesn't fail when 'default' is aliased.
        """
        runner = DjangoTestSuiteRunner(verbosity=0)
        old_db_connections = db.connections
        try:
            db.connections = db.ConnectionHandler({
                'default': {
                    'NAME': 'dummy'
                },
                'aliased': {
                    'NAME': 'dummy'
                }
            })
            old_config = runner.setup_databases()
            runner.teardown_databases(old_config)
        except Exception as e:
            self.fail("setup_databases/teardown_databases unexpectedly raised "
                      "an error: %s" % e)
        finally:
            db.connections = old_db_connections
Example #7
def teardown_module(module):
    runner = DjangoTestSuiteRunner(interactive=False)
    runner.teardown_databases(module.config)
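
The module.config attribute implies a matching setup_module() in the same test module; a minimal sketch under that assumption:

def setup_module(module):
    runner = DjangoTestSuiteRunner(interactive=False)
    # A fuller setup would likely also call runner.setup_test_environment().
    module.config = runner.setup_databases()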
Example #8
class SetupTesting(TestSuite):

    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner.
    Also runs PEP8 and Coverage checks.
    """

    def __init__(self, *args, **kwargs):
        self.configure()
        self.coverage = coverage()
        self.coverage.start()
        self.packages = get_packages(
            path=BASEDIR,
            exclude_packages=exclude_packages)
        self.options = {
            'failfast': '',
            'autoreload': '',
            'label': ['testing'],
        }

        super(SetupTesting, self).__init__(tests=self.build_tests(),
                                           *args, **kwargs)

        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=2,
            interactive=False,
            failfast=True
        )
        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        try:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
        except ImportError:
            pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()


    def flake8_report(self):
        """
        Outputs flake8 report.
        """
        log.info("\n\nFlake8 Report:")
        base = get_path([BASEDIR, 'tribus'])
        pys = find_files(path=base, pattern='*.py')
        flake8_style = get_style_guide()
        report = flake8_style.check_files(pys)
        exit_code = print_report(report, flake8_style)


    def pep257_report(self):
        """
        Outputs PEP257 report.
        """
        log.info("\n\nPEP257 Report:")
        base = get_path([BASEDIR, 'tribus'])
        pys = find_files(path=base, pattern='*.py')
        report = pep257.check_files(pys)

        if len(report) > 0:
            for r in report:
                log.info(r)
        else:
            log.info("\nNo errors found!")


    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """

        include = ['%s*' % package for package in self.packages]
        omit = ['*testing*']

        log.info("\n\nCoverage Report:")
        try:
            self.coverage.stop()
            self.coverage.report(include=include, omit=omit)
        except CoverageException as e:
            log.info("Coverage Exception: %s" % e)

        if os.environ.get('TRAVIS'):
            log.info("Submitting coverage to coveralls.io...")
            try:
                result = Coveralls()
                result.wear()
            except CoverallsException as e:
                log.error("Coveralls Exception: %s" % e)


    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        TODO: Cleanup/simplify this method, flow too complex,
        too much duplication.
        """
        from django.db.models import get_app
        from django.test.simple import build_suite

        tests = []
        app = get_app(self.options['label'][0])
        tests.append(build_suite(app))

        return tests


    def configure(self):
        """
        Configures Django settings.
        """
        from django.conf import settings
        from django.utils.importlib import import_module

        try:
            test_settings = import_module('tribus.config.testing')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)

        setting_attrs = {}
        for attr in dir(test_settings):
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)

        if not settings.configured:
            settings.configure(**setting_attrs)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTesting, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        #self.coverage_report()
        #self.flake8_report()
        #self.pep257_report()
        return result
Example #9
class Benchmark(GenericBenchmark):
    def __init__(self, *args, **kwargs):
        global options
        options = LazyStruct(**kwargs)
        self.test_runner = DjangoTestSuiteRunner()
        self.prepare()

    def prepare_oss_list(self):
        return [OssGenerator(idx=idx, fs=self.fs_entity)
                for idx in range(0, options.oss)]

    def prepare_mds_list(self):
        return [MdsGenerator(fs=self.fs_entity)]

    def step_stats(self):
        """Generate stats for all servers in a single step"""
        update_servers = []
        for server in self.server_list():
            stats = {'node': {},
                    'lustre': {'target': {}}}
            for node_stat in server.stats.keys():
                stats['node'][node_stat] = server.stats[node_stat]

            # make this match up with what comes in from an update scan
            stats['lustre']['lnet'] = stats['node']['lnet']

            for target in server.target_list:
                stats['lustre']['target'][target.name] = {}
                for target_stat in target.stats.keys():
                    stats['lustre']['target'][target.name][target_stat] = target.stats[target_stat]
            update_servers.append([server.entity, stats])

        return update_servers

    def precreate_stats(self):
        self.stats_list = []

        steps = range(0, options.duration, options.frequency)
        for idx, v in enumerate(steps):
            sys.stderr.write("\rPrecreating stats... (%d/%d)" % (idx, len(steps)))
            self.stats_list.append(self.step_stats())

        sys.stderr.write("\rPrecreating stats... Done.        \n")

    def prepare(self):
        from south.management.commands import patch_for_test_db_setup

        self.test_runner.setup_test_environment()
        # This is necessary to ensure that we use django.core.syncdb()
        # instead of south's hacked syncdb()
        patch_for_test_db_setup()
        self.old_db_config = self.test_runner.setup_databases()

        mgs_host = ManagedHost.objects.create(
                address="mgs",
                fqdn="mgs",
                nodename="mgs")
        mgs_vol = Volume.objects.create(label="mgs")
        VolumeNode.objects.create(host=mgs_host,
                                  path=uuid.uuid4(),
                                  primary=True,
                                  use=True,
                                  volume=mgs_vol)
        self.mgs, mounts = ManagedMgs.create_for_volume(mgs_vol.pk, name="MGS")
        self.fs_entity = ManagedFilesystem.objects.create(name=options.fsname,
                                                          mgs=self.mgs)
        self.oss_list = self.prepare_oss_list()
        self.mds_list = self.prepare_mds_list()

        if not options.no_precreate:
            self.precreate_stats()

    def get_stats_size(self):
        stats_size = LazyStruct()
        from django.db import connection
        cursor = connection.cursor()
        if 'postgres' in connection.settings_dict['ENGINE']:
            stats_size.row_count = stats_size.data = stats_size.index = 0

            for model in Stats:
                cursor.execute("select count(id) as rows, pg_relation_size('{0}') as data_length, pg_total_relation_size('{0}') - pg_relation_size('{0}') as index_length from {0}".format(model._meta.db_table))
                rows, data, index = cursor.fetchone()
                stats_size.row_count += rows
                stats_size.data += data
                stats_size.index += index
        else:
            raise RuntimeError("Unsupported DB: %s" % connection.settings_dict['ENGINE'])
        return stats_size

    def server_list(self):
        return self.mds_list + self.oss_list

    def store_metrics(self, scan):
        return scan.store_metrics()

    def run(self):
        def t2s(t):
            return time.strftime("%H:%M:%S", time.localtime(t))

        def s2s(s):
            if s > 600:
                from datetime import timedelta, datetime
                d = timedelta(seconds=int(s)) + datetime(1, 1, 1)
                return "%.2d:%.2d:%.2d:%.2d" % (d.day - 1, d.hour, d.minute, d.second)
            else:
                return "%d" % s

        stats_size_start = self.get_stats_size()

        scan = UpdateScan()
        run_start = time.time()
        run_count = 0
        create_interval = 0
        create_count = 0
        start_la = os.getloadavg()
        last_width = 0
        print "window start: %s, window stop: %s" % (t2s(run_start),
                                       t2s(run_start + options.duration))
        update_times = range(int(run_start),
                             int(run_start + options.duration),
                             options.frequency)
        for stats_idx, update_time in enumerate(update_times):
            new_timing_line = "\r%s" % t2s(update_time)
            sys.stderr.write(new_timing_line)
            store_start = time.time()
            count = 0

            if options.no_precreate:
                step_stats_list = self.step_stats()
            else:
                step_stats_list = self.stats_list[stats_idx]

            server_stats_count = 0
            for step_stats in step_stats_list:
                scan.host = step_stats[0]
                scan.host_data = {'metrics': {'raw': step_stats[1]}}
                scan.update_time = update_time
                count += self.store_metrics(scan)
                # Since we've hard-coded the server stats, we need to record
                # the actual number to make the reporting accurate.
                if options.server_stats == 0:
                    for key in ['meminfo', 'lnet', 'cpustats']:
                        server_stats_count += len(step_stats[1]['node'][key])

            # Terrible hack to make reporting accurate.
            if options.server_stats == 0:
                options.server_stats = server_stats_count

            run_count += count
            store_end = time.time()
            interval = store_end - store_start
            rate = count / interval
            meter = "+" if interval < options.frequency else "-"
            seconds_left = (len(update_times) - stats_idx) * interval
            timing_stats = ": inserted %d stats (rate: %lf stats/sec, complete in: %s) %s" % (count, rate, s2s(seconds_left), meter)
            current_line_width = len(new_timing_line + timing_stats)
            if current_line_width < last_width:
                sys.stderr.write(new_timing_line + timing_stats + " " * (last_width - current_line_width))
            else:
                sys.stderr.write(timing_stats)
            last_width = current_line_width

            if not options.include_create and update_time == int(run_start):
                create_interval = interval
                create_count = count

        run_end = time.time()
        end_la = os.getloadavg()

        stats_size_end = self.get_stats_size()

        run_info = LazyStruct()
        run_info.step_count = options.duration / options.frequency
        run_info.run_count = run_count
        run_info.run_interval = run_end - run_start - create_interval
        run_info.run_rate = (run_count - create_count) / run_info.run_interval
        run_info.create_interval = create_interval
        run_info.create_count = create_count
        run_info.start_load_avg = start_la
        run_info.end_load_avg = end_la
        run_info.stats_data_used = stats_size_end.data - stats_size_start.data
        run_info.stats_index_used = stats_size_end.index - stats_size_start.index
        run_info.stats_rows_used = stats_size_end.row_count - stats_size_start.row_count

        self.print_report(run_info)

    def profile_system(self):
        def _read_lines(filename):
            fh = open(filename)
            try:
                return [line.rstrip("\n") for line in fh.readlines()]
            finally:
                fh.close()

        def _cpu_info():
            count = 0
            speed = 0
            for line in _read_lines("/proc/cpuinfo"):
                if 'processor' in line:
                    count += 1
                    continue

                if 'cpu MHz' in line:
                    speed = float(line.split()[3])
                    continue

            return {'count': count, 'speed': speed}

        def _mem_info():
            mem_info = {}
            for line in _read_lines("/proc/meminfo"):
                for query in ["MemTotal", "MemFree", "SwapTotal", "SwapFree"]:
                    if query in line:
                        mem_info[query] = float(line.split()[1])
                        break

            mem_info['pct_mem_used'] = ((mem_info['MemTotal'] - mem_info['MemFree']) / mem_info['MemTotal']) * 100
            try:
                mem_info['pct_swap_used'] = ((mem_info['SwapTotal'] - mem_info['SwapFree']) / mem_info['SwapTotal']) * 100
            except ZeroDivisionError:
                mem_info['pct_swap_used'] = 0.0
            return mem_info

        profile = LazyStruct()
        cpu_info = _cpu_info()
        profile.cpu_count = cpu_info['count']
        profile.cpu_speed = cpu_info['speed']
        mem_info = _mem_info()
        profile.mem_total = mem_info['MemTotal']
        profile.mem_pct_used = mem_info['pct_mem_used']
        profile.swap_total = mem_info['SwapTotal']
        profile.swap_pct_used = mem_info['pct_swap_used']

        return profile

    # TODO: Customizable output formats (csv, tsv, etc.)
    def print_report(self, run_info):
        print "\n"
        try:
            profile = self.profile_system()
            print "CPUs: %d @ %.2f GHz, Mem: %d MB real (%.2f%% used) / %d MB swap (%.2f%% used)" % (profile.cpu_count, (profile.cpu_speed / 1000), (profile.mem_total / 1000), profile.mem_pct_used, (profile.swap_total / 1000), profile.swap_pct_used)
        except IOError:
            print "No system profile available (on a mac?)"
        print "Load averages (1/5/15): start: %.2f/%.2f/%.2f, end: %.2f/%.2f/%.2f" % (run_info.start_load_avg + run_info.end_load_avg)
        print "counts: OSS: %d, OSTs/OSS: %d (%d total); stats-per: OSS: %d, MDS: %d" % (options.oss, options.ost, (options.oss * options.ost), ((options.ost * options.ost_stats) + options.server_stats), (options.mdt_stats + options.server_stats))
        print "run count (%d stats) / run time (%.2f sec) = run rate (%.2f stats/sec)" % (run_info.run_count, run_info.run_interval, run_info.run_rate)
        print "%d steps, %d stats/step, duration %d" % (run_info.step_count, run_info.run_count / run_info.step_count, options.duration)

        def _to_mb(in_bytes):
            return in_bytes * 1.0 / (1024 * 1024)

        stats_total_used = run_info.stats_data_used + run_info.stats_index_used
        print "stats rows: %d, space used: %.2f MB (%.2f MB data, %.2f MB index)" % (run_info.stats_rows_used, _to_mb(stats_total_used), _to_mb(run_info.stats_data_used), _to_mb(run_info.stats_index_used))

    def cleanup(self):
        self.test_runner.teardown_databases(self.old_db_config)
        self.test_runner.teardown_test_environment()
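
A hypothetical driver for the benchmark above. The keyword names mirror the options attributes the class reads (oss, ost, duration, frequency, fsname, and so on); the values are made up for illustration.

# Illustrative invocation -- values are assumptions, not from the source.
if __name__ == '__main__':
    benchmark = Benchmark(oss=4, ost=2, duration=600, frequency=10,
                          fsname='testfs', no_precreate=False,
                          include_create=False, server_stats=0,
                          ost_stats=0, mdt_stats=0)
    try:
        benchmark.run()
    finally:
        benchmark.cleanup()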
Example #10
                                tags=tags)

                result = runner.run()
                if app_module is not None:
                    registry.call_hook('after_each', 'app', app_module, result)

                results.append(result)
                if not result or result.steps != result.steps_passed:
                    failed = True
        except SystemExit as e:
            failed = e.code

        except Exception:
            failed = True
            import traceback
            # traceback.print_exc() reports the active exception; passing the
            # exception as an argument would be misread as the limit parameter.
            traceback.print_exc()
        finally:
            try:
                registry.call_hook('after', 'harvest', results)
                stop_code = server.stop(failed)
                if stop_code != 0:
                    failed = True
                DjangoTestSuiteRunner.teardown_databases(
                    self._test_runner, self._created_db)
                DjangoTestSuiteRunner.teardown_test_environment(
                    self._test_runner)
            except:
                import traceback
                traceback.print_exc()
            sys.exit(int(failed))
Example #11
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DiscoverRunner or DjangoTestSuiteRunner as the test runner.
    Also runs PEP8 and Coverage checks.
    """
    def __init__(self, *args, **kwargs):
        self.cov = coverage()
        self.cov.start()
        self.configure()
        self.packages = self.resolve_packages()

        parser = argparse.ArgumentParser()
        parser.add_argument('-a', '--autoreload', dest='autoreload',
                action='store_const', const=True, default=False,)
        parser.add_argument('-f', '--failfast', dest='failfast',
                action='store_const', const=True, default=False,)
        parser.add_argument('-l', '--label', dest='label')
        self.options = vars(parser.parse_args(sys.argv[2:]))
        sys.argv = sys.argv[:2]

        runner_options = {
            'verbosity': 1,
            'interactive': True,
            'failfast': False,
        }

        if django.VERSION >= (1, 8):
            from django.test.runner import DiscoverRunner
            self.test_runner = DiscoverRunner(**runner_options)
            tests = self.test_runner.build_suite()
        else:
            from django.test.simple import DjangoTestSuiteRunner
            self.test_runner = DjangoTestSuiteRunner(**runner_options)
            tests = self.build_tests()

        super(SetupTestSuite, self).__init__(tests=tests, *args, **kwargs)

        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        if django.VERSION < (1, 7):
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except ImportError:
                pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def handle_label_exception(self, exception):
        """
        Check whether or not the exception was caused due to a bad label
        being provided. If so raise LabelException which will cause an exit,
        otherwise continue.

        The check looks for particular error messages, which obviously sucks.
        TODO: Implement a cleaner test.
        """
        markers = [
            'no such test method',
            'should be of the form app.TestCase or app.TestCase.test_method',
            'App with label',
            'does not refer to a test',
        ]
        if any(marker in str(exception) for marker in markers):
            log.info(exception)
            raise LabelException(exception)
        else:
            raise exception

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages
        (Django < 1.8).
        TODO: Cleanup/simplify this method, flow too complex,
        too much duplication.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.test.simple import build_suite, build_test
        try:
            from django.apps import apps
            get_app = apps.get_app_config
        except ImportError:
            from django.db.models import get_app
        tests = []
        packages = [self.options['label'], ] if \
                self.options['label'] else self.packages
        for package in packages:
            try:
                if not self.options['autoreload']:
                    if self.options['label']:
                        try:
                            tests.append(build_test(package))
                        except (ImproperlyConfigured, ValueError) as e:
                            self.handle_label_exception(e)
                    else:
                        app = get_app(package)
                        tests.append(build_suite(app))
                else:
                    # Wait for exceptions to be resolved.
                    exception = None
                    while True:
                        try:
                            if self.options['label']:
                                try:
                                    tests.append(build_test(package))
                                except (ImproperlyConfigured, ValueError) as e:
                                    self.handle_label_exception(e)
                            else:
                                app = get_app(package)
                                tests.append(build_suite(app))
                            break
                        except LabelException:
                            raise
                        except Exception as e:
                            if exception != str(e):
                                traceback.print_exc()
                            exception = str(e)
                            time.sleep(1)
            except (ImproperlyConfigured, ImportError):
                log.info("Warning: %s" % traceback.format_exc())

        return tests

    def configure(self):
        """
        Configures Django settings.
        """

        import django
        from django.conf import settings
        try:
            from django.utils.importlib import import_module
        except ImportError:
            from importlib import import_module
        try:
            test_settings = import_module('test_settings')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)

        setting_attrs = {}
        for attr in dir(test_settings):
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)

        if not settings.configured:
            settings.configure(**setting_attrs)

        if hasattr(django, 'setup'):
            django.setup()

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        verbose = '--quiet' not in sys.argv
        self.cov.stop()
        if verbose:
            log.info("\nCoverage Report:")
            try:
                include = ['%s*' % package for package in self.packages]
                omit = ['*tests*']
                self.cov.report(include=include, omit=omit)
                self.cov.save()
                self.cov.xml_report(include=include, omit=omit)
            except misc.CoverageException as e:
                log.info("Coverage Exception: %s" % e)

    def resolve_packages(self):
        """
        Frame hack to determine packages contained in module for testing.
        We ignore submodules (those containing '.')
        """
        f = sys._getframe()
        while f:
            if 'self' in f.f_locals:
                locals_self = f.f_locals['self']
                py_modules = getattr(locals_self, 'py_modules', None)
                packages = getattr(locals_self, 'packages', None)

                top_packages = []
                if py_modules or packages:
                    if py_modules:
                        for module in py_modules:
                            if '.' not in module:
                                top_packages.append(module)
                    if packages:
                        for package in packages:
                            if '.' not in package:
                                top_packages.append(package)

                    return list(set(top_packages))
            f = f.f_back

    def pep8_report(self):
        """
        Outputs PEP8 report to screen and pep8.txt.
        """
        verbose = '--quiet' not in sys.argv
        if verbose:
            # Hook into stdout.
            old_stdout = sys.stdout
            sys.stdout = mystdout = StringIO()

            # Run Pep8 checks, excluding South migrations.
            pep8_style = pep8.StyleGuide()
            pep8_style.options.exclude.append('migrations')
            pep8_style.options.exclude.append('south_migrations')
            pep8_style.check_files(self.packages)

            # Restore stdout.
            sys.stdout = old_stdout

            # Save result to pep8.txt.
            result = mystdout.getvalue()
            output = open('pep8.txt', 'w')
            output.write(result)
            output.close()

            # Return Pep8 result
            if result:
                log.info("\nPEP8 Report:")
                log.info(result)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTestSuite, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.coverage_report()
        self.pep8_report()
        return result
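
Suites like this are typically wired into setuptools so that `python setup.py test` drives them; a minimal sketch, assuming the class is importable from a hypothetical myproject.testing module.

# setup.py -- illustrative; the dotted path is an assumption.
from setuptools import setup

setup(
    name='myproject',
    version='0.1',
    test_suite='myproject.testing.SetupTestSuite',
)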