class LemonwiseReviewsTestSetup(Plugin):
    """
    Nose plugin that sets up the environment needed to test.

    Creates the Django test databases once per nose session (in
    ``begin``) and destroys them when the session ends (in ``finalize``).
    """
    enabled = True
    name = 'testsetup-lemonwise'
    # Saved return value of setup_databases(); needed for teardown.
    old_config = None
    # DjangoTestSuiteRunner instance created in begin().
    runner = None

    def __init__(self, *args, **kwargs):
        # Import client here to patch Django's own version
        from lemonwise.utils.testharness import client
        super(LemonwiseReviewsTestSetup, self).__init__(*args, **kwargs)

    def options(self, parser, env):
        # No extra command-line options; delegate to the base Plugin.
        super(LemonwiseReviewsTestSetup, self).options(parser, env)

    def begin(self):
        """Called by nose before any test runs: build the test DBs."""
        from django.test.simple import DjangoTestSuiteRunner
        setup_environ(settings)
        self.runner = DjangoTestSuiteRunner()
        self.runner.setup_test_environment()
        self.old_config = self.runner.setup_databases()

    def finalize(self, result):
        """Called by nose after the whole run: drop the test DBs."""
        self.runner.teardown_databases(self.old_config)
        self.runner.teardown_test_environment()
class DBTestCase(TestCase, AssertQueriesCountMixin):
    """TestCase that builds a fresh Django test database for every test.

    NOTE: setup_databases()/teardown_databases() run per test method,
    which is slow but gives full isolation. Subclasses override the
    lower-case ``setup``/``teardown`` hooks instead of setUp/tearDown.
    """
    # test_runner: per-test DjangoTestSuiteRunner; old_config: its
    # setup_databases() return value, required for teardown.
    test_runner, old_config = None, None

    def setUp(self):
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner()
        self.before_environment_setup()  # HOOK
        self.test_runner.setup_test_environment()
        self.before_database_setup()  # HOOK
        self.old_config = self.test_runner.setup_databases()
        self.setup()

    def tearDown(self):
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.teardown()

    # just to make it look like pep8
    def before_environment_setup(self):
        # Hook: runs before setup_test_environment().
        pass

    def before_database_setup(self):
        # Hook: runs before setup_databases().
        pass

    def setup(self):
        # Hook: per-test setup, runs after the DB exists.
        pass

    def teardown(self):
        # Hook: per-test teardown, runs after the DB is destroyed.
        pass
class DBTestCase(TestCase, AssertQueriesCountMixin):
    """Per-test Django database fixture with overridable hook methods.

    Each test method gets a freshly created test database; subclasses
    customise behaviour through the hook methods rather than by
    overriding setUp/tearDown directly.
    """
    test_runner, old_config = None, None

    def setUp(self):
        from django.test.simple import DjangoTestSuiteRunner
        runner = DjangoTestSuiteRunner()
        self.test_runner = runner
        self.before_environment_setup()  # HOOK
        runner.setup_test_environment()
        self.before_database_setup()  # HOOK
        self.old_config = runner.setup_databases()
        self.setup()

    def tearDown(self):
        runner = self.test_runner
        runner.teardown_databases(self.old_config)
        runner.teardown_test_environment()
        self.teardown()

    # Hook methods below are deliberate no-ops for subclasses to override.
    def before_environment_setup(self):
        """Runs before the test environment is set up."""
        pass

    def before_database_setup(self):
        """Runs before the test databases are created."""
        pass

    def setup(self):
        """Per-test setup hook; the database exists at this point."""
        pass

    def teardown(self):
        """Per-test teardown hook; runs after environment teardown."""
        pass
def run(self):
    """Run the configured test list, optionally inside a Django
    test-database sandbox, then exit with the runner's requested code.

    Reads ``self.unit_test`` (runner kwargs, including 'catchbreak'),
    ``self.is_django`` and ``self.test_list``.
    """
    catchbreak = self.unit_test.pop("catchbreak")
    if catchbreak:
        from unittest.signals import installHandler
        installHandler()
    # Before use the output, we save the original for reset in the end
    org_out = sys.stdout
    org_err = sys.stderr
    # BUGFIX: pre-bind test_runner so the exit_request check below cannot
    # raise NameError (masking the real error) if TestRunner() itself fails.
    test_runner = None
    try:
        test_runner = TestRunner(**self.__dict__)
        if test_runner.exit_request is None:
            if self.is_django:
                from django.test.simple import DjangoTestSuiteRunner
                dtsr = DjangoTestSuiteRunner(**self.unit_test)
                dtsr.setup_test_environment()
                old_config = dtsr.setup_databases()
                try:
                    test_runner.run(self.test_list)
                finally:
                    # BUGFIX: always drop the test databases, even when
                    # the run raises, so stale DBs are not left behind.
                    dtsr.teardown_databases(old_config)
                    dtsr.teardown_test_environment()
            else:
                test_runner.run(self.test_list)
    except Exception:
        print_exception()
    finally:
        # Return the output
        sys.stdout = org_out
        sys.stderr = org_err
    if test_runner is not None and isinstance(test_runner.exit_request, int):
        sys.exit(test_runner.exit_request)
enable_xunit=options.get('enable_xunit'), xunit_filename=options.get('xunit_file'), tags=tags) result = runner.run() if app_module is not None: registry.call_hook('after_each', 'app', app_module, result) results.append(result) if not result or result.steps != result.steps_passed: failed = True except SystemExit, e: failed = e.code except Exception, e: failed = True import traceback traceback.print_exc(e) finally: try: registry.call_hook('after', 'harvest', results) stop_code = server.stop(failed) if stop_code != 0: failed = True DjangoTestSuiteRunner.teardown_databases(self._test_runner, self._created_db) DjangoTestSuiteRunner.teardown_test_environment(self._test_runner) except: import traceback traceback.print_exc() sys.exit(int(failed))
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner. Also runs PEP8 and Coverage
    checks.
    """
    def __init__(self, *args, **kwargs):
        self.configure()
        # Start coverage as early as possible so module-level code in the
        # packages under test is measured too.
        self.cov = coverage()
        self.cov.start()
        self.packages = self.resolve_packages()
        parser = argparse.ArgumentParser()
        parser.add_argument('-a', '--autoreload', dest='autoreload',
                            action='store_const', const=True, default=False,)
        parser.add_argument('-f', '--failfast', dest='failfast',
                            action='store_const', const=True, default=False,)
        parser.add_argument('-l', '--label', dest='label')
        # Only args after the first two (e.g. "setup.py test") are ours;
        # strip them so unittest machinery does not see them.
        self.options = vars(parser.parse_args(sys.argv[2:]))
        sys.argv = sys.argv[:2]
        super(SetupTestSuite, self).__init__(tests=self.build_tests(),
                                             *args, **kwargs)
        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=1,
            interactive=True,
            failfast=False
        )
        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        try:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
        except ImportError:
            pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def handle_label_exception(self, exception):
        """
        Check whether or not the exception was caused due to a bad label
        being provided. If so raise LabelException which will cause an exit,
        otherwise continue.

        The check looks for particular error messages, which obviously sucks.
        TODO: Implement a cleaner test.
        """
        markers = [
            'no such test method',
            'should be of the form app.TestCase or app.TestCase.test_method',
            'App with label',
            'does not refer to a test',
        ]
        if any(marker in exception.message for marker in markers):
            log.info(exception)
            raise LabelException(exception)
        else:
            raise exception

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        TODO: Cleanup/simplify this method, flow too complex, too much
        duplication.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.db.models import get_app
        from django.test.simple import build_suite, build_test
        tests = []
        packages = [self.options['label'], ] if \
            self.options['label'] else self.packages
        for package in packages:
            try:
                if not self.options['autoreload']:
                    if self.options['label']:
                        try:
                            tests.append(build_test(package))
                        except (ImproperlyConfigured, ValueError) as e:
                            self.handle_label_exception(e)
                    else:
                        app = get_app(package)
                        tests.append(build_suite(app))
                else:
                    # Wait for exceptions to be resolved.
                    # (autoreload mode: retry building until the import
                    # error the developer is editing away disappears)
                    exception = None
                    while True:
                        try:
                            if self.options['label']:
                                try:
                                    tests.append(build_test(package))
                                except (ImproperlyConfigured, ValueError) as e:
                                    self.handle_label_exception(e)
                            else:
                                app = get_app(package)
                                tests.append(build_suite(app))
                            break
                        except LabelException:
                            raise
                        except Exception as e:
                            # Only print each distinct error once.
                            if exception != str(e):
                                traceback.print_exc()
                            exception = str(e)
                            time.sleep(1)
            except ImproperlyConfigured as e:
                log.info("Warning: %s" % e)
            except ImportError as e:
                log.info("Warning: %s" % e)
        return tests

    def configure(self):
        """
        Configures Django settings.
        """
        from django.conf import settings
        from django.utils.importlib import import_module
        try:
            test_settings = import_module('test_settings')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)
        setting_attrs = {}
        for attr in dir(test_settings):
            # Copy every public attribute from the settings module.
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)
        if not settings.configured:
            settings.configure(**setting_attrs)

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        verbose = '--quiet' not in sys.argv
        self.cov.stop()
        if verbose:
            log.info("\nCoverage Report:")
            try:
                include = ['%s*' % package for package in self.packages]
                omit = ['*tests*']
                self.cov.report(include=include, omit=omit)
                self.cov.xml_report(include=include, omit=omit)
            except misc.CoverageException as e:
                log.info("Coverage Exception: %s" % e)

    def resolve_packages(self):
        """
        Frame hack to determine packages contained in module for testing.
        We ignore submodules (those containing '.')
        """
        # Walk up the call stack looking for a frame whose `self` carries
        # setuptools-style `py_modules`/`packages` attributes (i.e. the
        # distribution object that launched this suite).
        f = sys._getframe()
        while f:
            if 'self' in f.f_locals:
                locals_self = f.f_locals['self']
                py_modules = getattr(locals_self, 'py_modules', None)
                packages = getattr(locals_self, 'packages', None)
                top_packages = []
                if py_modules or packages:
                    if py_modules:
                        for module in py_modules:
                            if '.' not in module:
                                top_packages.append(module)
                    if packages:
                        for package in packages:
                            if '.' not in package:
                                top_packages.append(package)
                    return list(set(top_packages))
            f = f.f_back

    def pep8_report(self):
        """
        Outputs PEP8 report to screen and pep8.txt.
        """
        verbose = '--quiet' not in sys.argv
        if verbose:
            # Hook into stdout.
            old_stdout = sys.stdout
            sys.stdout = mystdout = StringIO()
            # Run Pep8 checks, excluding South migrations.
            pep8_style = pep8.StyleGuide()
            pep8_style.options.exclude.append('migrations')
            pep8_style.check_files(self.packages)
            # Restore stdout.
            sys.stdout = old_stdout
            # Save result to pep8.txt.
            result = mystdout.getvalue()
            output = open('pep8.txt', 'w')
            output.write(result)
            output.close()
            # Return Pep8 result
            if result:
                log.info("\nPEP8 Report:")
                log.info(result)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTestSuite, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.coverage_report()
        self.pep8_report()
        return result
class DjangoManager(object):
    """
    A Django plugin for py.test that handles creating and destroying the
    test environment and test database.

    Similar to Django's TransactionTestCase, a transaction is started and
    rolled back for each test. Additionally, the settings are copied before
    each test and restored at the end of the test, so it is safe to modify
    settings within tests.
    """

    def __init__(self, verbosity=0, noinput=False):
        self.verbosity = verbosity
        self.noinput = noinput
        self._old_database_name = None
        self._old_settings = []
        self._old_urlconf = None
        self.suite_runner = None
        self.old_db_config = None
        self.testcase = None

    def pytest_sessionstart(self, session):
        """Create the Django test environment and test database once."""
        #capture = py.io.StdCapture()
        # make sure the normal django syncdb command is run (do not run
        # migrations for tests) -- this is faster and less error prone
        management.get_commands()  # load commands dict
        management._commands['syncdb'] = 'django.core'  # disable `south` migrations
        self.suite_runner = DjangoTestSuiteRunner()
        self.suite_runner.setup_test_environment()
        self.old_db_config = self.suite_runner.setup_databases()
        settings.DATABASE_SUPPORTS_TRANSACTIONS = True
        #unused_out, err = capture.reset()
        #srsys.stderr.write(err)

    def pytest_sessionfinish(self, session, exitstatus):
        """Destroy the test database / environment at session end."""
        capture = py.io.StdCapture()
        self.suite_runner.teardown_test_environment()
        self.suite_runner.teardown_databases(self.old_db_config)
        unused_out, err = capture.reset()
        sys.stderr.write(err)

    def pytest_itemstart(self, item):
        # This lets us control the order of the setup/teardown
        # Yuck.
        if _is_unittest(self._get_item_obj(item)):
            item.setup = lambda: None
            item.teardown = lambda: None

    def pytest_runtest_setup(self, item):
        # Set the URLs if the py.test.urls() decorator has been applied
        if hasattr(item.obj, 'urls'):
            self._old_urlconf = settings.ROOT_URLCONF
            settings.ROOT_URLCONF = item.obj.urls
            clear_url_caches()
        item_obj = self._get_item_obj(item)
        testcase = _get_testcase(item_obj)
        # We have to run these here since py.test's unittest plugin skips
        # __call__()
        testcase.client = Client()
        testcase._pre_setup()
        testcase.setUp()

    def pytest_runtest_teardown(self, item):
        item_obj = self._get_item_obj(item)
        testcase = _get_testcase(item_obj)
        testcase.tearDown()
        if not isinstance(item_obj, TestCase):
            testcase._post_teardown()
        # NOTE(review): setup checks hasattr(item.obj, 'urls') but this
        # checks hasattr(item, 'urls') -- verify whether the URLconf is
        # actually restored for decorated tests.
        if hasattr(item, 'urls') and self._old_urlconf is not None:
            settings.ROOT_URLCONF = self._old_urlconf
            self._old_urlconf = None

    def _get_item_obj(self, item):
        # Bound-method introspection (py2 `im_self`); None for plain funcs.
        try:
            return item.obj.im_self
        except AttributeError:
            return None

    def pytest_namespace(self):
        """ Sets up the py.test.params decorator. """
        def params(funcarglist):
            """
            A decorator to make parametrised tests easy. Takes a list of
            dictionaries of keyword arguments for the function. A test is
            created for each dictionary.

            Example:

                @py.test.params([dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)])
                def test_equals(a, b):
                    assert a == b
            """
            def wrapper(function):
                function.funcarglist = funcarglist
                return function
            return wrapper

        def load_fixture(fixture):
            """
            Loads a fixture, useful for loading fixtures in funcargs.

            Example:

                def pytest_funcarg__articles(request):
                    py.test.load_fixture('test_articles')
                    return Article.objects.all()
            """
            call_command('loaddata', fixture, **{
                'verbosity': self.verbosity + 1,
                'commit': not settings.DATABASE_SUPPORTS_TRANSACTIONS
            })

        def urls(urlconf):
            """
            A decorator to change the URLconf for a particular test, similar
            to the `urls` attribute on Django's `TestCase`.

            Example:

                @py.test.urls('myapp.test_urls')
                def test_something(client):
                    assert 'Success!' in client.get('/some_path/')
            """
            def wrapper(function):
                function.urls = urlconf
                # BUGFIX: the decorator must return the decorated function;
                # previously it returned None, replacing the test with None.
                return function
            return wrapper

        return {'params': params, 'load_fixture': load_fixture, 'urls': urls}

    def pytest_generate_tests(self, metafunc):
        """
        Generates parametrised tests if the py.test.params decorator has
        been used.
        """
        for funcargs in getattr(metafunc.function, 'funcarglist', ()):
            metafunc.addcall(funcargs=funcargs)
class SetupTesting(TestSuite):
    """
    Test Suite configuring Django settings and using
    DjangoTestSuiteRunner as test runner. Also runs PEP8 and Coverage
    checks.
    """
    def __init__(self, *args, **kwargs):
        self.configure()
        # Start coverage before building the test suite so imports count.
        self.coverage = coverage()
        self.coverage.start()
        self.packages = get_packages(path=BASEDIR,
                                     exclude_packages=exclude_packages)
        # Hard-coded options: only the 'testing' app is run, no failfast,
        # no autoreload (empty string is falsy).
        self.options = {
            'failfast': '',
            'autoreload': '',
            'label': ['testing'],
        }
        super(SetupTesting, self).__init__(tests=self.build_tests(),
                                           *args, **kwargs)
        # Setup testrunner.
        from django.test.simple import DjangoTestSuiteRunner
        self.test_runner = DjangoTestSuiteRunner(
            verbosity=2,
            interactive=False,
            failfast=True
        )
        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        try:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
        except ImportError:
            pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def flake8_report(self):
        """ Outputs flake8 report. """
        log.info("\n\nFlake8 Report:")
        base = get_path([BASEDIR, 'tribus'])
        pys = find_files(path=base, pattern='*.py')
        flake8_style = get_style_guide()
        report = flake8_style.check_files(pys)
        # NOTE: exit code is computed but currently unused.
        exit_code = print_report(report, flake8_style)

    def pep257_report(self):
        """ Outputs PEP257 report. """
        log.info("\n\nPEP257 Report:")
        base = get_path([BASEDIR, 'tribus'])
        pys = find_files(path=base, pattern='*.py')
        report = pep257.check_files(pys)
        if len(report) > 0:
            for r in report:
                log.info(r)
        else:
            log.info("\nNo errors found!")

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        include = ['%s*' % package for package in self.packages]
        omit = ['*testing*']
        log.info("\n\nCoverage Report:")
        try:
            self.coverage.stop()
            self.coverage.report(include=include, omit=omit)
        except CoverageException as e:
            log.info("Coverage Exception: %s" % e)
        # On Travis CI, also publish results to coveralls.io.
        if os.environ.get('TRAVIS'):
            log.info("Submitting coverage to coveralls.io...")
            try:
                result = Coveralls()
                result.wear()
            except CoverallsException as e:
                log.error("Coveralls Exception: %s" % e)

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages.
        TODO: Cleanup/simplify this method, flow too complex, too much
        duplication.
        """
        from django.db.models import get_app
        from django.test.simple import build_suite
        tests = []
        app = get_app(self.options['label'][0])
        tests.append(build_suite(app))
        return tests

    def configure(self):
        """
        Configures Django settings.
        """
        from django.conf import settings
        from django.utils.importlib import import_module
        try:
            test_settings = import_module('tribus.config.testing')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)
        setting_attrs = {}
        for attr in dir(test_settings):
            # Copy every public attribute from the settings module.
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)
        if not settings.configured:
            settings.configure(**setting_attrs)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTesting, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        #self.coverage_report()
        #self.flake8_report()
        #self.pep257_report()
        return result
def teardown_test_environment(self, **kwargs):
    """Tear down Django's test environment, stop the helper process and
    remove the test scratch directory."""
    DjangoTestSuiteRunner.teardown_test_environment(self, **kwargs)
    os.kill(self._ecilop_process.pid, SIGTERM)
    # BUGFIX: SIGTERM is asynchronous -- reap the child before removing
    # the directory so (a) it cannot still be writing into TEST_ROOT_PATH
    # while rmtree runs, and (b) no zombie process is left behind.
    try:
        os.waitpid(self._ecilop_process.pid, 0)
    except OSError:
        pass  # not our direct child, or already reaped
    shutil.rmtree(TEST_ROOT_PATH)
def teardown_test_environment(self, **kwargs):
    """Tear down Django's test environment, stop the helper process and
    remove the test scratch directory."""
    DjangoTestSuiteRunner.teardown_test_environment(self, **kwargs)
    os.kill(self._ecilop_process.pid, SIGTERM)
    # BUGFIX: SIGTERM is asynchronous -- reap the child before removing
    # the directory so (a) it cannot still be writing into TEST_ROOT_PATH
    # while rmtree runs, and (b) no zombie process is left behind.
    try:
        os.waitpid(self._ecilop_process.pid, 0)
    except OSError:
        pass  # not our direct child, or already reaped
    shutil.rmtree(TEST_ROOT_PATH)
class Benchmark(GenericBenchmark):
    """Stats-insertion benchmark: builds a synthetic Lustre filesystem in a
    Django test database, feeds generated server/target stats through
    UpdateScan, and reports insertion rate and storage growth."""

    def __init__(self, *args, **kwargs):
        # Benchmark parameters are published as a module-level LazyStruct
        # so helper classes can read them too.
        global options
        options = LazyStruct(**kwargs)
        self.test_runner = DjangoTestSuiteRunner()
        self.prepare()

    def prepare_oss_list(self):
        # NOTE(review): the outer `[oss for oss in [...]]` is redundant --
        # the inner list comprehension alone would suffice.
        return [oss for oss in [OssGenerator(idx=idx, fs=self.fs_entity) for idx in range(0, options.oss)]]

    def prepare_mds_list(self):
        return [MdsGenerator(fs=self.fs_entity)]

    def step_stats(self):
        """Generate stats for all servers in a single step"""
        update_servers = []
        for server in self.server_list():
            stats = {'node': {}, 'lustre': {'target': {}}}
            for node_stat in server.stats.keys():
                stats['node'][node_stat] = server.stats[node_stat]
            # make this match up with what comes in from an update scan
            stats['lustre']['lnet'] = stats['node']['lnet']
            for target in server.target_list:
                stats['lustre']['target'][target.name] = {}
                for target_stat in target.stats.keys():
                    stats['lustre']['target'][target.name][target_stat] = target.stats[target_stat]
            update_servers.append([server.entity, stats])
        return update_servers

    def precreate_stats(self):
        """Pre-generate one stats payload per step to keep generation cost
        out of the timed insertion loop."""
        self.stats_list = []
        steps = range(0, options.duration, options.frequency)
        for idx, v in enumerate(steps):
            sys.stderr.write("\rPrecreating stats... (%d/%d)" % (idx, len(steps)))
            self.stats_list.append(self.step_stats())
        sys.stderr.write("\rPrecreating stats... Done. \n")

    def prepare(self):
        """Create the test DB and the synthetic MGS/filesystem entities."""
        from south.management.commands import patch_for_test_db_setup
        self.test_runner.setup_test_environment()
        # This is necessary to ensure that we use django.core.syncdb()
        # instead of south's hacked syncdb()
        patch_for_test_db_setup()
        self.old_db_config = self.test_runner.setup_databases()
        mgs_host = ManagedHost.objects.create(
            address="mgs", fqdn="mgs", nodename="mgs")
        mgs_vol = Volume.objects.create(label="mgs")
        VolumeNode.objects.create(host = mgs_host, path = uuid.uuid4(), primary = True, use = True, volume = mgs_vol)
        self.mgs, mounts = ManagedMgs.create_for_volume(mgs_vol.pk, name="MGS")
        self.fs_entity = ManagedFilesystem.objects.create(name=options.fsname, mgs=self.mgs)
        self.oss_list = self.prepare_oss_list()
        self.mds_list = self.prepare_mds_list()
        if not options.no_precreate:
            self.precreate_stats()

    def get_stats_size(self):
        """Return row count plus data/index byte usage of all Stats tables.
        PostgreSQL only (uses pg_relation_size)."""
        stats_size = LazyStruct()
        from django.db import connection
        cursor = connection.cursor()
        if 'postgres' in connection.settings_dict['ENGINE']:
            stats_size.row_count = stats_size.data = stats_size.index = 0
            for model in Stats:
                cursor.execute("select count(id) as rows, pg_relation_size('{0}') as data_length, pg_total_relation_size('{0}') - pg_relation_size('{0}') as index_length from {0}".format(model._meta.db_table))
                rows, data, index = cursor.fetchone()
                stats_size.row_count += rows
                stats_size.data += data
                stats_size.index += index
        else:
            raise RuntimeError("Unsupported DB: %s" % connection.settings_dict['ENGINE'])
        return stats_size

    def server_list(self):
        return self.mds_list + self.oss_list

    def store_metrics(self, scan):
        # Returns the number of stats stored for this scan.
        return scan.store_metrics()

    def run(self):
        """Timed insertion loop: replay one step of stats per `frequency`
        seconds of simulated time, then print the summary report."""
        def t2s(t):
            # epoch seconds -> HH:MM:SS
            return time.strftime("%H:%M:%S", time.localtime(t))

        def s2s(s):
            # seconds -> "DD:HH:MM:SS" for long spans, plain seconds otherwise
            if s > 600:
                from datetime import timedelta, datetime
                d = timedelta(seconds=int(s)) + datetime(1, 1, 1)
                return "%.2d:%.2d:%.2d:%.2d" % (d.day - 1, d.hour, d.minute, d.second)
            else:
                return "%d" % s

        stats_size_start = self.get_stats_size()
        scan = UpdateScan()
        run_start = time.time()
        run_count = 0
        create_interval = 0
        create_count = 0
        start_la = os.getloadavg()
        last_width = 0
        print "window start: %s, window stop: %s" % (t2s(run_start), t2s(run_start + options.duration))
        update_times = range(int(run_start), int(run_start + options.duration), options.frequency)
        for stats_idx, update_time in enumerate(update_times):
            new_timing_line = "\r%s" % t2s(update_time)
            sys.stderr.write(new_timing_line)
            store_start = time.time()
            count = 0
            if options.no_precreate:
                step_stats_list = self.step_stats()
            else:
                step_stats_list = self.stats_list[stats_idx]
            server_stats_count = 0
            for step_stats in step_stats_list:
                scan.host = step_stats[0]
                scan.host_data = {'metrics': {'raw': step_stats[1]}}
                scan.update_time = update_time
                count += self.store_metrics(scan)
                # Since we've hard-coded the server stats, we need to record
                # the actual number to make the reporting accurate.
                if options.server_stats == 0:
                    for key in ['meminfo', 'lnet', 'cpustats']:
                        server_stats_count += len(step_stats[1]['node'][key])
            # Terrible hack to make reporting accurate.
            if options.server_stats == 0:
                options.server_stats = server_stats_count
            run_count += count
            store_end = time.time()
            interval = store_end - store_start
            rate = count / interval
            # "+" means we kept up with the simulated frequency, "-" means not.
            meter = "+" if interval < options.frequency else "-"
            seconds_left = (len(update_times) - stats_idx) * interval
            timing_stats = ": inserted %d stats (rate: %lf stats/sec, complete in: %s) %s" % (count, rate, s2s(seconds_left), meter)
            # Pad with spaces when the new status line is shorter than the
            # previous one so stale characters are overwritten.
            current_line_width = len(new_timing_line + timing_stats)
            if current_line_width < last_width:
                sys.stderr.write(new_timing_line + timing_stats + " " * (last_width - current_line_width))
            else:
                sys.stderr.write(timing_stats)
            last_width = current_line_width
            # Optionally exclude the first (row-creating) step from the rate.
            if not options.include_create and update_time == int(run_start):
                create_interval = interval
                create_count = count
        run_end = time.time()
        end_la = os.getloadavg()
        stats_size_end = self.get_stats_size()
        run_info = LazyStruct()
        run_info.step_count = options.duration / options.frequency
        run_info.run_count = run_count
        run_info.run_interval = run_end - run_start - create_interval
        run_info.run_rate = (run_count - create_count) / run_info.run_interval
        run_info.create_interval = create_interval
        run_info.create_count = create_count
        run_info.start_load_avg = start_la
        run_info.end_load_avg = end_la
        run_info.stats_data_used = stats_size_end.data - stats_size_start.data
        run_info.stats_index_used = stats_size_end.index - stats_size_start.index
        run_info.stats_rows_used = stats_size_end.row_count - stats_size_start.row_count
        self.print_report(run_info)

    def profile_system(self):
        """Read CPU and memory info from /proc (Linux only; raises IOError
        elsewhere, which print_report catches)."""
        def _read_lines(filename):
            fh = open(filename)
            try:
                return [line.rstrip("\n") for line in fh.readlines()]
            finally:
                fh.close()

        def _cpu_info():
            count = 0
            speed = 0
            for line in _read_lines("/proc/cpuinfo"):
                if 'processor' in line:
                    count += 1
                    continue
                if 'cpu MHz' in line:
                    speed = float(line.split()[3])
                    continue
            return {'count': count, 'speed': speed}

        def _mem_info():
            mem_info = {}
            for line in _read_lines("/proc/meminfo"):
                for query in ["MemTotal", "MemFree", "SwapTotal", "SwapFree"]:
                    if query in line:
                        mem_info[query] = float(line.split()[1])
                        break
            mem_info['pct_mem_used'] = ((mem_info['MemTotal'] - mem_info['MemFree']) / mem_info['MemTotal']) * 100
            try:
                mem_info['pct_swap_used'] = ((mem_info['SwapTotal'] - mem_info['SwapFree']) / mem_info['SwapTotal']) * 100
            except ZeroDivisionError:
                # No swap configured.
                mem_info['pct_swap_used'] = 0.0
            return mem_info

        profile = LazyStruct()
        cpu_info = _cpu_info()
        profile.cpu_count = cpu_info['count']
        profile.cpu_speed = cpu_info['speed']
        mem_info = _mem_info()
        profile.mem_total = mem_info['MemTotal']
        profile.mem_pct_used = mem_info['pct_mem_used']
        profile.swap_total = mem_info['SwapTotal']
        profile.swap_pct_used = mem_info['pct_swap_used']
        return profile

    # TODO: Customizable output formats (csv, tsv, etc.)
    def print_report(self, run_info):
        """Print the benchmark summary: system profile, load averages,
        entity counts, insertion rates and storage growth."""
        print "\n"
        try:
            profile = self.profile_system()
            print "CPUs: %d @ %.2f GHz, Mem: %d MB real (%.2f%% used) / %d MB swap (%.2f%% used)" % (profile.cpu_count, (profile.cpu_speed / 1000), (profile.mem_total / 1000), profile.mem_pct_used, (profile.swap_total / 1000), profile.swap_pct_used)
        except IOError:
            # /proc is unavailable (e.g. macOS).
            print "No system profile available (on a mac?)"
        print "Load averages (1/5/15): start: %.2f/%.2f/%.2f, end: %.2f/%.2f/%.2f" % (run_info.start_load_avg + run_info.end_load_avg)
        print "counts: OSS: %d, OSTs/OSS: %d (%d total); stats-per: OSS: %d, MDS: %d" % (options.oss, options.ost, (options.oss * options.ost), ((options.ost * options.ost_stats) + options.server_stats), (options.mdt_stats + options.server_stats))
        print "run count (%d stats) / run time (%.2f sec) = run rate (%.2f stats/sec)" % (run_info.run_count, run_info.run_interval, run_info.run_rate)
        print "%d steps, %d stats/step, duration %d" % (run_info.step_count, run_info.run_count / run_info.step_count, options.duration)

        def _to_mb(in_bytes):
            return in_bytes * 1.0 / (1024 * 1024)
        stats_total_used = run_info.stats_data_used + run_info.stats_index_used
        print "stats rows: %d, space used: %.2f MB (%.2f MB data, %.2f MB index)" % (run_info.stats_rows_used, _to_mb(stats_total_used), _to_mb(run_info.stats_data_used), _to_mb(run_info.stats_index_used))

    def cleanup(self):
        """Drop the test database and restore the environment."""
        self.test_runner.teardown_databases(self.old_db_config)
        self.test_runner.teardown_test_environment()
tags=tags) result = runner.run() if app_module is not None: registry.call_hook('after_each', 'app', app_module, result) results.append(result) if not result or result.steps != result.steps_passed: failed = True except SystemExit, e: failed = e.code except Exception, e: failed = True import traceback traceback.print_exc(e) finally: try: registry.call_hook('after', 'harvest', results) stop_code = server.stop(failed) if stop_code != 0: failed = True DjangoTestSuiteRunner.teardown_databases( self._test_runner, self._created_db) DjangoTestSuiteRunner.teardown_test_environment( self._test_runner) except: import traceback traceback.print_exc() sys.exit(int(failed))
class SetupTestSuite(unittest.TestSuite):
    """
    Test Suite configuring Django settings and using DiscoverRunner or
    DjangoTestSuiteRunner as the test runner. Also runs PEP8 and Coverage
    checks.
    """
    def __init__(self, *args, **kwargs):
        # Start coverage before configure() so settings import is measured.
        self.cov = coverage()
        self.cov.start()
        self.configure()
        self.packages = self.resolve_packages()
        parser = argparse.ArgumentParser()
        parser.add_argument('-a', '--autoreload', dest='autoreload',
                            action='store_const', const=True, default=False,)
        parser.add_argument('-f', '--failfast', dest='failfast',
                            action='store_const', const=True, default=False,)
        parser.add_argument('-l', '--label', dest='label')
        # Only args after the first two (e.g. "setup.py test") are ours;
        # strip them so unittest machinery does not see them.
        self.options = vars(parser.parse_args(sys.argv[2:]))
        sys.argv = sys.argv[:2]
        runner_options = {
            'verbosity': 1,
            'interactive': True,
            'failfast': False,
        }
        # Django >= 1.8: use DiscoverRunner; older: DjangoTestSuiteRunner.
        if django.VERSION >= (1, 8):
            from django.test.runner import DiscoverRunner
            self.test_runner = DiscoverRunner(**runner_options)
            tests = self.test_runner.build_suite()
        else:
            from django.test.simple import DjangoTestSuiteRunner
            self.test_runner = DjangoTestSuiteRunner(**runner_options)
            tests = self.build_tests()
        super(SetupTestSuite, self).__init__(tests=tests, *args, **kwargs)
        # South patches the test management command to handle the
        # SOUTH_TESTS_MIGRATE setting. Apply that patch if South is installed.
        if django.VERSION < (1,7):
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except ImportError:
                pass
        self.test_runner.setup_test_environment()
        self.old_config = self.test_runner.setup_databases()

    def handle_label_exception(self, exception):
        """
        Check whether or not the exception was caused due to a bad label
        being provided. If so raise LabelException which will cause an exit,
        otherwise continue.

        The check looks for particular error messages, which obviously sucks.
        TODO: Implement a cleaner test.
        """
        markers = [
            'no such test method',
            'should be of the form app.TestCase or app.TestCase.test_method',
            'App with label',
            'does not refer to a test',
        ]
        if any(marker in exception.message for marker in markers):
            log.info(exception)
            raise LabelException(exception)
        else:
            raise exception

    def build_tests(self):
        """
        Build tests for inclusion in suite from resolved packages for <= 1.8
        TODO: Cleanup/simplify this method, flow too complex, too much
        duplication.
        """
        from django.core.exceptions import ImproperlyConfigured
        from django.test.simple import build_suite, build_test
        # Django >= 1.7 moved app access to the app registry.
        try:
            from django.apps import apps
            get_app = apps.get_app_config
        except ImportError:
            from django.db.models import get_app
        tests = []
        packages = [self.options['label'], ] if \
            self.options['label'] else self.packages
        for package in packages:
            try:
                if not self.options['autoreload']:
                    if self.options['label']:
                        try:
                            tests.append(build_test(package))
                        except (ImproperlyConfigured, ValueError) as e:
                            self.handle_label_exception(e)
                    else:
                        app = get_app(package)
                        tests.append(build_suite(app))
                else:
                    # Wait for exceptions to be resolved.
                    # (autoreload mode: retry building until the import
                    # error the developer is editing away disappears)
                    exception = None
                    while True:
                        try:
                            if self.options['label']:
                                try:
                                    tests.append(build_test(package))
                                except (ImproperlyConfigured, ValueError) as e:
                                    self.handle_label_exception(e)
                            else:
                                app = get_app(package)
                                tests.append(build_suite(app))
                            break
                        except LabelException:
                            raise
                        except Exception as e:
                            # Only print each distinct error once.
                            if exception != str(e):
                                traceback.print_exc()
                            exception = str(e)
                            time.sleep(1)
            except ImproperlyConfigured as e:
                log.info("Warning: %s" % traceback.format_exc())
            except ImportError as e:
                log.info("Warning: %s" % traceback.format_exc())
        return tests

    def configure(self):
        """
        Configures Django settings.
        """
        import django
        from django.conf import settings
        # django.utils.importlib was removed in Django 1.9.
        try:
            from django.utils.importlib import import_module
        except ImportError:
            from importlib import import_module
        try:
            test_settings = import_module('test_settings')
        except ImportError as e:
            log.info('ImportError: Unable to import test settings: %s' % e)
            sys.exit(1)
        setting_attrs = {}
        for attr in dir(test_settings):
            # Copy every public attribute from the settings module.
            if '__' not in attr:
                setting_attrs[attr] = getattr(test_settings, attr)
        if not settings.configured:
            settings.configure(**setting_attrs)
        # Django >= 1.7 requires explicit app registry setup.
        if hasattr(django, 'setup'):
            django.setup()

    def coverage_report(self):
        """
        Outputs Coverage report to screen and coverage.xml.
        """
        verbose = '--quiet' not in sys.argv
        self.cov.stop()
        if verbose:
            log.info("\nCoverage Report:")
            try:
                include = ['%s*' % package for package in self.packages]
                omit = ['*tests*']
                self.cov.report(include=include, omit=omit)
                self.cov.save()
                self.cov.xml_report(include=include, omit=omit)
            except misc.CoverageException as e:
                log.info("Coverage Exception: %s" % e)

    def resolve_packages(self):
        """
        Frame hack to determine packages contained in module for testing.
        We ignore submodules (those containing '.')
        """
        # Walk up the call stack looking for a frame whose `self` carries
        # setuptools-style `py_modules`/`packages` attributes (i.e. the
        # distribution object that launched this suite).
        f = sys._getframe()
        while f:
            if 'self' in f.f_locals:
                locals_self = f.f_locals['self']
                py_modules = getattr(locals_self, 'py_modules', None)
                packages = getattr(locals_self, 'packages', None)
                top_packages = []
                if py_modules or packages:
                    if py_modules:
                        for module in py_modules:
                            if '.' not in module:
                                top_packages.append(module)
                    if packages:
                        for package in packages:
                            if '.' not in package:
                                top_packages.append(package)
                    return list(set(top_packages))
            f = f.f_back

    def pep8_report(self):
        """
        Outputs PEP8 report to screen and pep8.txt.
        """
        verbose = '--quiet' not in sys.argv
        if verbose:
            # Hook into stdout.
            old_stdout = sys.stdout
            sys.stdout = mystdout = StringIO()
            # Run Pep8 checks, excluding South migrations.
            pep8_style = pep8.StyleGuide()
            pep8_style.options.exclude.append('migrations')
            pep8_style.options.exclude.append('south_migrations')
            pep8_style.check_files(self.packages)
            # Restore stdout.
            sys.stdout = old_stdout
            # Save result to pep8.txt.
            result = mystdout.getvalue()
            output = open('pep8.txt', 'w')
            output.write(result)
            output.close()
            # Return Pep8 result
            if result:
                log.info("\nPEP8 Report:")
                log.info(result)

    def run(self, result, *args, **kwargs):
        """
        Run the test, teardown the environment and generate reports.
        """
        result.failfast = self.options['failfast']
        result = super(SetupTestSuite, self).run(result, *args, **kwargs)
        self.test_runner.teardown_databases(self.old_config)
        self.test_runner.teardown_test_environment()
        self.coverage_report()
        self.pep8_report()
        return result