def run_tests(self):
    """Run the illume pytest suite under coverage and exit with pytest's status.

    Side effects: configures DEBUG logging to illume-test.log, deletes the
    data directory before the run (and after, on success), writes an XML
    coverage report, and terminates the process via sys.exit.
    """
    from illume import config
    from coverage import Coverage
    config.setenv("test")
    from pytest import main
    from illume.util import remove_or_ignore_dir
    from logging import basicConfig, DEBUG
    basicConfig(level=DEBUG, filename="illume-test.log")
    project_root = config.get("PROJECT_ROOT")
    data_dir = config.get("DATA_DIR")
    # NOTE(review): '.coveagerc' looks like a typo of '.coveragerc' -- confirm
    # a file with this exact name really exists in the project root before
    # renaming it.  (Also: this is a file path, despite the _dir name.)
    cov_config_dir = os.path.join(project_root, '.coveagerc')
    cov = Coverage(config_file=cov_config_dir)
    # Remove data directory in case tests failed to complete last time.
    remove_or_ignore_dir(data_dir)
    cov.start()
    exit_code = main(shlex.split(self.pytest_args or ""))
    cov.stop()
    cov.xml_report()
    # Remove data directory if tests passed successfully. Keep it around
    # if tests failed so the developer can troubleshoot the problem.
    if exit_code == 0:
        remove_or_ignore_dir(data_dir)
    sys.exit(exit_code)
def run_tests(context, app=None, module=None, doctype=None, test=(),
        driver=None, profile=False, coverage=False, junit_xml_output=False,
        ui_tests=False, doctype_list_path=None, skip_test_records=False,
        skip_before_tests=False, failfast=False):
    "Run tests"
    # Runs the frappe test runner for a site, optionally measuring coverage
    # for the app under test.  Exits the process with the test status on CI.
    import frappe.test_runner
    tests = test

    site = get_site(context)
    frappe.init(site=site)

    frappe.flags.skip_before_tests = skip_before_tests
    frappe.flags.skip_test_records = skip_test_records

    if coverage:
        # Import locally (as the newer revision of this command does) so the
        # name is guaranteed to be bound when coverage measurement is
        # requested, and the dependency is only needed in that case.
        from coverage import Coverage

        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
        cov = Coverage(source=[source_path], omit=['*.html', '*.js', '*.css'])
        cov.start()

    ret = frappe.test_runner.main(app, module, doctype, context.verbose,
        tests=tests, force=context.force, profile=profile,
        junit_xml_output=junit_xml_output, ui_tests=ui_tests,
        doctype_list_path=doctype_list_path, failfast=failfast)

    if coverage:
        cov.stop()
        cov.save()

    # Normalize a fully green TestResult to exit status 0.
    if len(ret.failures) == 0 and len(ret.errors) == 0:
        ret = 0

    if os.environ.get('CI'):
        sys.exit(ret)
def run_tests(self): " Run coverage on unit test " # need to import here cause we are in a venv import six from coverage import Coverage coverage = Coverage() coverage.start() # Purge modules under test from sys.modules. The test loader will # re-import them from the build location. Required when 2to3 is used # with namespace packages. if six.PY3 and getattr(self.distribution, 'use_2to3', False): module = self.test_suite.split('.')[0] if module in _namespace_packages: del_modules = [] if module in sys.modules: del_modules.append(module) module += '.' for name in sys.modules: if name.startswith(module): del_modules.append(name) list(map(sys.modules.__delitem__, del_modules)) unittest_main( None, None, self._argv, testLoader=self._resolve_as_ep(self.test_loader), testRunner=self._resolve_as_ep(self.test_runner), exit=False, ) coverage.stop() coverage.save() coverage.report(show_missing=False)
def run_coverage(self, test_name):
    """Measure coverage while mapping a single test and return the raw data."""
    tracker = Coverage(source=self.config.source, omit=self.config.omit)
    tracker.start()
    self.map_test(test_name)
    tracker.stop()
    return tracker.get_data()
def _bootstrap(self):
    """Run the original process bootstrap with coverage collection active."""
    from coverage import Coverage

    collector = Coverage(data_suffix=True)
    collector.start()
    try:
        return original_bootstrap(self)
    finally:
        # Always flush the per-process data file, even if bootstrap raised.
        collector.stop()
        collector.save()
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage

    recorder = Coverage(data_suffix=True)
    recorder.start()
    try:
        return original_bootstrap(self)
    finally:
        # Stop and persist measurement no matter how bootstrap finished.
        recorder.stop()
        recorder.save()
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage

    # The rc file path was stashed on the multiprocessing module by the
    # patching code under PATCHED_MARKER.
    config_path = getattr(multiprocessing, PATCHED_MARKER)
    recorder = Coverage(data_suffix=True, config_file=config_path)
    recorder.start()
    try:
        return original_bootstrap(self)
    finally:
        recorder.stop()
        recorder.save()
class CoverageContext(object):
    """Context manager that records coverage (excluding the stdlib) for a block.

    Note: collection begins as soon as the object is constructed, not at
    ``__enter__``; ``__exit__`` stops collection without saving.
    """

    def __init__(self):
        from coverage import Coverage
        self._cov = Coverage(cover_pylib=False)
        self._cov.start()

    def __enter__(self):
        return self

    def __exit__(self, exc_kind, exc_value, trace):
        self._cov.stop()
def run(self):
    """Execute the parent command under coverage, then emit XML + HTML reports."""
    from coverage import Coverage

    tracker = Coverage(source=self.distribution.packages)
    tracker.start()
    super().run()
    tracker.stop()
    tracker.xml_report()
    tracker.html_report()
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage  # avoid circular import

    # The parent process published its rc-file location via the environment.
    config_path = os.environ[COVERAGE_RCFILE_ENV]
    recorder = Coverage(data_suffix=True, config_file=config_path)
    recorder.start()
    try:
        return original_bootstrap(self)
    finally:
        recorder.stop()
        recorder.save()
def run(self):
    """Run the pytest suite under coverage, write an HTML report, and exit.

    Exits the process with 0 on success and 1 on any failure.
    """
    import pytest

    cov = Coverage()
    cov.erase()
    cov.start()
    # pytest.main() returns an integer exit status (0 == all tests passed),
    # not a TestResult object.  The previous code dereferenced
    # result.failures / result.errors on it, which raised AttributeError.
    exit_status = pytest.main()
    cov.stop()
    cov.save()
    cov.html_report(directory="covhtml")
    # Preserve the original contract: exit 0 on success, 1 otherwise.
    sys.exit(int(bool(exit_status)))
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage
    # Presumably the patching code stashed the parent's coverage
    # configuration object on the multiprocessing module -- TODO confirm.
    coverage_config = getattr(multiprocessing, PATCHED_MARKER)
    coverage_config.parallel = True
    cov = Coverage()
    # NOTE(review): assigning .config after construction bypasses Coverage's
    # normal configuration loading and relies on coverage internals; fragile
    # across coverage versions.
    cov.config = coverage_config
    cov.start()
    try:
        return original_bootstrap(self)
    finally:
        # Always persist this process's data, even if bootstrap raised.
        cov.stop()
        cov.save()
def run_test_suite():
    """Run unit tests under coverage, enforce a coverage floor, then flake8.

    Exits non-zero on test failure, on coverage below TESTS_THRESHOLD, or on
    flake8 errors; prints colored progress banners throughout.
    """
    cov = Coverage(config_file=True)
    cov.erase()
    cov.start()
    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="python-doc-inherit", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))
    # Announce test run
    print(colored(text="Step 1: Running unit tests.\n", color="yellow", attrs=["bold"]))
    test_suite = TestLoader().discover(str(Path("tests").absolute()))
    result = TextTestRunner(verbosity=1).run(test_suite)
    if not result.wasSuccessful():
        # Exit status encodes how many tests went wrong.
        sys.exit(len(result.failures) + len(result.errors))
    # Announce coverage run
    print(colored(text="\nStep 2: Generating coverage results.\n", color="yellow", attrs=["bold"]))
    cov.stop()
    percentage = round(cov.report(show_missing=True), 2)
    cov.html_report(directory='cover')
    cov.save()
    if percentage < TESTS_THRESHOLD:
        # NOTE(review): the message's {old} placeholder prints the threshold,
        # not the previously measured figure.
        print(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " + "WAS {old}%, IS NOW {new}%.\n".format(old=TESTS_THRESHOLD, new=percentage), color="red", attrs=["bold"]))
        sys.exit(1)
    # Announce flake8 run
    sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"]))
    print("pep8 errors:")
    print("----------------------------------------------------------------------")
    from subprocess import call
    flake_result = call(["flake8", ".", "--count"])
    if flake_result != 0:
        print("pep8 errors detected.")
        print(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n", color="red", attrs=["bold"]))
        sys.exit(flake_result)
    else:
        print("None")
    # Announce success
    print(colored(text="\nTests completed successfully with no errors. Congrats!", color="green", attrs=["bold"]))
def with_coverage(f, source, *, report=True, data=False):
    """Call *f* under coverage limited to *source* and return its exit code.

    On a falsy (successful) exit code, optionally prints a terminal report
    plus an HTML report (*report*) and saves the data file (*data*).
    Coverage is stopped even if *f* raises.
    """
    cov = Coverage(source=[source])
    cov.start()
    try:
        exit_code = f()
    finally:
        cov.stop()
    if not exit_code:
        if report:
            print()  # Print blank line.
            cov.report(show_missing=False)
            cov.html_report()
        if data:
            cov.save()
    return exit_code
def main():
    """Run the whole application test suite under coverage and save the data."""
    cov = Coverage(
        omit=[
            "*passlib*",
            "*test*",
            "*tornado*",
            "*backports_abc*",
            "*singledispatch*",
            "*six*",
            "*certifi*",
            "*daemon*",
            "*funcsigs*",
            "*mock*",
            "*pbr*",
            "*pkg_resources*",
            "*tablib*",
        ]
    )
    cov.start()

    # Test modules are imported only after coverage has started (same import
    # order as before, since importing them may have side effects).
    from app_test import ApplicationTest
    from database_test import DatabaseTest
    from http_test import HTTPTestCase
    from procinfo_test import ProcInfoTest
    from user_test import UserTest
    from token_test import TokenTest
    from ws_test import WebSocketTestCase
    from unittest import TestLoader, TextTestRunner, TestSuite

    case_order = (
        ProcInfoTest,
        DatabaseTest,
        UserTest,
        TokenTest,
        HTTPTestCase,
        WebSocketTestCase,
        ApplicationTest,
    )
    case_loader = TestLoader()
    full_suite = TestSuite(
        case_loader.loadTestsFromTestCase(case) for case in case_order
    )
    TextTestRunner(verbosity=2).run(full_suite)

    cov.stop()
    cov.save()
def test_does_not_trace_files_outside_inclusion(tmpdir, branch, timid):
    """Coverage restricted to this file must measure exactly one file."""
    @given(st.booleans())
    def exercise(a):
        rnd()

    recorder = Coverage(
        config_file=False,
        data_file=str(tmpdir.join('.coverage')),
        branch=branch,
        timid=timid,
        include=[__file__],
    )
    # Turn any coverage warning into a hard failure.
    recorder._warn = escalate_warning
    recorder.start()
    exercise()
    recorder.stop()

    measured = list(recorder.get_data().measured_files())
    assert len(measured) == 1
def test_achieves_full_coverage(tmpdir, branch, timid):
    """Every line of the function under test must be hit by the property run."""
    @given(st.booleans(), st.booleans(), st.booleans())
    def exercise(a, b, c):
        some_function_to_test(a, b, c)

    recorder = Coverage(
        config_file=False,
        data_file=str(tmpdir.join('.coverage')),
        branch=branch,
        timid=timid,
    )
    # Turn any coverage warning into a hard failure.
    recorder._warn = escalate_warning
    recorder.start()
    exercise()
    recorder.stop()

    covered = recorder.get_data().lines(__file__)
    for lineno in hrange(LINE_START + 1, LINE_END + 1):
        assert lineno in covered
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage  # avoid circular import
    cov = Coverage(data_suffix=True)
    # Presumably silences the "module imported before coverage started"
    # warning, which is expected in a freshly spawned worker -- TODO confirm.
    cov._warn_preimported_source = False
    cov.start()
    debug = cov._debug
    try:
        if debug.should("multiproc"):
            debug.write("Calling multiprocessing bootstrap")
        return original_bootstrap(self)
    finally:
        # Trace and persist data regardless of how bootstrap finished.
        if debug.should("multiproc"):
            debug.write("Finished multiprocessing bootstrap")
        cov.stop()
        cov.save()
        if debug.should("multiproc"):
            debug.write("Saved multiprocessing data")
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage  # avoid circular import
    rcfile = os.environ[COVERAGE_RCFILE_ENV]
    cov = Coverage(data_suffix=True, config_file=rcfile)
    cov.start()
    # NOTE(review): upstream coverage exposes this object as the *private*
    # attribute cov._debug; verify that `cov.debug` exists in the pinned
    # coverage version, otherwise this raises AttributeError.
    debug = cov.debug
    try:
        if debug.should("multiproc"):
            debug.write("Calling multiprocessing bootstrap")
        return original_bootstrap(self)
    finally:
        # Trace and persist data regardless of how bootstrap finished.
        if debug.should("multiproc"):
            debug.write("Finished multiprocessing bootstrap")
        cov.stop()
        cov.save()
        if debug.should("multiproc"):
            debug.write("Saved multiprocessing data")
def main():
    """
    The main function, mainly functioning to do the main functional work
    (thanks pylint)

    Usage: prog [cover [MIN_PERCENT]]
    Discovers tests in '.' and 'lib', runs them, and with 'cover' also
    enforces a minimum coverage percentage.  Exits 1 on failure.
    """
    if len(sys.argv) > 1 and sys.argv[1] == 'cover':
        # FIXME - there are enough args now to need an arg parser
        cover = Coverage(branch=True, auto_data=True)
        min_percent = 0
        if len(sys.argv) > 2:
            min_percent = float(sys.argv[2])
    else:
        cover = False

    loader = unittest.defaultTestLoader
    runner = unittest.TextTestRunner(verbosity=2)

    if cover:
        cover.erase()
        cover.start()

    tests = loader.discover('.')
    tests_lib = loader.discover('lib', top_level_dir='lib')
    tests.addTests(tests_lib)
    result = runner.run(tests)

    if cover:
        cover.stop()
        # the debian coverage package didnt include jquery.debounce.min.js
        # (and additionally, that thing is not packaged for debian elsewhere)
        # Deliberate best-effort: a missing asset must not fail the run, but
        # don't use a bare except (it would also swallow KeyboardInterrupt).
        try:
            cover.html_report()
        except Exception:
            pass
        percent = cover.report(show_missing=True)
        if min_percent > percent:
            print("The coverage ({:.1f}% reached) fails to reach the "
                  "minimum required ({}%)\n".format(percent, min_percent))
            sys.exit(1)

    if not result.wasSuccessful():
        # sys.exit instead of the site-provided exit() builtin, which may be
        # absent when the interpreter runs with -S.
        sys.exit(1)
def run_unit_tests():
    """Run the model test suite under coverage and return a text summary.

    Both the test runner output and the coverage report are captured into an
    in-memory buffer and returned, prefixed with a timestamp line.
    """
    from datetime import datetime
    from coverage import Coverage
    from io import StringIO
    import unittest
    from tests import testModels
    # Collect test-runner and coverage output in memory instead of stdout.
    w = StringIO()
    cov = Coverage(omit=["/usr/*", "*/venv/*", "*-packages*"])
    cov.start()
    runner = unittest.TextTestRunner(stream=w)
    runner.run(unittest.makeSuite(testModels))
    cov.stop()
    cov.report(file=w)
    output = w.getvalue()
    # Presumably re-binds the Flask app on the SQLAlchemy handle after the
    # tests replaced it -- TODO confirm against the module-level db/app.
    db.app = app
    return ("You ran the tests on: " + datetime.now().strftime("%I:%M%p on %B %d, %Y") + " GMT\n" + output)
class AutotestConfig(AppConfig):
    """Django app config that lazily manages a per-app Coverage instance."""

    name = 'autotest'

    def __init__(self, *args, **kwargs):
        super(AutotestConfig, self).__init__(*args, **kwargs)
        # Lazily created Coverage instance; None until coverage_start() runs.
        self.coverage = None

    def coverage_start(self):
        """Start measuring (once) and return the Coverage instance, or None."""
        # `coverage` here is a module-level object used both as a feature flag
        # and as a second collector below -- presumably a Coverage instance
        # started elsewhere; TODO confirm what it actually is.
        if coverage and not self.coverage:
            self.coverage = Coverage()
            self.coverage.start()
        return self.coverage

    def coverage_report(self):
        """Stop both collectors, merge their data, and write an HTML report."""
        if coverage and self.coverage:
            self.coverage.stop()
            coverage.stop()
            # Merge the ambient collector's data into ours before reporting.
            self.coverage.get_data().update(coverage.get_data())
            self.coverage.html_report()
def stmtCovered(code, test):
    """Return the set of 1-based line numbers of *code* executed by *test*.

    *code* is a list of source lines; feedInput() substitutes the test's
    input values into it first.  The instrumented program is written to a
    temporary file, executed under coverage, and the covered lines are
    computed as (all lines) - (missing lines).
    """
    import tempfile

    # Replace input lines with values
    code = feedInput(code, test)
    # Initialize the coverage object
    cov = Coverage()
    # Use tempfile for a unique, collision-free file name (the previous
    # random()-based "tmpNNNNN" names could collide between runs).
    fd, fname = tempfile.mkstemp(suffix=".py")
    try:
        with os.fdopen(fd, 'w') as f:
            f.write("\n".join(code))
        # Capture statement coverage
        print("Test case is: " + str(test))
        cov.start()
        try:
            # exec(compile(...)) replaces Python-2-only execfile(), which does
            # not exist on Python 3 (this function already uses py3 print()).
            # Compiling with `fname` as the filename is essential so coverage
            # attributes the executed lines to the temporary file.
            with open(fname) as src:
                exec(compile(src.read(), fname, 'exec'), {})
        finally:
            cov.stop()
        # Obtain the report of coverage; report[2] holds the missing
        # (unexecuted) line numbers.
        report = cov.analysis(fname)
    finally:
        # Remove the temporary file even if execution or analysis failed.
        os.remove(fname)
    # Covered lines = all lines - missing lines.
    return set(range(1, len(code) + 1)) - set(report[2])
class CodeCoverage(object):
    """ Code Coverage radish extension """

    OPTIONS = [
        ("--with-coverage", "enable code coverage"),
        ("--cover-packages=<cover_packages>", "specify source code package"),
    ]
    LOAD_IF = staticmethod(lambda config: config.with_coverage)
    LOAD_PRIORITY = 70

    def __init__(self):
        # Register the measurement hooks around the whole run.
        before.all(self.coverage_start)
        after.all(self.coverage_stop)
        packages = (
            world.config.cover_packages.split(",")
            if world.config.cover_packages
            else []
        )
        self.coverage = Coverage(source=packages)

    def coverage_start(self, features, marker):
        """ Start the coverage measurement """
        self.coverage.load()
        self.coverage.start()

    def coverage_stop(self, features, marker):
        """ Stop the coverage measurement and create report """
        self.coverage.stop()
        self.coverage.save()
        self.coverage.report(file=sys.stdout)
def run_test_suite(args):
    """Configure Django, run the dj-braintree test suite, and gate on quality.

    Steps: (1) run the nose test suite, (2) optionally enforce the
    TESTS_THRESHOLD coverage floor, (3) optionally run flake8.  Exits
    non-zero on any failure.  Flags on *args*: skip_utc, no_coverage,
    no_pep8.
    """
    skip_utc = args.skip_utc
    enable_coverage = not args.no_coverage
    enable_pep8 = not args.no_pep8

    # Coverage must start before Django/app imports happen so import-time
    # lines are measured.
    if enable_coverage:
        cov = Coverage(config_file=True)
        cov.erase()
        cov.start()

    settings.configure(
        DJBRAINTREE_TESTS_SKIP_UTC=skip_utc,
        TIME_ZONE='America/Los_Angeles',
        DEBUG=True,
        USE_TZ=True,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
                "NAME": "djbraintree",
                "USER": "",
                "PASSWORD": "",
                "HOST": "",
                "PORT": "",
            },
        },
        ROOT_URLCONF="tests.test_urls",
        INSTALLED_APPS=[
            "django.contrib.admin",
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.sessions",
            "django.contrib.sites",
            "jsonfield",
            "djbraintree",
            "tests",
            "tests.apps.testapp"
        ],
        MIDDLEWARE_CLASSES=(
            "django.contrib.sessions.middleware.SessionMiddleware",
            "django.contrib.auth.middleware.AuthenticationMiddleware",
            "django.contrib.messages.middleware.MessageMiddleware"),
        SITE_ID=1,
        BRAINTREE_PUBLIC_KEY=os.environ.get("BRAINTREE_PUBLIC_KEY", ""),
        BRAINTREE_PRIVATE_KEY=os.environ.get("BRAINTREE_PRIVATE_KEY", ""),
        BRAINTREE_MERCHANT_ID=os.environ.get("BRAINTREE_MERCHANT_ID", ""),
        DJBRAINTREE_PLANS={
            "test0": {
                "braintree_plan_id": "test_id_0",
                "name": "Test Plan 0",
                "description": "A test plan",
                "price": 1000,  # $10.00
                "currency": "usd",
                "interval": "month"
            },
            "test": {
                "braintree_plan_id": "test_id",
                "name": "Test Plan 1",
                "description": "Another test plan",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            },
            "test2": {
                "braintree_plan_id": "test_id_2",
                "name": "Test Plan 2",
                "description": "Yet Another test plan",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_deletion": {
                "braintree_plan_id": "test_id_3",
                "name": "Test Plan 3",
                "description": "Test plan for deletion.",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_trial": {
                "braintree_plan_id": "test_id_4",
                "name": "Test Plan 4",
                "description": "Test plan for trails.",
                "price": 7000,  # $70.00
                "currency": "usd",
                "interval": "month",
                "trial_period_days": 7
            },
            "unidentified_test_plan": {
                "name": "Unidentified Test Plan",
                "description": "A test plan with no ID.",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            }
        },
        DJBRAINTREE_PLAN_HIERARCHY={
            "bronze": {
                "level": 1,
                "plans": [
                    "test0",
                    "test",
                ]
            },
            "silver": {
                "level": 2,
                "plans": [
                    "test2",
                    "test_deletion",
                ]
            },
            "gold": {
                "level": 3,
                "plans": [
                    "test_trial",
                    "unidentified_test_plan",
                ]
            },
        },
        DJBRAINTREE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS=(
            "(admin)",
            "test_url_name",
            "testapp_namespaced:test_url_namespaced",
            "fn:/test_fnmatch*"),
    )

    # Avoid AppRegistryNotReady exception
    # http://stackoverflow.com/questions/24793351/django-appregistrynotready
    if hasattr(django, "setup"):
        django.setup()

    # Announce the test suite
    sys.stdout.write(
        colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(
        colored(text="dj-braintree", color="green", attrs=["bold"]))
    sys.stdout.write(
        colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    sys.stdout.write(
        colored(text="Step 1: Running unit tests.\n\n",
                color="yellow",
                attrs=["bold"]))

    # Hack to reset the global argv before nose has a chance to grab it
    # http://stackoverflow.com/a/1718407/1834570
    # (Note: this re-binds the function parameter `args`.)
    args = sys.argv[1:]
    sys.argv = sys.argv[0:1]

    from django_nose import NoseTestSuiteRunner

    test_runner = NoseTestSuiteRunner(verbosity=1)
    failures = test_runner.run_tests(["."])

    if failures:
        sys.exit(failures)

    if enable_coverage:
        # Announce coverage run
        sys.stdout.write(
            colored(text="\nStep 2: Generating coverage results.\n\n",
                    color="yellow",
                    attrs=["bold"]))

        cov.stop()
        percentage = round(cov.report(show_missing=True), 2)
        cov.html_report(directory='cover')
        cov.save()
        cov.xml_report()

        if percentage < TESTS_THRESHOLD:
            # NOTE(review): the {old} placeholder prints the threshold,
            # not the previously measured percentage.
            sys.stderr.write(
                colored(
                    text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " + "WAS {old}%, IS NOW {new}%.\n\n".format(
                        old=TESTS_THRESHOLD, new=percentage),
                    color="red",
                    attrs=["bold"]))
            sys.exit(1)
    else:
        # Announce disabled coverage run
        sys.stdout.write(
            colored(text="\nStep 2: Generating coverage results [SKIPPED].",
                    color="yellow",
                    attrs=["bold"]))

    if enable_pep8:
        # Announce flake8 run
        sys.stdout.write(
            colored(text="\nStep 3: Checking for pep8 errors.\n\n",
                    color="yellow",
                    attrs=["bold"]))

        print("pep8 errors:")
        print(
            "----------------------------------------------------------------------"
        )

        from subprocess import call
        flake_result = call(["flake8", ".", "--count"])
        if flake_result != 0:
            sys.stderr.write("pep8 errors detected.\n")
            sys.stderr.write(
                colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n\n",
                        color="red",
                        attrs=["bold"]))
            sys.exit(flake_result)
        else:
            print("None")
    else:
        # Announce disabled pep8 run
        sys.stdout.write(
            colored(text="\nStep 3: Checking for pep8 errors [SKIPPED].\n",
                    color="yellow",
                    attrs=["bold"]))

    # Announce success
    if enable_coverage and enable_pep8:
        sys.stdout.write(
            colored(
                text=
                "\nTests completed successfully with no errors. Congrats!\n",
                color="green",
                attrs=["bold"]))
    else:
        sys.stdout.write(
            colored(
                text=
                "\nTests completed successfully, but some step(s) were skipped!\n",
                color="green",
                attrs=["bold"]))
        sys.stdout.write(
            colored(text="Don't push without running the skipped step(s).\n",
                    color="red",
                    attrs=["bold"]))
def run_tests(context, app=None, module=None, doctype=None, test=(),
        driver=None, profile=False, coverage=False, junit_xml_output=False,
        ui_tests=False, doctype_list_path=None, skip_test_records=False,
        skip_before_tests=False, failfast=False):
    "Run tests"
    # Runs the frappe test runner for a site; refuses to run unless the site
    # has allow_tests set or we are on CI.  Optionally measures coverage for
    # the app under test and exits with the test status on CI.
    import frappe.test_runner
    tests = test

    site = get_site(context)

    allow_tests = frappe.get_conf(site).allow_tests

    # Safety gate: tests can mutate site data, so require explicit opt-in.
    if not (allow_tests or os.environ.get('CI')):
        click.secho('Testing is disabled for the site!', bold=True)
        click.secho('You can enable tests by entering following command:')
        click.secho('bench --site {0} set-config allow_tests true'.format(site), fg='green')
        return

    frappe.init(site=site)

    frappe.flags.skip_before_tests = skip_before_tests
    frappe.flags.skip_test_records = skip_test_records

    if coverage:
        from coverage import Coverage

        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
        cov = Coverage(source=[source_path], omit=[
            '*.html',
            '*.js',
            '*.xml',
            '*.css',
            '*.less',
            '*.scss',
            '*.vue',
            '*/doctype/*/*_dashboard.py',
            '*/patches/*'
        ])
        cov.start()

    ret = frappe.test_runner.main(app, module, doctype, context.verbose,
        tests=tests, force=context.force, profile=profile,
        junit_xml_output=junit_xml_output, ui_tests=ui_tests,
        doctype_list_path=doctype_list_path, failfast=failfast)

    if coverage:
        cov.stop()
        cov.save()

    # Normalize a fully green TestResult to exit status 0.
    if len(ret.failures) == 0 and len(ret.errors) == 0:
        ret = 0

    if os.environ.get('CI'):
        sys.exit(ret)
class Testmon(object):
    """Coverage wrapper used by testmon to fingerprint what each test executes."""

    # Class-level stack of the Coverage objects currently collecting
    # (supports nested start/stop).
    coverage_stack = []

    def __init__(self, rootdir="", testmon_labels=None, cov_plugin=None):
        if testmon_labels is None:
            testmon_labels = {"singleprocess"}
        self.rootdir = rootdir
        self.testmon_labels = testmon_labels
        self.cov = None
        self.setup_coverage(not ("singleprocess" in testmon_labels), cov_plugin)

    def setup_coverage(self, subprocess, cov_plugin=None):
        # Measure only files under rootdir, excluding the Python lib dirs.
        # NOTE(review): the `subprocess` argument is currently unused here.
        params = {
            "include": [os.path.join(self.rootdir, "*")],
            "omit": _get_python_lib_paths(),
        }

        self.cov = Coverage(
            data_file=getattr(self, "sub_cov_file", None), config_file=False, **params
        )
        # Silence coverage's "no data collected" warning for empty tests.
        self.cov._warn_no_data = False

    def start(self):
        Testmon.coverage_stack.append(self.cov)
        self.cov.erase()
        self.cov.start()

    def stop(self):
        self.cov.stop()
        Testmon.coverage_stack.pop()

    def stop_and_save(self, testmon_data: TestmonData, nodeid, result):
        """Stop measuring and persist this test node's fingerprints to the DB."""
        self.stop()
        if hasattr(self, "sub_cov_file"):
            # Merge data files produced by subprocesses.
            self.cov.combine()

        measured_files = get_measured_relfiles(
            self.rootdir, self.cov, home_file(nodeid)
        )
        node_data = testmon_data.node_data_from_cov(measured_files)
        nodes_fingerprints = testmon_data.node_data2records(node_data)
        # Record the library-environment checksum alongside the file data so
        # dependency upgrades invalidate the node.
        nodes_fingerprints.append(
            {
                "filename": LIBRARIES_KEY,
                "checksum": testmon_data.libraries,
                "mtime": None,
                "fingerprint": checksums_to_blob(
                    encode_lines([testmon_data.libraries])
                ),
            }
        )
        testmon_data.db.insert_node_fingerprints(
            nodeid,
            nodes_fingerprints,
            result,
        )

    def close(self):
        # Clean up subprocess-measurement artifacts, if they were set up.
        if hasattr(self, "sub_cov_file"):
            os.remove(self.sub_cov_file + "_rc")
        os.environ.pop("COVERAGE_PROCESS_START", None)
class CoverageScript(object):
    """The command-line interface to coverage.py."""

    def __init__(self):
        # True when the first argv token is a global switch (e.g. --help)
        # rather than a sub-command.
        self.global_option = False
        self.coverage = None

    def command_line(self, argv):
        """The bulk of the command line interface to coverage.py.

        `argv` is the argument list to process.

        Returns 0 if all is well, 1 if something went wrong.
        """
        # Collect the command-line options.
        if not argv:
            show_help(topic='minimum_help')
            return OK

        # The command syntax we parse depends on the first argument. Global
        # switch syntax always starts with an option.
        self.global_option = argv[0].startswith('-')
        if self.global_option:
            parser = GlobalOptionParser()
        else:
            parser = CMDS.get(argv[0])
            if not parser:
                show_help("Unknown command: '%s'" % argv[0])
                return ERR
            argv = argv[1:]

        ok, options, args = parser.parse_args_ok(argv)
        if not ok:
            return ERR

        # Handle help and version.
        if self.do_help(options, args, parser):
            return OK

        # Listify the list options.
        source = unshell_list(options.source)
        omit = unshell_list(options.omit)
        include = unshell_list(options.include)
        debug = unshell_list(options.debug)

        # Do something.
        self.coverage = Coverage(
            data_suffix=options.parallel_mode,
            cover_pylib=options.pylib,
            timid=options.timid,
            branch=options.branch,
            config_file=options.rcfile,
            source=source,
            omit=omit,
            include=include,
            debug=debug,
            concurrency=options.concurrency,
            check_preimported=True,
            context=options.context,
        )

        # Dispatch on the requested action.
        if options.action == "debug":
            return self.do_debug(args)

        elif options.action == "erase":
            self.coverage.erase()
            return OK

        elif options.action == "run":
            return self.do_run(options, args)

        elif options.action == "combine":
            if options.append:
                self.coverage.load()
            data_dirs = args or None
            self.coverage.combine(data_dirs, strict=True)
            self.coverage.save()
            return OK

        # Remaining actions are reporting, with some common options.
        report_args = dict(
            morfs=unglob_args(args),
            ignore_errors=options.ignore_errors,
            omit=omit,
            include=include,
        )

        # We need to be able to import from the current directory, because
        # plugins may try to, for example, to read Django settings.
        sys.path.insert(0, '')

        self.coverage.load()

        total = None
        if options.action == "report":
            total = self.coverage.report(
                show_missing=options.show_missing,
                skip_covered=options.skip_covered,
                **report_args
            )
        elif options.action == "annotate":
            self.coverage.annotate(directory=options.directory, **report_args)
        elif options.action == "html":
            total = self.coverage.html_report(
                directory=options.directory,
                title=options.title,
                skip_covered=options.skip_covered,
                **report_args
            )
        elif options.action == "xml":
            outfile = options.outfile
            total = self.coverage.xml_report(outfile=outfile, **report_args)

        if total is not None:
            # Apply the command line fail-under options, and then use the config
            # value, so we can get fail_under from the config file.
            if options.fail_under is not None:
                self.coverage.set_option("report:fail_under", options.fail_under)

            fail_under = self.coverage.get_option("report:fail_under")
            precision = self.coverage.get_option("report:precision")
            if should_fail_under(total, fail_under, precision):
                return FAIL_UNDER

        return OK

    def do_help(self, options, args, parser):
        """Deal with help requests.

        Return True if it handled the request, False if not.
        """
        # Handle help.
        if options.help:
            if self.global_option:
                show_help(topic='help')
            else:
                show_help(parser=parser)
            return True

        if options.action == "help":
            if args:
                for a in args:
                    parser = CMDS.get(a)
                    if parser:
                        show_help(parser=parser)
                    else:
                        show_help(topic=a)
            else:
                show_help(topic='help')
            return True

        # Handle version.
        if options.version:
            show_help(topic='version')
            return True

        return False

    def do_run(self, options, args):
        """Implementation of 'coverage run'."""
        if not args:
            if options.module:
                # Specified -m with nothing else.
                show_help("No module specified for -m")
                return ERR
            # Fall back to the command line configured in the rc file.
            command_line = self.coverage.get_option("run:command_line")
            if command_line is not None:
                args = shlex.split(command_line)
                if args and args[0] == "-m":
                    options.module = True
                    args = args[1:]
        if not args:
            show_help("Nothing to do.")
            return ERR

        if options.append and self.coverage.get_option("run:parallel"):
            show_help("Can't append to data files in parallel mode.")
            return ERR

        if options.concurrency == "multiprocessing":
            # Can't set other run-affecting command line options with
            # multiprocessing.
            for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
                # As it happens, all of these options have no default, meaning
                # they will be None if they have not been specified.
                if getattr(options, opt_name) is not None:
                    show_help(
                        "Options affecting multiprocessing must only be specified "
                        "in a configuration file.\n"
                        "Remove --{} from the command line.".format(opt_name)
                    )
                    return ERR

        runner = PyRunner(args, as_module=bool(options.module))
        runner.prepare()

        if options.append:
            self.coverage.load()

        # Run the script.
        self.coverage.start()
        code_ran = True
        try:
            runner.run()
        except NoSource:
            # Nothing was executed, so there is no data worth saving.
            code_ran = False
            raise
        finally:
            self.coverage.stop()
            if code_ran:
                self.coverage.save()

        return OK

    def do_debug(self, args):
        """Implementation of 'coverage debug'."""
        if not args:
            show_help("What information would you like: config, data, sys?")
            return ERR

        for info in args:
            if info == 'sys':
                sys_info = self.coverage.sys_info()
                print(info_header("sys"))
                for line in info_formatter(sys_info):
                    print(" %s" % line)
            elif info == 'data':
                self.coverage.load()
                data = self.coverage.get_data()
                print(info_header("data"))
                print("path: %s" % self.coverage.get_data().data_filename())
                if data:
                    print("has_arcs: %r" % data.has_arcs())
                    summary = line_counts(data, fullpath=True)
                    filenames = sorted(summary.keys())
                    print("\n%d files:" % len(filenames))
                    for f in filenames:
                        line = "%s: %d lines" % (f, summary[f])
                        plugin = data.file_tracer(f)
                        if plugin:
                            line += " [%s]" % plugin
                        print(line)
                else:
                    print("No data collected")
            elif info == 'config':
                print(info_header("config"))
                config_info = self.coverage.config.__dict__.items()
                for line in info_formatter(config_info):
                    print(" %s" % line)
            else:
                show_help("Don't know what you mean by %r" % info)
                return ERR

        return OK
class Coverage:
    """
    Extension for Python Code Coverage

    NOTE(review): this class shadows the name of coverage.Coverage, which is
    why the real collector is imported locally inside the methods.
    """

    # Click options this extension contributes to the radish CLI.
    OPTIONS = [
        click.Option(
            param_decls=("--with-coverage", "with_coverage"),
            is_flag=True,
            help="Enable Code Coverage",
        ),
        click.Option(
            param_decls=("--cover-package", "cover_packages"),
            multiple=True,
            help=
            "Python Package name for which the coverage is measured and reported",
        ),
        click.Option(
            param_decls=("--cover-append", "cover_append"),
            is_flag=True,
            help="Append measured coverage data to previous collected data",
        ),
        click.Option(
            param_decls=("--cover-config-file", "cover_config_file"),
            default=".coveragerc",
            help="Path to a custom coverage configuration file",
        ),
        click.Option(
            param_decls=("--cover-branches", "cover_branches"),
            is_flag=True,
            help="Include branch coverage in the report",
        ),
        click.Option(
            param_decls=("--cover-erase", "cover_erase"),
            is_flag=True,
            help="Erase previously collected data",
        ),
        click.Option(
            param_decls=("--cover-min-percentage", "cover_min_percentage"),
            type=click.IntRange(0, 100),
            help=
            "Fail if the provided minimum coverage percentage is not reached",
        ),
        click.Option(
            param_decls=("--cover-html", "cover_html"),
            help=
            "Path to the directory where to store the HTML coverage report",
        ),
        click.Option(
            param_decls=("--cover-xml", "cover_xml"),
            help="Path to the directory where to store the XML coverage report",
        ),
    ]

    @classmethod
    def load(cls, config):
        # Only instantiate the extension when --with-coverage was given.
        if config.with_coverage:
            return cls(
                config.cover_packages,
                config.cover_append,
                config.cover_config_file,
                config.cover_branches,
                config.cover_erase,
                config.cover_min_percentage,
                config.cover_html,
                config.cover_xml,
            )
        else:
            return None

    def __init__(
        self,
        cover_packages,
        cover_append,
        cover_config_file,
        cover_branches,
        cover_erase,
        cover_min_percentage,
        cover_html,
        cover_xml,
    ):
        # Fail early with a helpful message if the optional dependency is
        # missing.
        try:
            from coverage import Coverage  # noqa
        except ImportError:
            raise RadishError(
                "if you want to use the code coverage extension you have to "
                "'pip install radish-bdd[coverage]'")

        self.cover_packages = cover_packages
        self.cover_append = cover_append
        self.cover_config_file = cover_config_file
        self.cover_branches = cover_branches
        self.cover_erase = cover_erase
        self.cover_min_percentage = cover_min_percentage
        self.cover_html = cover_html
        self.cover_xml = cover_xml

        # Register measurement hooks around the whole radish run.
        before.all()(self.coverage_start)
        after.all()(self.coverage_stop)

        self.coverage = None
        # Snapshot of loaded modules, used later to infer sources to measure.
        self.modules_on_init = set(sys.modules.keys())

    def coverage_start(self, features):
        """
        Hook to start the coverage measurement
        """
        from coverage import Coverage

        # if no explicit modules are specified we just
        # use the ones loaded from radish's basedir.
        # During the plugin init the basedir modules are
        # not loaded yet, but they are during the start method.
        # Thus, we are safe to consider the difference between the
        # two for coverage measurement.
        if not self.cover_packages:
            source = list(
                set(sys.modules.keys()).difference(self.modules_on_init))
        else:
            source = self.cover_packages

        self.coverage = Coverage(
            source=source,
            config_file=self.cover_config_file,
            branch=self.cover_branches,
        )
        if self.cover_erase:
            self.coverage.combine()
            self.coverage.erase()

        if self.cover_append:
            self.coverage.load()
        self.coverage.start()

    def coverage_stop(self, features):
        """
        Stop the coverage measurement and create report
        """
        self.coverage.stop()
        self.coverage.combine()
        self.coverage.save()
        self.coverage.report(file=sys.stdout)

        if self.cover_html:
            self.coverage.html_report(directory=self.cover_html)

        if self.cover_xml:
            self.coverage.xml_report(outfile=self.cover_xml)

        if self.cover_min_percentage:
            # Re-run the textual report into a buffer and scrape the TOTAL
            # line for the overall percentage.
            report = StringIO()
            self.coverage.report(file=report)
            match = re.search(r"^TOTAL\s+(.*)$", report.getvalue(),
                              re.MULTILINE)
            if not match:
                raise RadishError(
                    "Failed to find total percentage in coverage report")

            total_percentage = int(match.groups()[0].split()[-1][:-1])
            if total_percentage < int(self.cover_min_percentage):
                raise RadishError(
                    "Failed to reach minimum expected coverage of {0}% (reached: {1}%)"
                    .format(self.cover_min_percentage, total_percentage))
    # Placeholder test methods (class header is outside this view);
    # not yet implemented.
    def test_create_dir(self):
        pass

    def test_upload(self):
        pass

    def test_rename(self):
        pass

    def test_del_file(self):
        pass

    def test_change_plan(self):
        pass


if __name__ == '__main__':
    # The bare except is deliberate: unittest.main() raises SystemExit when
    # the run finishes, and catching it (which `except Exception` would NOT)
    # lets the coverage reporting below still execute.  `cov` and `basedir`
    # are presumably module-level, with cov.start() called before the tests
    # -- confirm against the rest of the file.
    try:
        unittest.main()
    except:
        pass
    cov.stop()
    cov.save()
    print("\n\nCoverage Report:\n")
    cov.report()
    print("HTML version: " + os.path.join(basedir, "htmlcov/coverage/index.html"))
    cov.html_report(directory='htmlcov/coverage')
    cov.erase()
    def _execute_unit_tests(self, args):
        """
        Execute python unit test and save a basic summary into the following place.

        $HOME/.pyunittest/hash

        The test cases are executed wtih coverage enabled.
        """
        # Per-run map of cosmetic test name -> full test id, used by `rerun`.
        self.failures = {}
        # Drop any stale `rerun` command from a previous run.
        if hasattr(Cmd, 'do_rerun'):
            del Cmd.do_rerun
        # No explicit test selection: discover every test_*.py in the cwd.
        if len(self.tests) == 0:
            for test in os.listdir('./'):
                if test[0:5] == 'test_' and test[-3:] == '.py':
                    self.testcase = test[5:-3]
                    self._select_test_cases_from_directory(self.testcase)
        count = 0
        suite = unittest.TestSuite()
        for testcase in self.tests:
            msg = 'Testcase %s ' % (testcase)
            if self.tests[testcase]['class']:
                self.xterm_message(msg, Fore.MAGENTA, oldmsg=msg)
                # Keep only the tests whose name starts with the given prefix.
                tests_to_run = []
                for item in self.tests[testcase]['tests']:
                    if item[0:len(args)] == args:
                        tests_to_run.append(item)
                        count = count + 1
                msg = 'Running %s tests(s)... ' % (len(tests_to_run))
                module = None
                self.xterm_message(msg, Fore.MAGENTA, newline=True)
                try:
                    module = importlib.import_module('test_%s' % (testcase))
                except ImportError as err:
                    self.xterm_message('Unable to import testcase file - perhaps python is battered and bruised :-(\n%s' % (str(err)), Fore.RED, newline=True)
                    self.xterm_message(traceback.format_exc(), Fore.RED, newline=True, style=Style.DIM)
                if module:
                    try:
                        class__ = '%s' % (self.tests[testcase]['class'])
                        class_ = getattr(module, class__)
                        for item in tests_to_run:
                            suite.addTest(class_('test_%s' % (item)))
                    except AttributeError as err:
                        self.xterm_message('Unable to instantiate class - perhaps there is no valid test case defined in this file :-(\n%s' % (str(err)), Fore.RED, newline=True)
        # Run the whole suite under coverage and emit an HTML report.
        cov = Coverage()
        cov.start()
        results = unittest.TextTestRunner(verbosity=9).run(suite)
        cov.stop()
        cov.html_report(directory='covhtml')

        # Remove modules
        # Collect every module imported from the working directory (plus any
        # test_* module) so it gets re-imported fresh on the next run.
        module_to_remove = []
        for c in range(50):
            try:
                for mod in sys.modules:
                    if hasattr(sys.modules[mod], '__file__') and sys.modules[mod].__file__[0:len(self.workingdir)] == self.workingdir:
                        module_to_remove.append(mod)
                    elif mod[0:5] == "test_":
                        module_to_remove.append(mod)
            except Exception as err:
                pass
                # We may get a RuntimeError that the dict sys.modules change size during iteration
                # (hence the retry loop above; up to 50 attempts)
        for mod in module_to_remove:
            if mod in sys.modules:
                del sys.modules[mod]
        for (testcase, string) in results.failures:
            raise ValueError('we dont handle failures')
        # Group identical error tracebacks so each distinct error is shown once.
        error_count = 0
        unique_failures = {}
        for (testcase, string) in results.errors:
            if string not in unique_failures:
                unique_failures[string] = []
            unique_failures[string].append(testcase)
            error_count = error_count + 1
        if error_count == 0:
            self.xterm_message("\n\nRun %s tests with 0 errors" % (count), Fore.GREEN, newline=True, style=Style.NORMAL)
            return False
        self.xterm_message("\n\nRun %s tests with %s errors" % (count, error_count), Fore.RED, newline=True, style=Style.NORMAL)
        erridx = 0
        for error in unique_failures:
            erridx = erridx + 1
            print(" %s %s cases of the following error" % (erridx, len(unique_failures[error])))
            for line in error.split('\n'):
                print(" %s" % (line))
            for test in unique_failures[error]:
                print(" %s" % (test))
                # e.g. "test_foo (module.Class)" -> "foo"; key for `rerun`.
                cosmetic_name = str(test).split(' ')[0][5:]
                self.failures[cosmetic_name] = str(test)
        self.xterm_message("Run %s tests with %s errors" % (count, error_count), Fore.RED, newline=True, style=Style.NORMAL)
        # Enable the interactive `rerun` command now that failures exist.
        Cmd.do_rerun = self._do_rerun
        Cmd.complete_rerun = self._complete_rerun
class CoverageScript:
    """The command-line interface to coverage.py.

    Parses a command line, builds a `Coverage` object from the options, and
    dispatches to the requested action (run/report/combine/debug/...).
    """

    def __init__(self):
        self.global_option = False
        self.coverage = None

    def command_line(self, argv):
        """The bulk of the command line interface to coverage.py.

        `argv` is the argument list to process.

        Returns 0 if all is well, 1 if something went wrong.

        """
        # Collect the command-line options.
        if not argv:
            show_help(topic='minimum_help')
            return OK

        # The command syntax we parse depends on the first argument.  Global
        # switch syntax always starts with an option.
        self.global_option = argv[0].startswith('-')
        if self.global_option:
            parser = GlobalOptionParser()
        else:
            parser = COMMANDS.get(argv[0])
            if not parser:
                show_help(f"Unknown command: {argv[0]!r}")
                return ERR
            argv = argv[1:]

        ok, options, args = parser.parse_args_ok(argv)
        if not ok:
            return ERR

        # Handle help and version.
        if self.do_help(options, args, parser):
            return OK

        # Listify the list options.
        source = unshell_list(options.source)
        omit = unshell_list(options.omit)
        include = unshell_list(options.include)
        debug = unshell_list(options.debug)
        contexts = unshell_list(options.contexts)

        if options.concurrency is not None:
            concurrency = options.concurrency.split(",")
        else:
            concurrency = None

        # Do something.
        self.coverage = Coverage(
            data_file=options.data_file or DEFAULT_DATAFILE,
            data_suffix=options.parallel_mode,
            cover_pylib=options.pylib,
            timid=options.timid,
            branch=options.branch,
            config_file=options.rcfile,
            source=source,
            omit=omit,
            include=include,
            debug=debug,
            concurrency=concurrency,
            check_preimported=True,
            context=options.context,
            messages=not options.quiet,
        )

        if options.action == "debug":
            return self.do_debug(args)

        elif options.action == "erase":
            self.coverage.erase()
            return OK

        elif options.action == "run":
            return self.do_run(options, args)

        elif options.action == "combine":
            if options.append:
                self.coverage.load()
            data_paths = args or None
            self.coverage.combine(data_paths, strict=True, keep=bool(options.keep))
            self.coverage.save()
            return OK

        # Remaining actions are reporting, with some common options.
        report_args = dict(
            morfs=unglob_args(args),
            ignore_errors=options.ignore_errors,
            omit=omit,
            include=include,
            contexts=contexts,
        )

        # We need to be able to import from the current directory, because
        # plugins may try to, for example, to read Django settings.
        sys.path.insert(0, '')

        self.coverage.load()

        # Each reporting action returns the total coverage percentage (or
        # None for annotate), which feeds the fail-under check below.
        total = None
        if options.action == "report":
            total = self.coverage.report(
                precision=options.precision,
                show_missing=options.show_missing,
                skip_covered=options.skip_covered,
                skip_empty=options.skip_empty,
                sort=options.sort,
                **report_args)
        elif options.action == "annotate":
            self.coverage.annotate(directory=options.directory, **report_args)
        elif options.action == "html":
            total = self.coverage.html_report(
                directory=options.directory,
                precision=options.precision,
                skip_covered=options.skip_covered,
                skip_empty=options.skip_empty,
                show_contexts=options.show_contexts,
                title=options.title,
                **report_args)
        elif options.action == "xml":
            total = self.coverage.xml_report(
                outfile=options.outfile,
                skip_empty=options.skip_empty,
                **report_args)
        elif options.action == "json":
            total = self.coverage.json_report(
                outfile=options.outfile,
                pretty_print=options.pretty_print,
                show_contexts=options.show_contexts,
                **report_args)
        elif options.action == "lcov":
            total = self.coverage.lcov_report(
                outfile=options.outfile,
                **report_args)
        else:
            # There are no other possible actions.
            raise AssertionError

        if total is not None:
            # Apply the command line fail-under options, and then use the config
            # value, so we can get fail_under from the config file.
            if options.fail_under is not None:
                self.coverage.set_option("report:fail_under", options.fail_under)
            if options.precision is not None:
                self.coverage.set_option("report:precision", options.precision)

            fail_under = self.coverage.get_option("report:fail_under")
            precision = self.coverage.get_option("report:precision")
            if should_fail_under(total, fail_under, precision):
                msg = "total of {total} is less than fail-under={fail_under:.{p}f}".format(
                    total=Numbers(precision=precision).display_covered(total),
                    fail_under=fail_under,
                    p=precision,
                )
                print("Coverage failure:", msg)
                return FAIL_UNDER

        return OK

    def do_help(self, options, args, parser):
        """Deal with help requests.

        Return True if it handled the request, False if not.

        """
        # Handle help.
        if options.help:
            if self.global_option:
                show_help(topic='help')
            else:
                show_help(parser=parser)
            return True

        if options.action == "help":
            if args:
                # Show help for each named command (or topic) in turn.
                for a in args:
                    parser = COMMANDS.get(a)
                    if parser:
                        show_help(parser=parser)
                    else:
                        show_help(topic=a)
            else:
                show_help(topic='help')
            return True

        # Handle version.
        if options.version:
            show_help(topic='version')
            return True

        return False

    def do_run(self, options, args):
        """Implementation of 'coverage run'."""
        if not args:
            if options.module:
                # Specified -m with nothing else.
                show_help("No module specified for -m")
                return ERR
            # Fall back to the command line configured in the rc file.
            command_line = self.coverage.get_option("run:command_line")
            if command_line is not None:
                args = shlex.split(command_line)
                if args and args[0] in {"-m", "--module"}:
                    options.module = True
                    args = args[1:]
        if not args:
            show_help("Nothing to do.")
            return ERR

        if options.append and self.coverage.get_option("run:parallel"):
            show_help("Can't append to data files in parallel mode.")
            return ERR

        if options.concurrency == "multiprocessing":
            # Can't set other run-affecting command line options with
            # multiprocessing.
            for opt_name in [
                    'branch', 'include', 'omit', 'pylib', 'source', 'timid'
            ]:
                # As it happens, all of these options have no default, meaning
                # they will be None if they have not been specified.
                if getattr(options, opt_name) is not None:
                    show_help(
                        "Options affecting multiprocessing must only be specified "
                        + "in a configuration file.\n" +
                        f"Remove --{opt_name} from the command line.")
                    return ERR

        os.environ["COVERAGE_RUN"] = "true"

        runner = PyRunner(args, as_module=bool(options.module))
        runner.prepare()

        if options.append:
            self.coverage.load()

        # Run the script.
        self.coverage.start()
        code_ran = True
        try:
            runner.run()
        except NoSource:
            # Nothing was measured; don't save an empty data file.
            code_ran = False
            raise
        finally:
            self.coverage.stop()
            if code_ran:
                self.coverage.save()

        return OK

    def do_debug(self, args):
        """Implementation of 'coverage debug'."""
        if not args:
            show_help(
                "What information would you like: config, data, sys, premain, pybehave?"
            )
            return ERR
        if args[1:]:
            show_help("Only one topic at a time, please")
            return ERR

        if args[0] == "sys":
            write_formatted_info(print, "sys", self.coverage.sys_info())
        elif args[0] == "data":
            print(info_header("data"))
            data_file = self.coverage.config.data_file
            debug_data_file(data_file)
            # Also describe each parallel data file that could be combined.
            for filename in combinable_files(data_file):
                print("-----")
                debug_data_file(filename)
        elif args[0] == "config":
            write_formatted_info(print, "config",
                                 self.coverage.config.debug_info())
        elif args[0] == "premain":
            print(info_header("premain"))
            print(short_stack())
        elif args[0] == "pybehave":
            write_formatted_info(print, "pybehave", env.debug_info())
        else:
            show_help(f"Don't know what you mean by {args[0]!r}")
            return ERR

        return OK
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sequoia.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?") from exc

    # Code coverage is handled here because of a bug with Django and nose that
    # hasn't been fixed after years.
    IS_TESTING = 'test' in sys.argv

    if IS_TESTING:
        from coverage import Coverage
        COV = Coverage()
        COV.erase()
        COV.start()

    # Django's `test` command exits via SystemExit when tests fail, which
    # previously skipped the coverage reporting entirely.  Run the command
    # under try/finally so the reports are produced even on a failing run.
    try:
        execute_from_command_line(sys.argv)
    finally:
        if IS_TESTING:
            COV.stop()
            COV.save()
            COV.report()
            COV.html_report(directory='htmlcov')
            COV.xml_report()
def main(argv):
    """Main program.

    Parses options, discovers and runs the unit/functional tests, and
    optionally records coverage.  Returns a process exit code (0 on success).
    """
    # Environment
    if sys.version_info < (2, 7):
        stderr('%s: need Python 2.7 or later' % argv[0])
        stderr('your python is %s' % sys.version)
        return 1

    # Defaults
    cfg = Options()
    cfg.basedir = os.path.join(os.path.dirname(argv[0]), 'src')
    cfg.basedir = os.path.abspath(cfg.basedir)

    # Figure out terminal size
    try:
        import curses
    except ImportError:
        pass
    else:
        try:
            curses.setupterm()
            cols = curses.tigetnum('cols')
            if cols > 0:
                cfg.screen_width = cols
        except (curses.error, TypeError):
            # tigetnum() is broken in PyPy3 and raises TypeError
            pass

    # Option processing
    opts, args = getopt.gnu_getopt(argv[1:], 'hvpqufw', [
        'list-files', 'list-tests', 'list-hooks', 'level=', 'all-levels',
        'coverage'
    ])
    for k, v in opts:
        if k == '-h':
            print(__doc__)
            return 0
        elif k == '-v':
            cfg.verbosity += 1
            cfg.quiet = False
        elif k == '-p':
            cfg.progress = True
            cfg.quiet = False
        elif k == '-q':
            cfg.verbosity = 0
            cfg.progress = False
            cfg.quiet = True
        elif k == '-u':
            cfg.unit_tests = True
        elif k == '-f':
            cfg.functional_tests = True
        elif k == '-w':
            cfg.warn_omitted = True
        elif k == '--list-files':
            cfg.list_files = True
            cfg.run_tests = False
        elif k == '--list-tests':
            cfg.list_tests = True
            cfg.run_tests = False
        elif k == '--list-hooks':
            cfg.list_hooks = True
            cfg.run_tests = False
        elif k == '--coverage':
            cfg.coverage = True
        elif k == '--level':
            try:
                cfg.level = int(v)
            except ValueError:
                stderr('%s: invalid level: %s' % (argv[0], v))
                # FIX: the '%s' placeholder was previously printed literally
                # because the '% argv[0]' argument was missing.
                stderr('run %s -h for help' % argv[0])
                return 1
        elif k == '--all-levels':
            cfg.level = None
        else:
            stderr('%s: invalid option: %s' % (argv[0], k))
            stderr('run %s -h for help' % argv[0])
            return 1
    if args:
        cfg.pathname_regex = args[0]
    if len(args) > 1:
        cfg.test_regex = args[1]
    if len(args) > 2:
        stderr('%s: too many arguments: %s' % (argv[0], args[2]))
        stderr('run %s -h for help' % argv[0])
        return 1
    if not cfg.unit_tests and not cfg.functional_tests:
        cfg.unit_tests = True

    # Set up the python path
    sys.path[0] = cfg.basedir

    # Set up tracing before we start importing things
    cov = None
    if cfg.run_tests and cfg.coverage:
        from coverage import Coverage
        cov = Coverage(omit=['test.py'])

    # Finding and importing
    test_files = get_test_files(cfg)
    # Measure coverage during test-module import as well as during the run.
    if cov is not None:
        cov.start()
    if cfg.list_tests or cfg.run_tests:
        test_cases = get_test_cases(test_files, cfg, cov=cov)
    if cfg.list_hooks or cfg.run_tests:
        test_hooks = get_test_hooks(test_files, cfg, cov=cov)

    # Configure the logging module
    import logging
    logging.basicConfig()
    logging.root.setLevel(logging.CRITICAL)

    # Running
    success = True
    if cfg.list_files:
        baselen = len(cfg.basedir) + 1
        print("\n".join([fn[baselen:] for fn in test_files]))
    if cfg.list_tests:
        print("\n".join([test.id() for test in test_cases]))
    if cfg.list_hooks:
        print("\n".join([str(hook) for hook in test_hooks]))
    if cfg.run_tests:
        runner = CustomTestRunner(cfg, test_hooks)
        suite = unittest.TestSuite()
        suite.addTests(test_cases)
        # NOTE(review): cov.start() may already have been called above
        # without an intervening stop() -- confirm intended with the
        # coverage API in use.
        if cov is not None:
            cov.start()
        run_result = runner.run(suite)
        if cov is not None:
            cov.stop()
        success = run_result.wasSuccessful()
        del run_result

    if cov is not None:
        traced_file_types = ('.py', '.pyx', '.pxi', '.pxd')
        modules = []

        def add_file(_, path, files):
            # Skip anything under a 'tests' directory.
            if 'tests' in os.path.relpath(path, cfg.basedir).split(os.sep):
                return
            for filename in files:
                if filename.endswith(traced_file_types):
                    modules.append(os.path.join(path, filename))

        if cfg.follow_symlinks:
            walker = walk_with_symlinks
        else:
            walker = os.path.walk
        walker(os.path.abspath(cfg.basedir), add_file, None)

        try:
            cov.xml_report(modules, outfile='coverage.xml')
            if cfg.coverdir:
                cov.html_report(modules, directory=cfg.coverdir)
        finally:
            # test runs can take a while, so at least try to print something
            cov.report()

    # That's all
    if success:
        return 0
    else:
        return 1
def run_test_suite(args):
    """Configure Django, run the dj-stripe tests under nose, then report
    coverage and flake8 results.  Exits the process on any failure."""
    skip_utc = args.skip_utc
    enable_coverage = not args.no_coverage
    enable_pep8 = not args.no_pep8

    # Coverage must start before the tested code is imported.
    if enable_coverage:
        cov = Coverage(config_file=True)
        cov.erase()
        cov.start()

    settings.configure(
        DJSTRIPE_TESTS_SKIP_UTC=skip_utc,
        TIME_ZONE='America/Los_Angeles',
        DEBUG=True,
        USE_TZ=True,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.postgresql_psycopg2",
                "NAME": "djstripe",
                "USER": "",
                "PASSWORD": "",
                "HOST": "",
                "PORT": "",
            },
        },
        ROOT_URLCONF="tests.test_urls",
        INSTALLED_APPS=[
            "django.contrib.admin",
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.sessions",
            "django.contrib.sites",
            "jsonfield",
            "djstripe",
            "tests",
            "tests.apps.testapp"
        ],
        MIDDLEWARE_CLASSES=(
            "django.contrib.sessions.middleware.SessionMiddleware",
            "django.contrib.auth.middleware.AuthenticationMiddleware",
            "django.contrib.messages.middleware.MessageMiddleware"
        ),
        SITE_ID=1,
        STRIPE_PUBLIC_KEY=os.environ.get("STRIPE_PUBLIC_KEY", ""),
        STRIPE_SECRET_KEY=os.environ.get("STRIPE_SECRET_KEY", ""),
        DJSTRIPE_PLANS={
            "test0": {
                "stripe_plan_id": "test_id_0",
                "name": "Test Plan 0",
                "description": "A test plan",
                "price": 1000,  # $10.00
                "currency": "usd",
                "interval": "month"
            },
            "test": {
                "stripe_plan_id": "test_id",
                "name": "Test Plan 1",
                "description": "Another test plan",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            },
            "test2": {
                "stripe_plan_id": "test_id_2",
                "name": "Test Plan 2",
                "description": "Yet Another test plan",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_deletion": {
                "stripe_plan_id": "test_id_3",
                "name": "Test Plan 3",
                "description": "Test plan for deletion.",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_trial": {
                "stripe_plan_id": "test_id_4",
                "name": "Test Plan 4",
                "description": "Test plan for trails.",
                "price": 7000,  # $70.00
                "currency": "usd",
                "interval": "month",
                "trial_period_days": 7
            },
            "unidentified_test_plan": {
                "name": "Unidentified Test Plan",
                "description": "A test plan with no ID.",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            }
        },
        DJSTRIPE_PLAN_HIERARCHY = {
            "bronze": {
                "level": 1,
                "plans": [
                    "test0",
                    "test",
                ]
            },
            "silver": {
                "level": 2,
                "plans": [
                    "test2",
                    "test_deletion",
                ]
            },
            "gold": {
                "level": 3,
                "plans": [
                    "test_trial",
                    "unidentified_test_plan",
                ]
            },
        },
        DJSTRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS=(
            "(admin)",
            "test_url_name",
            "testapp_namespaced:test_url_namespaced",
        ),
    )

    # Avoid AppRegistryNotReady exception
    # http://stackoverflow.com/questions/24793351/django-appregistrynotready
    if hasattr(django, "setup"):
        django.setup()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="dj-stripe", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    sys.stdout.write(colored(text="Step 1: Running unit tests.\n\n", color="yellow", attrs=["bold"]))

    # Hack to reset the global argv before nose has a chance to grab it
    # http://stackoverflow.com/a/1718407/1834570
    args = sys.argv[1:]
    sys.argv = sys.argv[0:1]

    from django_nose import NoseTestSuiteRunner

    test_runner = NoseTestSuiteRunner(verbosity=1)
    failures = test_runner.run_tests(["."])

    if failures:
        sys.exit(failures)

    if enable_coverage:
        # Announce coverage run
        sys.stdout.write(colored(text="\nStep 2: Generating coverage results.\n\n", color="yellow", attrs=["bold"]))

        cov.stop()
        percentage = round(cov.report(show_missing=True), 2)
        cov.html_report(directory='cover')
        cov.save()

        if percentage < TESTS_THRESHOLD:
            # NOTE(review): the message reports {old}=TESTS_THRESHOLD, i.e.
            # the required threshold rather than a previous measurement --
            # presumably intentional; confirm the wording is wanted.
            sys.stderr.write(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " + "WAS {old}%, IS NOW {new}%.\n\n".format(old=TESTS_THRESHOLD, new=percentage), color="red", attrs=["bold"]))
            sys.exit(1)
    else:
        # Announce disabled coverage run
        sys.stdout.write(colored(text="\nStep 2: Generating coverage results [SKIPPED].", color="yellow", attrs=["bold"]))

    if enable_pep8:
        # Announce flake8 run
        sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"]))

        print("pep8 errors:")
        print("----------------------------------------------------------------------")

        from subprocess import call
        flake_result = call(["flake8", ".", "--count"])
        if flake_result != 0:
            sys.stderr.write("pep8 errors detected.\n")
            sys.stderr.write(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n\n", color="red", attrs=["bold"]))
            sys.exit(flake_result)
        else:
            print("None")
    else:
        # Announce disabled coverage run
        sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors [SKIPPED].\n", color="yellow", attrs=["bold"]))

    # Announce success
    if enable_coverage and enable_pep8:
        sys.stdout.write(colored(text="\nTests completed successfully with no errors. Congrats!\n", color="green", attrs=["bold"]))
    else:
        sys.stdout.write(colored(text="\nTests completed successfully, but some step(s) were skipped!\n", color="green", attrs=["bold"]))
        sys.stdout.write(colored(text="Don't push without running the skipped step(s).\n", color="red", attrs=["bold"]))
def test(ctx, paths='', failfast=False, verbose=False, skip_coverage=False):
    """
    Runs the unit tests

    Usage:
    inv dev.test --paths='api' --failfast
    """
    import unittest

    import nose
    from coverage import Coverage
    from nose.plugins.capture import Capture
    from nose.plugins.logcapture import LogCapture

    # Switch the application into its testing configuration before the
    # test modules are imported.
    from config import set_config
    config = set_config('testing')

    from tests import prepare_database as tests_prepare_database

    class ConfiguringPlugin(nose.plugins.Plugin):
        # Nose plugin that prepares the test database once, right before
        # the run starts.
        enabled = True

        def configure(self, options, conf):
            pass

        def begin(self):
            tests_prepare_database()

    runner = unittest.TextTestRunner(verbosity=2 if verbose else 1)

    argv = ['nosetests']
    if failfast:
        argv.append('--stop')

    # Translate each comma-separated path into a dotted 'tests.<path>'
    # target; an empty path selects the whole 'tests' package.
    for path in paths.split(','):
        prefix = 'tests.'
        if not path:
            prefix = prefix[:-1]
        argv.append(prefix + path)

    plugins = [ConfiguringPlugin()]
    if config.nose.log_capturing:
        argv += [
            '--logging-clear-handlers',
            '--logging-format=(%(thread)d) %(name)s: %(levelname)s: %(message)s'
        ]
        plugins.append(LogCapture())
    if config.nose.stdout_capturing:
        plugins.append(Capture())

    # Run from the project's parent directory so relative paths resolve.
    os.chdir(os.path.join(config.project_dir, os.path.pardir))

    if not skip_coverage:
        cov = Coverage(
            source=[config.project_dir],
            omit=[
                'src/admin/*',
                'src/celery_tasks/*',
                'src/db/*',
                'src/tasks/*',
                'src/tests/*',
            ],
        )
        cov.start()

    # exit=False keeps control here so the coverage report below runs.
    nose.main(
        argv=argv,
        testRunner=runner,
        plugins=plugins,
        exit=False,
    )

    if not skip_coverage:
        directory = os.path.join(config.project_dir, '.coverage_report')
        print(f'\nSaving coverage report to "{os.path.abspath(directory)}"\n')
        cov.stop()
        cov.save()
        cov.html_report(directory=directory, title='WSP Coverage Report')
def test(config, tests=(), fail_fast=False, verbosity=1, with_coverage=False, with_lint=False):
    """Run the arcutils test suite under an in-memory Django configuration.

    Coverage and lint are only performed for full runs (no explicit tests).
    """
    from coverage import Coverage
    from django import setup
    from django.conf import settings
    from django.conf.urls import url
    from django.http import HttpResponse
    from django.test.runner import DiscoverRunner

    # Coverage/lint make no sense for a partial test selection.
    with_coverage = with_coverage and not tests
    with_lint = with_lint and not tests

    settings.configure(
        DEBUG=True,
        ALLOWED_HOSTS=['*'],
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': ':memory:',
            }
        },
        ROOT_URLCONF=(
            url(r'^test$', lambda request: HttpResponse('test'), name='test'),
        ),
        INSTALLED_APPS=(
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sessions',
            'django.contrib.admin',
            'arcutils',
        ),
        MIDDLEWARE_CLASSES=[],
        LDAP={
            'default': {
                'host': 'ldap://ldap-login.oit.pdx.edu',
                'username': '',
                'password': '',
                'search_base': 'ou=people,dc=pdx,dc=edu',
            }
        },
        TEMPLATES=[{
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'APP_DIRS': True,
            'OPTIONS': {
                'context_processors': [
                    'django.contrib.auth.context_processors.auth',
                ]
            }
        }],
    )
    setup()
    runner = DiscoverRunner(failfast=fail_fast, verbosity=verbosity)
    # Start measuring before the tests import the package under test.
    if with_coverage:
        coverage = Coverage(source=['arcutils'])
        coverage.start()
    if tests:
        num_errors = runner.run_tests(tests)
    else:
        num_errors = runner.run_tests(['arcutils'])
    if num_errors:
        abort(code=num_errors, message='Test failure(s) encountered; aborting')
    if with_coverage:
        coverage.stop()
        coverage.report()
    if with_lint:
        lint(config)
def run_test_suite(args):
    """Configure Django, run the selected dj-stripe tests under nose, and
    report coverage.  Exits the process on test failure or a coverage drop."""
    enable_coverage = not args.no_coverage
    tests = args.tests

    # Coverage must start before the tested code is imported.
    if enable_coverage:
        cov = Coverage(config_file=True)
        cov.erase()
        cov.start()

    settings.configure(
        DEBUG=True,
        USE_TZ=True,
        TIME_ZONE="UTC",
        SITE_ID=1,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.postgresql_psycopg2",
                "NAME": "djstripe",
                "USER": "******",
                "PASSWORD": "",
                "HOST": "localhost",
                "PORT": "",
            },
        },
        TEMPLATES=[
            {
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [],
                'APP_DIRS': True,
                'OPTIONS': {
                    'context_processors': [
                        'django.contrib.auth.context_processors.auth',
                    ],
                },
            },
        ],
        ROOT_URLCONF="tests.test_urls",
        INSTALLED_APPS=[
            "django.contrib.admin",
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.sessions",
            "django.contrib.sites",
            "jsonfield",
            "djstripe",
            "tests",
            "tests.apps.testapp"
        ],
        MIDDLEWARE=(
            "django.contrib.sessions.middleware.SessionMiddleware",
            "django.contrib.auth.middleware.AuthenticationMiddleware",
            "django.contrib.messages.middleware.MessageMiddleware"
        ),
        STRIPE_PUBLIC_KEY=os.environ.get("STRIPE_PUBLIC_KEY", ""),
        STRIPE_SECRET_KEY=os.environ.get("STRIPE_SECRET_KEY", ""),
        DJSTRIPE_PLANS={
            "test0": {
                "stripe_plan_id": "test_id_0",
                "name": "Test Plan 0",
                "description": "A test plan",
                "price": 1000,  # $10.00
                "currency": "usd",
                "interval": "month"
            },
            "test": {
                "stripe_plan_id": "test_id",
                "name": "Test Plan 1",
                "description": "Another test plan",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            },
            "test2": {
                "stripe_plan_id": "test_id_2",
                "name": "Test Plan 2",
                "description": "Yet Another test plan",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_deletion": {
                "stripe_plan_id": "test_id_3",
                "name": "Test Plan 3",
                "description": "Test plan for deletion.",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_trial": {
                "stripe_plan_id": "test_id_4",
                "name": "Test Plan 4",
                "description": "Test plan for trails.",
                "price": 7000,  # $70.00
                "currency": "usd",
                "interval": "month",
                "trial_period_days": 7
            },
            "unidentified_test_plan": {
                "name": "Unidentified Test Plan",
                "description": "A test plan with no ID.",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            }
        },
        DJSTRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS=(
            "(admin)",
            "test_url_name",
            "testapp_namespaced:test_url_namespaced",
            "fn:/test_fnmatch*"
        ),
        DJSTRIPE_USE_NATIVE_JSONFIELD=os.environ.get("USE_NATIVE_JSONFIELD", "") == "1",
    )

    # Avoid AppRegistryNotReady exception
    # http://stackoverflow.com/questions/24793351/django-appregistrynotready
    if hasattr(django, "setup"):
        django.setup()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="dj-stripe", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    sys.stdout.write(colored(text="Step 1: Running unit tests.\n\n", color="yellow", attrs=["bold"]))

    # Hack to reset the global argv before nose has a chance to grab it
    # http://stackoverflow.com/a/1718407/1834570
    args = sys.argv[1:]
    sys.argv = sys.argv[0:1]

    from django_nose import NoseTestSuiteRunner

    test_runner = NoseTestSuiteRunner(verbosity=1, keepdb=True, failfast=True)
    failures = test_runner.run_tests(tests)

    if failures:
        sys.exit(failures)

    if enable_coverage:
        # Announce coverage run
        sys.stdout.write(colored(text="\nStep 2: Generating coverage results.\n\n", color="yellow", attrs=["bold"]))

        cov.stop()
        percentage = round(cov.report(show_missing=True), 2)
        cov.html_report(directory='cover')
        cov.save()

        if percentage < TESTS_THRESHOLD:
            # NOTE(review): {old}=TESTS_THRESHOLD reports the required
            # threshold, not a previous measurement -- confirm wording.
            sys.stderr.write(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " + "WAS {old}%, IS NOW {new}%.\n\n".format(old=TESTS_THRESHOLD, new=percentage), color="red", attrs=["bold"]))
            sys.exit(1)
    else:
        # Announce disabled coverage run
        sys.stdout.write(colored(text="\nStep 2: Generating coverage results [SKIPPED].", color="yellow", attrs=["bold"]))

    # Announce success
    if enable_coverage:
        sys.stdout.write(colored(text="\nTests completed successfully with no errors. Congrats!\n", color="green", attrs=["bold"]))
    else:
        sys.stdout.write(colored(text="\nTests completed successfully, but some step(s) were skipped!\n", color="green", attrs=["bold"]))
        sys.stdout.write(colored(text="Don't push without running the skipped step(s).\n", color="red", attrs=["bold"]))
#!/usr/bin/env python
"""Manage-style entry point that runs Django commands, recording coverage
when the command is `test`."""
import os
import sys

from django.core import management

# Point to the correct settings for testing
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings'

if __name__ == "__main__":
    testing = 'test' in sys.argv

    if testing:
        from coverage import Coverage
        cov = Coverage()
        cov.erase()
        cov.start()

    # Django's `test` command exits via SystemExit when tests fail, which
    # previously skipped the coverage report entirely.  Run the command under
    # try/finally so the report is produced even on a failing run.
    try:
        management.execute_from_command_line()
    finally:
        if testing:
            cov.stop()
            cov.save()
            cov.report()
def run_tests(complete: bool = True, strict: bool = True, dry_run: bool = False ) -> (unittest.TestResult, Coverage, unittest.TestSuite): """ Run integration tests :param complete: When true ibllib unit tests are run in addition to the integration tests. :param strict: When true asserts that all gathered tests were successfully imported. This means that a module not found error in any test module will raise an exception. :param dry_run: When true the tests are gathered but not run. :return Test results and coverage objects, and test suite. """ # Coverage recorded for all code within the source directory; otherwise just omit some # common pyCharm files options = {'omit': ['*pydevd_file_utils.py', 'test_*'], 'source': []} # Gather tests test_dir = str(Path(ci.tests.__file__).parent) logger.info(f'Loading integration tests from {test_dir}') ci_tests = unittest.TestLoader().discover(test_dir, pattern='test_*') if complete: # include ibllib and brainbox unit tests root = Path(ibllib.__file__).parents[ 1] # Search relative to our imported ibllib package test_dirs = [ root.joinpath(x) for x in ('brainbox', 'oneibl', 'ibllib', 'alf') ] for tdir in test_dirs: logger.info(f'Loading unit tests from folders: {tdir}') assert tdir.exists(), f'Failed to find unit test folders in {tdir}' unit_tests = unittest.TestLoader().discover(str(tdir), pattern='test_*', top_level_dir=root) logger.info( f"Found {unit_tests.countTestCases()}, appending to the test suite" ) ci_tests = unittest.TestSuite((ci_tests, *unit_tests)) # for coverage, append the path of the test modules to the source key options['source'].append(str(tdir)) logger.info(f'Complete suite contains {ci_tests.countTestCases()} tests') # Check all tests loaded successfully not_loaded = [ x[12:] for x in list_tests(ci_tests) if x.startswith('_Failed') ] if len(not_loaded) != 0: err_msg = 'Failed to import the following tests:\n\t' + '\n\t'.join( not_loaded) assert not strict, err_msg logger.warning(err_msg) if dry_run: return 
unittest.TestResult(), Coverage(**options), ci_tests # Run tests with coverage cov = Coverage(**options) cov.start() result = unittest.TextTestRunner(verbosity=2).run(ci_tests) cov.stop() cov.save() return result, cov, ci_tests
def run_test_suite(args):
    """Configure Django, run the selected dj-stripe tests under nose, and
    report coverage.  Exits the process on test failure or a coverage drop."""
    enable_coverage = not args.no_coverage
    tests = args.tests

    # Coverage must start before the tested code is imported.
    if enable_coverage:
        cov = Coverage(config_file=True)
        cov.erase()
        cov.start()

    settings.configure(
        DEBUG=True,
        USE_TZ=True,
        TIME_ZONE="UTC",
        SITE_ID=1,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.postgresql_psycopg2",
                "NAME": "djstripe",
                "USER": "",
                "PASSWORD": "",
                "HOST": "",
                "PORT": "",
            },
        },
        TEMPLATES=[
            {
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [],
                'APP_DIRS': True,
                'OPTIONS': {
                    'context_processors': [
                        'django.contrib.auth.context_processors.auth',
                    ],
                },
            },
        ],
        ROOT_URLCONF="tests.test_urls",
        INSTALLED_APPS=[
            "django.contrib.admin",
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.sessions",
            "django.contrib.sites",
            "jsonfield",
            "djstripe",
            "tests",
            "tests.apps.testapp"
        ],
        MIDDLEWARE_CLASSES=(
            "django.contrib.sessions.middleware.SessionMiddleware",
            "django.contrib.auth.middleware.AuthenticationMiddleware",
            "django.contrib.messages.middleware.MessageMiddleware"),
        STRIPE_PUBLIC_KEY=os.environ.get("STRIPE_PUBLIC_KEY", ""),
        STRIPE_SECRET_KEY=os.environ.get("STRIPE_SECRET_KEY", ""),
        DJSTRIPE_PLANS={
            "test0": {
                "stripe_plan_id": "test_id_0",
                "name": "Test Plan 0",
                "description": "A test plan",
                "price": 1000,  # $10.00
                "currency": "usd",
                "interval": "month"
            },
            "test": {
                "stripe_plan_id": "test_id",
                "name": "Test Plan 1",
                "description": "Another test plan",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            },
            "test2": {
                "stripe_plan_id": "test_id_2",
                "name": "Test Plan 2",
                "description": "Yet Another test plan",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_deletion": {
                "stripe_plan_id": "test_id_3",
                "name": "Test Plan 3",
                "description": "Test plan for deletion.",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_trial": {
                "stripe_plan_id": "test_id_4",
                "name": "Test Plan 4",
                "description": "Test plan for trails.",
                "price": 7000,  # $70.00
                "currency": "usd",
                "interval": "month",
                "trial_period_days": 7
            },
            "unidentified_test_plan": {
                "name": "Unidentified Test Plan",
                "description": "A test plan with no ID.",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            }
        },
        DJSTRIPE_PLAN_HIERARCHY={
            "bronze": {
                "level": 1,
                "plans": [
                    "test0",
                    "test",
                ]
            },
            "silver": {
                "level": 2,
                "plans": [
                    "test2",
                    "test_deletion",
                ]
            },
            "gold": {
                "level": 3,
                "plans": [
                    "test_trial",
                    "unidentified_test_plan",
                ]
            },
        },
        DJSTRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS=(
            "(admin)",
            "test_url_name",
            "testapp_namespaced:test_url_namespaced",
            "fn:/test_fnmatch*"),
    )

    # Avoid AppRegistryNotReady exception
    # http://stackoverflow.com/questions/24793351/django-appregistrynotready
    if hasattr(django, "setup"):
        django.setup()

    # Announce the test suite
    sys.stdout.write(
        colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="dj-stripe", color="green", attrs=["bold"]))
    sys.stdout.write(
        colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    sys.stdout.write(
        colored(text="Step 1: Running unit tests.\n\n",
                color="yellow",
                attrs=["bold"]))

    # Hack to reset the global argv before nose has a chance to grab it
    # http://stackoverflow.com/a/1718407/1834570
    args = sys.argv[1:]
    sys.argv = sys.argv[0:1]

    from django_nose import NoseTestSuiteRunner

    test_runner = NoseTestSuiteRunner(verbosity=1, keepdb=True, failfast=True)
    failures = test_runner.run_tests(tests)

    if failures:
        sys.exit(failures)

    if enable_coverage:
        # Announce coverage run
        sys.stdout.write(
            colored(text="\nStep 2: Generating coverage results.\n\n",
                    color="yellow",
                    attrs=["bold"]))

        cov.stop()
        percentage = round(cov.report(show_missing=True), 2)
        cov.html_report(directory='cover')
        cov.save()

        if percentage < TESTS_THRESHOLD:
            # NOTE(review): {old}=TESTS_THRESHOLD reports the required
            # threshold, not a previous measurement -- confirm wording.
            sys.stderr.write(
                colored(
                    text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " +
                    "WAS {old}%, IS NOW {new}%.\n\n".format(
                        old=TESTS_THRESHOLD, new=percentage),
                    color="red",
                    attrs=["bold"]))
            sys.exit(1)
    else:
        # Announce disabled coverage run
        sys.stdout.write(
            colored(text="\nStep 2: Generating coverage results [SKIPPED].",
                    color="yellow",
                    attrs=["bold"]))

    # Announce success
    if enable_coverage:
        sys.stdout.write(
            colored(
                text=
                "\nTests completed successfully with no errors. Congrats!\n",
                color="green",
                attrs=["bold"]))
    else:
        sys.stdout.write(
            colored(
                text=
                "\nTests completed successfully, but some step(s) were skipped!\n",
                color="green",
                attrs=["bold"]))
        sys.stdout.write(
            colored(text="Don't push without running the skipped step(s).\n",
                    color="red",
                    attrs=["bold"]))
class CoverageScript(object):
    """The command-line interface to coverage.py."""

    def __init__(self):
        # True when the first argv token was a global switch (e.g. --help)
        # rather than a sub-command name.
        self.global_option = False
        # The Coverage instance, created lazily in command_line().
        self.coverage = None

    def command_line(self, argv):
        """The bulk of the command line interface to coverage.py.

        `argv` is the argument list to process, without the program name.

        Returns OK (0) if all is well, ERR (1) if something went wrong,
        or FAIL_UNDER if reporting succeeded but coverage was below the
        configured fail-under threshold.

        """
        # Collect the command-line options.
        if not argv:
            show_help(topic='minimum_help')
            return OK

        # The command syntax we parse depends on the first argument.  Global
        # switch syntax always starts with an option.
        self.global_option = argv[0].startswith('-')
        if self.global_option:
            parser = GlobalOptionParser()
        else:
            parser = CMDS.get(argv[0])
            if not parser:
                show_help("Unknown command: '%s'" % argv[0])
                return ERR
            # Consume the command name; the rest belongs to the sub-parser.
            argv = argv[1:]

        ok, options, args = parser.parse_args_ok(argv)
        if not ok:
            return ERR

        # Handle help and version.
        if self.do_help(options, args, parser):
            return OK

        # Listify the list options (comma/space separated strings -> lists).
        source = unshell_list(options.source)
        omit = unshell_list(options.omit)
        include = unshell_list(options.include)
        debug = unshell_list(options.debug)
        contexts = unshell_list(options.contexts)

        # Do something.
        self.coverage = Coverage(
            data_suffix=options.parallel_mode,
            cover_pylib=options.pylib,
            timid=options.timid,
            branch=options.branch,
            config_file=options.rcfile,
            source=source,
            omit=omit,
            include=include,
            debug=debug,
            concurrency=options.concurrency,
            check_preimported=True,
            context=options.context,
            )

        # Dispatch the non-reporting actions first; each returns directly.
        if options.action == "debug":
            return self.do_debug(args)

        elif options.action == "erase":
            self.coverage.erase()
            return OK

        elif options.action == "run":
            return self.do_run(options, args)

        elif options.action == "combine":
            if options.append:
                self.coverage.load()
            data_dirs = args or None
            self.coverage.combine(data_dirs, strict=True)
            self.coverage.save()
            return OK

        # Remaining actions are reporting, with some common options.
        report_args = dict(
            morfs=unglob_args(args),
            ignore_errors=options.ignore_errors,
            omit=omit,
            include=include,
            contexts=contexts,
            )

        # We need to be able to import from the current directory, because
        # plugins may try to, for example, to read Django settings.
        sys.path.insert(0, '')

        self.coverage.load()

        # `total` stays None for actions (annotate) that produce no
        # percentage; only a numeric total is checked against fail-under.
        total = None

        if options.action == "report":
            total = self.coverage.report(
                show_missing=options.show_missing,
                skip_covered=options.skip_covered,
                skip_empty=options.skip_empty,
                precision=options.precision,
                sort=options.sort,
                **report_args
                )
        elif options.action == "annotate":
            self.coverage.annotate(directory=options.directory, **report_args)
        elif options.action == "html":
            total = self.coverage.html_report(
                directory=options.directory,
                title=options.title,
                skip_covered=options.skip_covered,
                skip_empty=options.skip_empty,
                show_contexts=options.show_contexts,
                precision=options.precision,
                **report_args
                )
        elif options.action == "xml":
            outfile = options.outfile
            total = self.coverage.xml_report(
                outfile=outfile,
                skip_empty=options.skip_empty,
                **report_args
                )
        elif options.action == "json":
            outfile = options.outfile
            total = self.coverage.json_report(
                outfile=outfile,
                pretty_print=options.pretty_print,
                show_contexts=options.show_contexts,
                **report_args
                )

        if total is not None:
            # Apply the command line fail-under options, and then use the config
            # value, so we can get fail_under from the config file.
            if options.fail_under is not None:
                self.coverage.set_option("report:fail_under", options.fail_under)

            fail_under = self.coverage.get_option("report:fail_under")
            precision = self.coverage.get_option("report:precision")
            if should_fail_under(total, fail_under, precision):
                msg = "total of {total:.{p}f} is less than fail-under={fail_under:.{p}f}".format(
                    total=total, fail_under=fail_under, p=precision,
                )
                print("Coverage failure:", msg)
                return FAIL_UNDER

        return OK

    def do_help(self, options, args, parser):
        """Deal with help requests.

        Return True if it handled the request, False if not.

        """
        # Handle help.
        if options.help:
            if self.global_option:
                show_help(topic='help')
            else:
                show_help(parser=parser)
            return True

        if options.action == "help":
            if args:
                # `coverage help <cmd>`: show help for each named command,
                # falling back to topic help for non-command names.
                for a in args:
                    parser = CMDS.get(a)
                    if parser:
                        show_help(parser=parser)
                    else:
                        show_help(topic=a)
            else:
                show_help(topic='help')
            return True

        # Handle version.
        if options.version:
            show_help(topic='version')
            return True

        return False

    def do_run(self, options, args):
        """Implementation of 'coverage run'.

        Returns OK on success, ERR on usage errors; re-raises NoSource if
        the target could not be found (after stopping collection).

        """
        if not args:
            if options.module:
                # Specified -m with nothing else.
                show_help("No module specified for -m")
                return ERR
            # Fall back to the command line configured in the rc file.
            command_line = self.coverage.get_option("run:command_line")
            if command_line is not None:
                args = shlex.split(command_line)
                if args and args[0] == "-m":
                    options.module = True
                    args = args[1:]
        if not args:
            show_help("Nothing to do.")
            return ERR

        if options.append and self.coverage.get_option("run:parallel"):
            show_help("Can't append to data files in parallel mode.")
            return ERR

        if options.concurrency == "multiprocessing":
            # Can't set other run-affecting command line options with
            # multiprocessing.
            for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
                # As it happens, all of these options have no default, meaning
                # they will be None if they have not been specified.
                if getattr(options, opt_name) is not None:
                    show_help(
                        "Options affecting multiprocessing must only be specified "
                        "in a configuration file.\n"
                        "Remove --{} from the command line.".format(opt_name)
                        )
                    return ERR

        runner = PyRunner(args, as_module=bool(options.module))
        runner.prepare()

        if options.append:
            self.coverage.load()

        # Run the script.
        self.coverage.start()
        code_ran = True
        try:
            runner.run()
        except NoSource:
            # Nothing was executed; don't save an empty data file below.
            code_ran = False
            raise
        finally:
            self.coverage.stop()
            if code_ran:
                self.coverage.save()

        return OK

    def do_debug(self, args):
        """Implementation of 'coverage debug'.

        Each argument names a kind of diagnostic information to print.

        """
        if not args:
            show_help("What information would you like: config, data, sys, premain?")
            return ERR

        for info in args:
            if info == 'sys':
                sys_info = self.coverage.sys_info()
                print(info_header("sys"))
                for line in info_formatter(sys_info):
                    print(" %s" % line)
            elif info == 'data':
                self.coverage.load()
                data = self.coverage.get_data()
                print(info_header("data"))
                print("path: %s" % self.coverage.get_data().data_filename())
                if data:
                    print("has_arcs: %r" % data.has_arcs())
                    summary = line_counts(data, fullpath=True)
                    filenames = sorted(summary.keys())
                    print("\n%d files:" % len(filenames))
                    for f in filenames:
                        line = "%s: %d lines" % (f, summary[f])
                        plugin = data.file_tracer(f)
                        if plugin:
                            line += " [%s]" % plugin
                        print(line)
                else:
                    print("No data collected")
            elif info == 'config':
                print(info_header("config"))
                config_info = self.coverage.config.__dict__.items()
                for line in info_formatter(config_info):
                    print(" %s" % line)
            elif info == "premain":
                print(info_header("premain"))
                print(short_stack())
            else:
                show_help("Don't know what you mean by %r" % info)
                return ERR

        return OK
def main():
    """Entry point for the Smart Classroom Django server handler.

    Prepares the console on supported platforms, prints the project
    banner, then delegates to Django's ``execute_from_command_line``.
    When invoked as ``<script> test`` the Django command is wrapped in a
    coverage.py measurement session and the run exits with status 0
    after the reports are written.
    """
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SmartClassroom.settings")

    # Console setup is platform-specific; refuse unsupported platforms.
    if ReturnedOSName == "win32":
        os.system("title Smart Classroom Django Server Handler")
        os.system("CLS")
    elif ReturnedOSName == "linux":
        pass
    else:
        # Was exit(-1): the site.exit builtin is only guaranteed in
        # interactive sessions; sys.exit is the correct API for scripts.
        sys.exit(-1)

    print("Smart Classroom Django Server Handler | < Django Project Caller >")
    print(
        '02/29/2020 | By Janrey "CodexLink" Licas | http://github.com/CodexLink\n'
    )
    print("In Collaboration with")
    print(" - Ronald Langaoan Jr. |> Hardware Designer and Manager")
    print(
        " - Janos Angelo Jantoc |> Hardware Designer and Assistant Programmer"
    )
    print(" - Joshua Santos |> Hardware Manager and Builder")
    print(" - Johnell Casey Murillo Panotes |> Hardware Assistant\n")

    # Bug fix: the original indexed sys.argv[1] unconditionally, raising
    # IndexError when the script was launched without any sub-command.
    # Evaluate the "test" check once and reuse it for both phases.
    run_coverage = len(sys.argv) > 1 and sys.argv[1] == "test"

    if run_coverage:
        print("\n - Coverage Report Activated!")
        from coverage import Coverage
        print("| - Coverage > Importing...")
        baseCov = Coverage()
        print("| - Coverage > Initializing...")
        baseCov.erase()
        print("| - Coverage > Erasing Recent Reports...")
        baseCov.start()
        print("| - Coverage > Code Coverage Start!")

    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?") from exc

    execute_from_command_line(sys.argv)

    if run_coverage:
        baseCov.stop()
        baseCov.save()
        # report() returns the total coverage percentage.
        baseCovReport = baseCov.report()
        print("\n | - Coverage > Report Saved.")
        baseCov.html_report()
        # reqCovPercentage is the module-level pass threshold.
        if baseCovReport < reqCovPercentage:
            print(
                "\n | - Coverage > Code Coverage Completed But Fails To Meet Required Passing Rate of {0}%..."
                .format(reqCovPercentage))
        else:
            print(
                "\n | - Coverage > Code Coverage Completed and Passed Above Required Passing Rate of {0}%!"
                .format(reqCovPercentage))
        print("\n | - Coverage > Done~!\n")
        sys.exit(0)
class TestRunner(BenchmarkRunner):
    """Benchmark runner driving NEO's unit, functional and ZODB suites."""

    def add_options(self, parser):
        """Register test-selection and behaviour flags on `parser`."""
        # Mutually exclusive coverage modes: one global data file (-c) vs.
        # one data file per test (-C).
        x = parser.add_mutually_exclusive_group().add_argument
        x('-c', '--coverage', action='store_true',
          help='Enable coverage')
        x('-C', '--cov-unit', action='store_true',
          help='Same as -c but output 1 file per test,'
               ' in the temporary test directory')
        _ = parser.add_argument
        _('-L', '--log', action='store_true',
          help='Force all logs to be emitted immediately and keep'
               ' packet body in logs of successful threaded tests')
        _('-l', '--loop', type=int, default=1,
          help='Repeat tests several times')
        _('-f', '--functional', action='store_true',
          help='Functional tests')
        # -s and -S share one destination: None (default) stops on first
        # failure, False keeps going on errors, True stops on success.
        x = parser.add_mutually_exclusive_group().add_argument
        x('-s', '--stop-on-error', action='store_false',
          dest='stop_on_success', default=None,
          help='Continue as long as tests pass successfully.'
               ' It is usually combined with --loop, to check that tests'
               ' do not fail randomly.')
        x('-S', '--stop-on-success', action='store_true', default=None,
          help='Opposite of --stop-on-error: stop as soon as a test'
               ' passes. Details about errors are not printed at exit.')
        x = parser.add_mutually_exclusive_group().add_argument
        x('-p', '--dump-protocol', const=True, dest='protocol',
          action='store_const',
          help='Dump schema of protocol instead of checking it.')
        x('-P', '--no-check-protocol', const=False, dest='protocol',
          action='store_const',
          help='Do not check schema of protocol.')
        _('-r', '--readable-tid', action='store_true',
          help='Change master behaviour to generate readable TIDs for easier'
               ' debugging (rather than from current time).')
        _('-u', '--unit', action='store_true',
          help='Unit & threaded tests')
        _('-z', '--zodb', action='store_true',
          help='ZODB test suite running on a NEO')
        _('-v', '--verbose', action='store_true',
          help='Verbose output')
        _('only', nargs=argparse.REMAINDER, metavar='[[!] module [test...]]',
          help="Filter by given module/test. These arguments are shell"
               " patterns. This implies -ufz if none of this option is"
               " passed.")
        parser.epilog = """
Environment Variables:
  NEO_TESTS_ADAPTER         Default is SQLite for threaded clusters,
                            MySQL otherwise.

  MySQL specific:
    NEO_DB_SOCKET           default: libmysqlclient.so default
    NEO_DB_PREFIX           default: %(DB_PREFIX)s
    NEO_DB_ADMIN            default: %(DB_ADMIN)s
    NEO_DB_PASSWD           default: %(DB_PASSWD)s
    NEO_DB_USER             default: %(DB_USER)s

  ZODB tests:
    NEO_TEST_ZODB_FUNCTIONAL  Clusters are threaded by default. If true,
                              they are built like in functional tests.
    NEO_TEST_ZODB_MASTERS     default: 1
    NEO_TEST_ZODB_PARTITIONS  default: 1
    NEO_TEST_ZODB_REPLICAS    default: 0
    NEO_TEST_ZODB_STORAGES    default: 1
""" % neo_tests__dict__

    def load_options(self, args):
        """Translate parsed arguments into the runner's config dict."""
        # With no explicit suite selection, a bare filter pattern implies
        # running everything (equivalent to -ufz); no pattern is an error.
        if not (args.unit or args.functional or args.zodb):
            if not args.only:
                sys.exit('Nothing to run, please give one of -f, -u, -z')
            args.unit = args.functional = args.zodb = True
        return dict(
            log=args.log,
            loop=args.loop,
            unit=args.unit,
            functional=args.functional,
            zodb=args.zodb,
            verbosity=2 if args.verbose else 1,
            coverage=args.coverage,
            cov_unit=args.cov_unit,
            only=args.only,
            protocol=args.protocol,
            stop_on_success=args.stop_on_success,
            readable_tid=args.readable_tid,
        )

    def start(self):
        """Run the selected test suites and return the formatted report."""
        config = self._config
        # Buffer up to 1 MiB of log packets; -L disables the size cap so
        # all logs are emitted immediately.
        logging.backlog(max_packet=1 << 20,
                        **({'max_size': None} if config.log else {}))
        only = config.only
        # run requested tests
        runner = NeoTestRunner(config.title or 'Neo', config.verbosity,
                               config.stop_on_success, config.readable_tid)
        if config.cov_unit:
            # Per-test coverage (-C): patch NeoTestBase so each test gets
            # its own Coverage instance writing into the temp directory.
            from coverage import Coverage
            cov_dir = runner.temp_directory + '/coverage'
            os.mkdir(cov_dir)

            @Patch(NeoTestBase)
            def setUp(orig, self):
                orig(self)
                # NOTE(review): __coverage is name-mangled against the
                # enclosing TestRunner class; both patched methods are
                # defined in this same scope, so the mangled names match.
                self.__coverage = Coverage('%s/%s' % (cov_dir, self.id()))
                self.__coverage.start()

            @Patch(NeoTestBase)
            def _tearDown(orig, self, success):
                self.__coverage.stop()
                self.__coverage.save()
                del self.__coverage
                orig(self, success)

        if config.protocol is False:
            # -P: protocol checking disabled; use a no-op context manager.
            from contextlib import nested
            protocol_checker = nested()
        else:
            from neo.tests.protocol_checker import protocolChecker
            protocol_checker = protocolChecker(config.protocol)
        with protocol_checker:
            try:
                for _ in xrange(config.loop):
                    if config.unit:
                        runner.run('Unit tests', UNIT_TEST_MODULES, only)
                    if config.functional:
                        runner.run('Functional tests', FUNC_TEST_MODULES, only)
                    if config.zodb:
                        runner.run('ZODB tests', ZODB_TEST_MODULES, only)
            except KeyboardInterrupt:
                # NOTE(review): item assignment here vs. attribute access
                # (config.mail_to) below -- presumably the config object
                # supports both; confirm against BenchmarkRunner.
                config['mail_to'] = None
                traceback.print_exc()
            except StopOnSuccess:
                pass
        if config.coverage:
            # Global coverage mode (-c): `coverage` here is a module-level
            # object started elsewhere in this file.
            coverage.stop()
            if coverage.neotestrunner:
                coverage.combine(coverage.neotestrunner)
            coverage.save()
        if runner.dots:
            print
        # build report
        if (only or config.stop_on_success) and not config.mail_to:
            # Strip per-test details from the summary when they would only
            # repeat what was already printed during the run.
            runner._buildSummary = lambda *args: (
                runner.__class__._buildSummary(runner, *args)[0], '')
            self.build_report = str
        self._successful = runner.wasSuccessful()
        return runner.buildReport(self.add_status)
class CodeCoverage(object):
    """Radish extension that measures code coverage during a test run.

    Registers ``before.all``/``after.all`` hooks which start a coverage.py
    session before the features run and produce reports afterwards.
    """

    OPTIONS = [
        ('--with-coverage', 'enable code coverage'),
        ('--cover-packages=<cover_packages>', 'specify source code package'),
        ('--cover-append', 'append coverage data to previous collected data'),
        ('--cover-config-file=<cover_config_file>',
         'specify coverage config file [default: .coveragerc]'),
        ('--cover-branches', 'include branch coverage in report'),
        ('--cover-erase', 'erase previously collected coverage data'),
        ('--cover-min-percentage=<cover_min_percentage>',
         'fail if the given minimum coverage percentage is not reached'),
        ('--cover-html=<cover_html_dir>',
         'specify a directory where to store HTML coverage report'),
        ('--cover-xml=<cover_xml_file>',
         'specify a file where to store XML coverage report')
    ]
    LOAD_IF = staticmethod(lambda config: config.with_coverage)
    LOAD_PRIORITY = 70

    def __init__(self):
        # Probe the optional dependency up front so the user gets a clear
        # message instead of an ImportError in the middle of a run.
        try:
            from coverage import Coverage  # noqa
        except ImportError:
            raise RadishError(
                'if you want to use the code coverage you have to "pip install radish-bdd[coverage]"'
            )

        before.all(self.coverage_start)
        after.all(self.coverage_stop)

        packages_option = world.config.cover_packages
        self.cover_packages = packages_option.split(",") if packages_option else []

        self.coverage = None
        # Snapshot of loaded modules at plugin init; used later to detect
        # which modules were loaded from radish's basedir.
        self.modules_on_init = set(sys.modules.keys())

    def coverage_start(self, features, marker):
        """Hook run before all features: begin the coverage measurement."""
        from coverage import Coverage

        # Without an explicit package list, measure whatever was imported
        # since plugin init (i.e. the modules loaded from radish's basedir,
        # which are not yet present when __init__ runs).
        if self.cover_packages:
            measured = self.cover_packages
        else:
            measured = list(
                set(sys.modules.keys()).difference(self.modules_on_init))

        self.coverage = Coverage(
            source=measured,
            config_file=world.config.cover_config_file,
            branch=world.config.cover_branches,
        )

        if world.config.cover_erase:
            self.coverage.combine()
            self.coverage.erase()
        if world.config.cover_append:
            self.coverage.load()

        self.coverage.start()

    def coverage_stop(self, features, marker):
        """Hook run after all features: stop measuring and emit reports."""
        self.coverage.stop()
        self.coverage.combine()
        self.coverage.save()
        self.coverage.report(file=sys.stdout)

        if world.config.cover_html:
            self.coverage.html_report(directory=world.config.cover_html)
        if world.config.cover_xml:
            self.coverage.xml_report(outfile=world.config.cover_xml)

        if world.config.cover_min_percentage:
            # Re-run the textual report into a buffer and scrape the TOTAL
            # line to recover the overall percentage.
            buffered_report = StringIO()
            self.coverage.report(file=buffered_report)
            total_match = re.search(r'^TOTAL\s+(.*)$',
                                    buffered_report.getvalue(), re.MULTILINE)
            if not total_match:
                raise RadishError(
                    'Failed to find total percentage in coverage report')

            # Last whitespace-separated field, minus its trailing '%'.
            reached = int(total_match.groups()[0].split()[-1][:-1])
            if reached < int(world.config.cover_min_percentage):
                raise RadishError(
                    'Failed to reach minimum expected coverage of {0}% (reached: {1}%)'
                    .format(world.config.cover_min_percentage, reached))
def run_tests(self, test_labels, extra_tests=None, **kwargs):
    """Run the test suite under coverage.py and report the results.

    `test_labels` are the Django app/test labels to run. Coverage is
    collected around the parent runner's execution and then reported to
    stdout and/or as HTML according to the COVERAGE_* settings.
    Returns whatever the parent class's ``run_tests`` returns.
    """
    # 1. Configure coverage from the project settings.
    opts = {
        'auto_data': settings.COVERAGE_USE_CACHE,
    }
    if settings.COVERAGE_SOURCE:
        opts['source'] = settings.COVERAGE_SOURCE
    if settings.COVERAGE_CONFIG_FILE:
        opts['config_file'] = settings.COVERAGE_CONFIG_FILE
    coverage = Coverage(**opts)
    for e in settings.COVERAGE_CODE_EXCLUDES:
        coverage.exclude(e)

    # Measure only what runs inside the parent runner.
    coverage.start()
    results = super(CoverageRunner, self).run_tests(test_labels, extra_tests,
                                                    **kwargs)
    coverage.stop()

    # 2. Collect the modules whose coverage should be reported: either the
    # apps named by the given labels, or every installed app.
    coverage_modules = []
    if test_labels:
        for label in test_labels:
            # Labels may be dotted ("app.TestCase") or path-like; keep
            # only the app part.
            label = label.split('.')[0].rstrip('/')
            app = get_app(label)
            coverage_modules.append(self._get_app_package(app))
    else:
        for app in get_apps():
            coverage_modules.append(self._get_app_package(app))
    coverage_modules.extend(settings.COVERAGE_ADDITIONAL_MODULES)

    # Load all the packages, modules, etc., resolving excludes and errors.
    packages, modules, excludes, errors = get_all_modules(
        coverage_modules, settings.COVERAGE_MODULE_EXCLUDES,
        settings.COVERAGE_PATH_EXCLUDES)

    if settings.COVERAGE_USE_STDOUT:
        coverage.report(modules.values(), show_missing=1)
        if excludes:
            message = "The following packages or modules were excluded:"
            print("")
            print(message)
            for e in excludes:
                print(e)
            print("")
        if errors:
            message = "There were problems with the following packages "
            message += "or modules:"
            print("")
            print(message)
            for e in errors:
                print(e)
            print("")

    # Emit the HTML-format report.
    # modules: module_name --> module
    outdir = settings.COVERAGE_REPORT_HTML_OUTPUT_DIR
    if outdir:
        outdir = os.path.abspath(outdir)
        # By default, use the 55minutes-style custom HTML report.
        if settings.COVERAGE_CUSTOM_REPORTS:
            html_report(coverage, outdir, modules, excludes, errors)
        else:
            coverage.html_report(modules.values(), outdir)
        print("")
        print("HTML reports were output to '%s'" % outdir)

    return results