class my_test(unittest.TestCase):
    """Unit tests that record per-test coverage into a directory named after the test.

    Relies on a naming convention: a method named ``test_<module>`` measures
    coverage of ``<module>.py`` (plus the shared ``comm.py``).
    """

    def setUp(self):
        # Create the per-test output directory for the HTML coverage report.
        # os.makedirs is portable and avoids shelling out via os.system('mkdir ...'),
        # which was non-portable and unsafe for odd test names.
        if not os.path.exists(self._testMethodName):
            os.makedirs(self._testMethodName)
        # Naming convention: "test_<name>" selects <name>.py as the file under test.
        if self._testMethodName.startswith("test_"):
            include_name = self._testMethodName[5:]
            self.cov = Coverage(include=['comm.py', '%s.py' % include_name])
            self.cov.start()
        return super().setUp()

    def test_add(self):
        # Each case is (a, b, expected); expected is either a numeric result
        # (code 0) or an error message (code 1).
        cases = deal_case(read_file("add_test.txt", is_line=True))
        for case in cases:
            result = add(case[0], case[1])
            if 0 == result['code']:
                self.assertEqual(result['result'], float(case[2]))
            elif 1 == result['code']:
                self.assertEqual(result['msg'], case[2])

    def tearDown(self):
        # Stop measurement, write the HTML report into the per-test directory,
        # then erase the raw data so runs do not accumulate.
        self.cov.stop()
        self.cov.save()
        self.cov.html_report(directory='%s' % self._testMethodName)
        self.cov.erase()
        return super().tearDown()
def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None,
        profile=False, coverage=False, junit_xml_output=False, ui_tests = False,
        doctype_list_path=None, skip_test_records=False, skip_before_tests=False,
        failfast=False):
    "Run tests"
    # Bench CLI entry point: runs the Frappe test runner for one site,
    # optionally wrapped in coverage measurement for the app under test.
    # NOTE(review): `driver` is accepted but never used in this body.
    import frappe.test_runner
    tests = test
    site = get_site(context)
    frappe.init(site=site)
    frappe.flags.skip_before_tests = skip_before_tests
    frappe.flags.skip_test_records = skip_test_records
    if coverage:
        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
        cov = Coverage(source=[source_path],
            omit=['*.html', '*.js', '*.xml', '*.css', '*/doctype/*/*_dashboard.py', '*/patches/*'])
        cov.start()
    ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests,
        force=context.force, profile=profile, junit_xml_output=junit_xml_output,
        ui_tests = ui_tests, doctype_list_path = doctype_list_path, failfast=failfast)
    if coverage:
        cov.stop()
        cov.save()
    # A clean run exits 0; otherwise `ret` (a TestResult) is passed to
    # sys.exit, which yields a non-zero status on CI.
    if len(ret.failures) == 0 and len(ret.errors) == 0:
        ret = 0
    if os.environ.get('CI'):
        sys.exit(ret)
def generate_coverage_reports(reports_dir=COVERAGE_DIR, report="all"):
    '''
    This task combines all the coverage data files generated during all the
    tests and generates the coverage reports.

    :param report: type of the coverage report to be generated.
        Options: all|text|html|xml. Defaults to 'all'.
    :param reports_dir: directory under which all coverage files are to be
        found and reports are to be generated. Defaults to reports/coverage
    '''
    print("\nInitiating code coverage analysis ...")
    # Merge all parallel data files into a single .coverage data file.
    coverage_obj = Coverage(data_file="%s/coverage" % reports_dir)
    coverage_obj.combine()
    coverage_obj.save()

    # One small helper per report format so "all" and the single-format
    # branches share exactly one implementation each.
    def _text_report():
        with open("%s/coverage.txt" % reports_dir, "w") as cov_rep:
            coverage_obj.report(file=cov_rep, show_missing=True)
        print("Generated Code-Coverage text report at %s/coverage.txt\n" % reports_dir)

    def _html_report():
        coverage_obj.html_report(directory=reports_dir)
        print("Generated Code-Coverage HTML report at %s\n" % reports_dir)

    def _xml_report():
        coverage_obj.xml_report(outfile='%s/coverage.xml' % reports_dir)
        print("Generated Code-Coverage XML report at %s/coverage.xml\n" % reports_dir)

    if report == "all":
        _text_report()
        _html_report()
        _xml_report()
    elif report == "text":
        _text_report()
    elif report == "html":
        _html_report()
    elif report == "xml":
        _xml_report()
def main():
    """Django management entry point; wraps `test` runs in coverage measurement."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')
    # Determine the subcommand up front. Guarding on len(sys.argv) fixes an
    # unhandled IndexError on a bare `python manage.py` invocation.
    running_tests = len(sys.argv) > 1 and sys.argv[1] == 'test'
    # MyProject Customization: run coverage.py around tests automatically.
    # Imported outside the try below so a missing `coverage` package is not
    # misreported as a missing Django installation.
    if running_tests:
        from coverage import Coverage
        cov = Coverage()
        cov.erase()
        cov.start()
    try:
        from django.core.management import execute_from_command_line
        from django.conf import settings
        if settings.DEBUG:
            # Attach the remote debugger once per reloader child process.
            if os.environ.get('RUN_MAIN') or os.environ.get('WERKZEUG_RUN_MAIN'):
                import ptvsd
                ptvsd.enable_attach(address=('0.0.0.0', 3500))
                print("Attached remote debugger")
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
    if running_tests:
        cov.stop()
        cov.save()
        covered = cov.report()
        # Fail the process when line coverage is below 100%.
        if covered < 100:
            sys.exit(1)
def main():
    """Entry point: dispatch to Django, measuring coverage during `test` runs."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django-graphql.settings')
    try:
        command = sys.argv[1]
    except IndexError:
        command = "help"
    running_tests = command == 'test'
    if running_tests:
        # Start coverage before Django is imported so its code is measured too.
        from coverage import Coverage
        cov = Coverage()
        cov.erase()
        cov.start()
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?") from exc
    execute_from_command_line(sys.argv)
    if running_tests:
        cov.stop()
        cov.save()
        # Anything below 100% line coverage fails the build.
        if cov.report() < 100:
            sys.exit(1)
def run_tests(self): " Run coverage on unit test " # need to import here cause we are in a venv import six from coverage import Coverage coverage = Coverage() coverage.start() # Purge modules under test from sys.modules. The test loader will # re-import them from the build location. Required when 2to3 is used # with namespace packages. if six.PY3 and getattr(self.distribution, 'use_2to3', False): module = self.test_suite.split('.')[0] if module in _namespace_packages: del_modules = [] if module in sys.modules: del_modules.append(module) module += '.' for name in sys.modules: if name.startswith(module): del_modules.append(name) list(map(sys.modules.__delitem__, del_modules)) unittest_main( None, None, self._argv, testLoader=self._resolve_as_ep(self.test_loader), testRunner=self._resolve_as_ep(self.test_runner), exit=False, ) coverage.stop() coverage.save() coverage.report(show_missing=False)
class CodeCoverage():
    """Context manager measuring code coverage for a single Frappe app.

    When `with_coverage` is false the manager is a no-op.  On exit an XML
    coverage report is written.
    """

    def __init__(self, with_coverage, app):
        self.with_coverage = with_coverage
        self.app = app or 'frappe'

    def __enter__(self):
        if not self.with_coverage:
            return
        import os
        from coverage import Coverage
        from frappe.utils import get_bench_path

        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', self.app)
        omit = STANDARD_EXCLUSIONS[:]
        if self.app == 'frappe':
            omit.extend(FRAPPE_EXCLUSIONS)
        self.coverage = Coverage(source=[source_path], omit=omit,
                                 include=STANDARD_INCLUSIONS)
        self.coverage.start()

    def __exit__(self, exc_type, exc_value, traceback):
        if self.with_coverage:
            self.coverage.stop()
            self.coverage.save()
            self.coverage.xml_report()
def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None,
        profile=False, coverage=False, junit_xml_output=False, ui_tests = False,
        doctype_list_path=None, skip_test_records=False, skip_before_tests=False,
        failfast=False):
    "Run tests"
    # Bench CLI entry point: run the Frappe test runner for one site,
    # optionally measuring coverage for the app under test.
    # NOTE(review): `driver` is accepted but never used in this body.
    import frappe.test_runner
    tests = test
    site = get_site(context)
    frappe.init(site=site)
    frappe.flags.skip_before_tests = skip_before_tests
    frappe.flags.skip_test_records = skip_test_records
    if coverage:
        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
        cov = Coverage(source=[source_path], omit=['*.html', '*.js', '*.css'])
        cov.start()
    ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests,
        force=context.force, profile=profile, junit_xml_output=junit_xml_output,
        ui_tests = ui_tests, doctype_list_path = doctype_list_path, failfast=failfast)
    if coverage:
        cov.stop()
        cov.save()
    # Clean runs exit 0 on CI; otherwise the TestResult is handed to sys.exit
    # which produces a non-zero exit status.
    if len(ret.failures) == 0 and len(ret.errors) == 0:
        ret = 0
    if os.environ.get('CI'):
        sys.exit(ret)
def main():
    # Test-orchestration entry point: select test modules, run each one,
    # and (optionally) collect/merge coverage data at the end.
    options = parse_args()
    test_directory = os.path.dirname(os.path.abspath(__file__))
    selected_tests = get_selected_tests(options)
    if options.verbose:
        print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
    # Fresh coverage data unless an outer process is collecting it globally.
    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(['coverage', 'erase'])
    if options.jit:
        # NOTE(review): filters from the full TESTS list, discarding any
        # earlier selection — presumably intentional for --jit runs.
        selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)
    if options.determine_from is not None and os.path.exists(options.determine_from):
        # Target determination: keep only tests affected by the touched files.
        with open(options.determine_from, 'r') as fh:
            touched_files = [
                os.path.normpath(name.strip()) for name in fh.read().split('\n')
                if len(name.strip()) > 0
            ]
        # HACK: Ensure the 'test' paths can be traversed by Modulefinder
        sys.path.append('test')
        selected_tests = [
            test for test in selected_tests
            if determine_target(test, touched_files, options)
        ]
        sys.path.remove('test')
    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            # Fail fast unless --continue-through-error was requested.
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        # Always merge coverage data, even when a test raised.
        if options.coverage:
            from coverage import Coverage
            test_dir = os.path.dirname(os.path.abspath(__file__))
            with set_cwd(test_dir):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    cov.load()
                cov.combine(strict=False)
                cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()
    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
def run_tests():
    # Run the unit-test suite under coverage; paths come from project config
    # with sensible fallbacks.
    config = utils.get_config()
    cov_conf_file = config.get('file_locations', 'coverage_in_conf',
                               fallback='config/.coveragerc')
    cov_out_file = config.get('file_locations', 'coverage_out_dir',
                              fallback='coverage')
    cov = Coverage(config_file=cov_conf_file)
    cov.start()
    result = grab_test_results()
    if not result.wasSuccessful():
        # NOTE(review): exiting here leaves coverage running and the data
        # unsaved — confirm whether a report is wanted on failure too.
        print("Error running unit tests ...")
        exit(1)
    cov.stop()
    cov.save()
    print('Coverage Summary:')
    cov.report()
    cov.html_report(directory=cov_out_file)
    print('HTML version: file://{0}/{1}/index.html'.format(
        os.getcwd(), cov_out_file))
    # Erase raw data after the reports are written.
    cov.erase()
    exit(0)
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
    # MyMoney Customization: run coverage.py around tests automatically
    try:
        command = sys.argv[1]
    except IndexError:
        command = "help"
    running_tests = (command == 'test')
    if running_tests:
        from coverage import Coverage
        cov = Coverage()
        cov.erase()
        cov.start()
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?") from exc
    execute_from_command_line(sys.argv)
    if running_tests:
        cov.stop()
        cov.save()
        covered = cov.report()
        if covered < 100:
            # BUG FIX: previously exited with status 0, so the coverage gate
            # could never fail the build. Exit non-zero, matching the
            # identical sibling runners in this codebase.
            sys.exit(1)
def _run_avocado(runnable, queue):
    """Run a single avocado-instrumented test in this process, reporting
    progress and results back through `queue`. Never raises: any exception
    is converted into stderr + a FinishedMessage('error')."""
    try:
        # This assumes that a proper resolution (see resolver module)
        # was performed, and that a URI contains:
        # 1) path to python module
        # 2) class
        # 3) method
        #
        # To be defined: if the resolution uri should be composed like
        # this, or broken down and stored into other data fields
        module_path, klass_method = runnable.uri.split(":", 1)
        klass, method = klass_method.split(".", 1)
        params = AvocadoInstrumentedTestRunner._create_params(runnable)
        result_dir = runnable.output_dir or tempfile.mkdtemp(prefix=".avocado-task")
        test_factory = [
            klass,
            {
                "name": TestID(1, runnable.uri, runnable.variant),
                "methodName": method,
                "config": runnable.config,
                "modulePath": module_path,
                "params": params,
                "tags": runnable.tags,
                "run.results_dir": result_dir,
            },
        ]
        messages.start_logging(runnable.config, queue)
        # Optionally measure coverage when the parent run requested it.
        if "COVERAGE_RUN" in os.environ:
            from coverage import Coverage
            coverage = Coverage()
            coverage.start()
        instance = loader.load_test(test_factory)
        # Publish early state before the test actually runs.
        early_state = instance.get_state()
        early_state["type"] = "early_state"
        queue.put(early_state)
        instance.run_avocado()
        if "COVERAGE_RUN" in os.environ:
            coverage.stop()
            coverage.save()
        state = instance.get_state()
        fail_reason = state.get("fail_reason")
        queue.put(messages.WhiteboardMessage.get(state["whiteboard"]))
        queue.put(
            messages.FinishedMessage.get(
                state["status"].lower(), fail_reason=fail_reason
            )
        )
    except Exception as e:
        queue.put(messages.StderrMessage.get(traceback.format_exc()))
        queue.put(messages.FinishedMessage.get("error", fail_reason=str(e)))
def coverage():
    """Run the full test suite with coverage measurement.

    Writes an HTML report and returns the failure count from run().
    """
    from coverage import Coverage
    recorder = Coverage()
    recorder.start()
    failures = run()
    recorder.stop()
    recorder.save()
    recorder.html_report()
    return failures
def _bootstrap(self):
    """Child-process bootstrap wrapper that records coverage for the subprocess.

    data_suffix=True gives every process its own data file so parallel runs
    can be combined later.
    """
    from coverage import Coverage
    recorder = Coverage(data_suffix=True)
    recorder.start()
    try:
        return original_bootstrap(self)
    finally:
        # Always persist data, even when the child raised.
        recorder.stop()
        recorder.save()
def _run_avocado(runnable, queue):
    """Run one avocado-instrumented test in this process, streaming state and
    results back through `queue`. Exceptions become an error FinishedMessage."""
    try:
        # This assumes that a proper resolution (see resolver module)
        # was performed, and that a URI contains:
        # 1) path to python module
        # 2) class
        # 3) method
        #
        # To be defined: if the resolution uri should be composed like
        # this, or broken down and stored into other data fields
        module_path, klass_method = runnable.uri.split(':', 1)
        klass, method = klass_method.split('.', 1)
        params = AvocadoInstrumentedTestRunner._create_params(runnable)
        result_dir = (runnable.output_dir or
                      tempfile.mkdtemp(prefix=".avocado-task"))
        test_factory = [
            klass,
            {
                'name': TestID(1, runnable.uri),
                'methodName': method,
                'config': runnable.config,
                'modulePath': module_path,
                'params': params,
                'tags': runnable.tags,
                'run.results_dir': result_dir,
            }
        ]
        messages.start_logging(runnable.config, queue)
        # Measure coverage only when the parent run requested it.
        if 'COVERAGE_RUN' in os.environ:
            from coverage import Coverage
            coverage = Coverage()
            coverage.start()
        instance = loader.load_test(test_factory)
        # Publish early state before the test body runs.
        early_state = instance.get_state()
        early_state['type'] = "early_state"
        queue.put(early_state)
        instance.run_avocado()
        if 'COVERAGE_RUN' in os.environ:
            coverage.stop()
            coverage.save()
        state = instance.get_state()
        fail_reason = state.get('fail_reason')
        queue.put(messages.WhiteboardMessage.get(state['whiteboard']))
        queue.put(
            messages.FinishedMessage.get(state['status'].lower(),
                                         fail_reason=fail_reason))
    except Exception as e:
        queue.put(messages.StderrMessage.get(traceback.format_exc()))
        queue.put(messages.FinishedMessage.get('error', fail_reason=str(e)))
def run(self):
    """Run the parent command under coverage, writing a per-process data file."""
    recorder = Coverage(data_suffix=True)
    # Explicitly enable coverage's own warnings about missing data and
    # unimported sources (private flags of the Coverage object).
    recorder._warn_no_data = True
    recorder._warn_unimported_source = True
    recorder.start()
    try:
        super().run()
    finally:
        recorder.stop()
        recorder.save()
def _bootstrap(self): """Wrapper around _bootstrap to start coverage.""" from coverage import Coverage cov = Coverage(data_suffix=True) cov.start() try: return original_bootstrap(self) finally: cov.stop() cov.save()
def code_coverage():
    """Generator used as a context manager: measure coverage while the caller runs.

    Data is written with the "blender" suffix so it can be combined with
    other runs later.
    """
    from coverage import Coverage
    recorder = Coverage(data_suffix="blender")
    recorder.start()
    print("Code coverage started")
    try:
        yield
    finally:
        recorder.stop()
        recorder.save()
        print("Code coverage stopped and saved")
def run_tests(*here):
    """Run _run_tests over each given path, under coverage when available.

    `Coverage` is an optional dependency: when it is None the tests simply
    run unmeasured.
    """
    if Coverage is None:
        for path in here:
            _run_tests(path)
        return
    recorder = Coverage()
    recorder.start()
    for path in here:
        _run_tests(path)
    recorder.stop()
    recorder.save()
def coverage(): if "--coverage" in sys.argv: cover = Coverage(source=["ls.joyous"]) cover.start() failures = run() cover.stop() cover.save() cover.html_report() else: failures = run() return failures
def _bootstrap(self): """Wrapper around _bootstrap to start coverage.""" from coverage import Coverage # avoid circular import rcfile = os.environ[COVERAGE_RCFILE_ENV] cov = Coverage(data_suffix=True, config_file=rcfile) cov.start() try: return original_bootstrap(self) finally: cov.stop() cov.save()
def run_tests(context, app=None, module=None, doctype=None, test=(),
        profile=False, coverage=False, junit_xml_output=False, ui_tests = False,
        doctype_list_path=None, skip_test_records=False, skip_before_tests=False,
        failfast=False):
    "Run tests"
    # Bench CLI entry point. Testing must be explicitly enabled on the site
    # (or the process must run on CI) before anything is executed.
    import frappe.test_runner
    tests = test
    site = get_site(context)
    allow_tests = frappe.get_conf(site).allow_tests
    if not (allow_tests or os.environ.get('CI')):
        click.secho('Testing is disabled for the site!', bold=True)
        click.secho('You can enable tests by entering following command:')
        click.secho('bench --site {0} set-config allow_tests true'.format(site), fg='green')
        return
    frappe.init(site=site)
    frappe.flags.skip_before_tests = skip_before_tests
    frappe.flags.skip_test_records = skip_test_records
    if coverage:
        from coverage import Coverage
        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
        cov = Coverage(source=[source_path], omit=[
            '*.html', '*.js', '*.xml', '*.css', '*.less', '*.scss',
            '*.vue', '*/doctype/*/*_dashboard.py', '*/patches/*'
        ])
        cov.start()
    ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests,
        force=context.force, profile=profile, junit_xml_output=junit_xml_output,
        ui_tests=ui_tests, doctype_list_path=doctype_list_path, failfast=failfast)
    if coverage:
        cov.stop()
        cov.save()
    # A clean run exits 0 on CI; otherwise the TestResult itself is passed to
    # sys.exit, producing a non-zero status.
    if len(ret.failures) == 0 and len(ret.errors) == 0:
        ret = 0
    if os.environ.get('CI'):
        sys.exit(ret)
def _bootstrap(self): """Wrapper around _bootstrap to start coverage.""" from coverage import Coverage rcfile = getattr(multiprocessing, PATCHED_MARKER) cov = Coverage(data_suffix=True, config_file=rcfile) cov.start() try: return original_bootstrap(self) finally: cov.stop() cov.save()
def main():
    # Test-orchestration entry point: select tests, run them one module at a
    # time, then merge coverage data in a finally block.
    options = parse_args()
    test_directory = str(REPO_ROOT / "test")
    selected_tests = get_selected_tests(options)
    if options.verbose:
        print_to_stderr("Selected tests:\n {}".format(
            "\n ".join(selected_tests)))
    if options.dry_run:
        return
    # Fresh coverage data unless an outer process is collecting it globally.
    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(["coverage", "erase"])
    if IS_CI:
        selected_tests = get_reordered_tests(selected_tests)
        # downloading test cases configuration to local environment
        get_test_case_configs(dirpath=test_directory)
    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            # Fail fast unless --continue-through-error was requested.
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        # Always merge coverage data, even when a test module raised.
        if options.coverage:
            from coverage import Coverage
            with set_cwd(test_directory):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    cov.load()
                cov.combine(strict=False)
                cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()
    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
def handle(self, *args, **kwargs):  # pragma: no cover
    """Run the parent command under coverage; exit non-zero below 100%."""
    recorder = Coverage()
    recorder.erase()
    recorder.start()
    super().handle(*args, **kwargs)
    recorder.stop()
    recorder.save()
    if recorder.report() < 100:
        sys.exit(1)
def run(self):
    """Run pytest under coverage, write an HTML report, and exit with
    a failing status when any test failed."""
    import pytest
    cov = Coverage()
    cov.erase()
    cov.start()
    result = pytest.main()
    cov.stop()
    cov.save()
    cov.html_report(directory="covhtml")
    # BUG FIX: pytest.main() returns an integer exit code (0 == all passed),
    # not a result object — the previous `result.failures`/`result.errors`
    # access raised AttributeError. Exit 1 on any non-zero pytest status.
    sys.exit(int(bool(result)))
def _bootstrap(self): """Wrapper around _bootstrap to start coverage.""" from coverage import Coverage coverage_config = getattr(multiprocessing, PATCHED_MARKER) coverage_config.parallel = True cov = Coverage() cov.config = coverage_config cov.start() try: return original_bootstrap(self) finally: cov.stop() cov.save()
def handle(self, *args, **kwargs):  # pragma: no cover
    """Run tests with branch coverage; exit non-zero when coverage < 90%."""
    self.stdout.write('Run test with coverage..')
    recorder = Coverage(branch=True)
    recorder.erase()
    recorder.start()
    super().handle(*args, **kwargs)
    recorder.stop()
    recorder.save()
    # skip_covered keeps the report focused on files that still need tests.
    if recorder.report(skip_covered=True) < 90:
        sys.exit(1)
def run_tests(complete: bool = True, strict: bool = True, dry_run: bool = False) -> (unittest.TestResult, Coverage, unittest.TestSuite): """ Run integration tests :param complete: When true ibllib unit tests are run in addition to the integration tests. :param strict: When true asserts that all gathered tests were successfully imported. This means that a module not found error in any test module will raise an exception. :param dry_run: When true the tests are gathered but not run. :return Test results and coverage objects, and test suite. """ # Coverage recorded for all code within the source directory; otherwise just omit some # common pyCharm files options = {'omit': ['*pydevd_file_utils.py', 'test_*'], 'source': []} # Gather tests test_dir = str(Path(ci.tests.__file__).parent) logger.info(f'Loading integration tests from {test_dir}') ci_tests = unittest.TestLoader().discover(test_dir, pattern='test_*') if complete: # include ibllib and brainbox unit tests root = Path(ibllib.__file__).parents[1] # Search relative to our imported ibllib package test_dirs = [root.joinpath(x) for x in ('brainbox', 'oneibl', 'ibllib', 'alf')] for tdir in test_dirs: logger.info(f'Loading unit tests from folders: {tdir}') assert tdir.exists(), f'Failed to find unit test folders in {tdir}' unit_tests = unittest.TestLoader().discover(str(tdir), pattern='test_*', top_level_dir=root) logger.info(f"Found {unit_tests.countTestCases()}, appending to the test suite") ci_tests = unittest.TestSuite((ci_tests, *unit_tests)) # for coverage, append the path of the test modules to the source key options['source'].append(str(tdir)) logger.info(f'Complete suite contains {ci_tests.countTestCases()} tests') # Check all tests loaded successfully not_loaded = [x[12:] for x in list_tests(ci_tests) if x.startswith('_Failed')] if len(not_loaded) != 0: err_msg = 'Failed to import the following tests:\n\t' + '\n\t'.join(not_loaded) assert not strict, err_msg logger.warning(err_msg) if dry_run: return 
unittest.TestResult(), Coverage(**options), ci_tests # Run tests with coverage cov = Coverage(**options) cov.start() result = unittest.TextTestRunner(verbosity=2).run(ci_tests) cov.stop() cov.save() return result, cov, ci_tests
def run_test_suite():
    # Full CI pipeline: unit tests -> coverage gate -> flake8 gate.
    # Coverage is started first so all test imports are measured.
    cov = Coverage(config_file=True)
    cov.erase()
    cov.start()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="python-xirsys", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    print(colored(text="Step 1: Running unit tests.\n", color="yellow", attrs=["bold"]))
    test_suite = TestLoader().discover(str(Path("tests").absolute()))
    result = TextTestRunner(verbosity=1).run(test_suite)
    if not result.wasSuccessful():
        # Exit code is the number of failing/erroring tests.
        sys.exit(len(result.failures) + len(result.errors))

    # Announce coverage run
    print(colored(text="\nStep 2: Generating coverage results.\n", color="yellow", attrs=["bold"]))
    cov.stop()
    percentage = round(cov.report(show_missing=True), 2)
    cov.html_report(directory='cover')
    cov.save()
    if percentage < TESTS_THRESHOLD:
        print(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " +
                      "WAS {old}%, IS NOW {new}%.\n".format(old=TESTS_THRESHOLD, new=percentage),
                      color="red", attrs=["bold"]))
        sys.exit(1)

    # Announce flake8 run
    sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"]))
    print("pep8 errors:")
    print("----------------------------------------------------------------------")
    from subprocess import call
    flake_result = call(["flake8", ".", "--count"])
    if flake_result != 0:
        print("pep8 errors detected.")
        print(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n", color="red", attrs=["bold"]))
        sys.exit(flake_result)
    else:
        print("None")

    # Announce success
    print(colored(text="\nTests completed successfully with no errors. Congrats!", color="green", attrs=["bold"]))
def run_test_suite():
    # Full CI pipeline: unit tests -> coverage gate -> flake8 gate.
    # Coverage is started first so all test imports are measured.
    cov = Coverage(config_file=True)
    cov.erase()
    cov.start()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="python-doc-inherit", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    print(colored(text="Step 1: Running unit tests.\n", color="yellow", attrs=["bold"]))
    test_suite = TestLoader().discover(str(Path("tests").absolute()))
    result = TextTestRunner(verbosity=1).run(test_suite)
    if not result.wasSuccessful():
        # Exit code is the number of failing/erroring tests.
        sys.exit(len(result.failures) + len(result.errors))

    # Announce coverage run
    print(colored(text="\nStep 2: Generating coverage results.\n", color="yellow", attrs=["bold"]))
    cov.stop()
    percentage = round(cov.report(show_missing=True), 2)
    cov.html_report(directory='cover')
    cov.save()
    if percentage < TESTS_THRESHOLD:
        print(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " +
                      "WAS {old}%, IS NOW {new}%.\n".format(old=TESTS_THRESHOLD, new=percentage),
                      color="red", attrs=["bold"]))
        sys.exit(1)

    # Announce flake8 run
    sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"]))
    print("pep8 errors:")
    print("----------------------------------------------------------------------")
    from subprocess import call
    flake_result = call(["flake8", ".", "--count"])
    if flake_result != 0:
        print("pep8 errors detected.")
        print(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n", color="red", attrs=["bold"]))
        sys.exit(flake_result)
    else:
        print("None")

    # Announce success
    print(colored(text="\nTests completed successfully with no errors. Congrats!", color="green", attrs=["bold"]))
def run(self):
    """Discover and run the test suite under coverage, then exit.

    Exit status is 1 when any test failed or errored, 0 otherwise.
    """
    suite = unittest.TestLoader().discover("tests", pattern="*[t|T]est*.py")
    runner = unittest.TextTestRunner(verbosity=1)
    cov = Coverage()
    cov.erase()
    cov.start()
    outcome = runner.run(suite)
    cov.stop()
    cov.save()
    cov.html_report(directory="covhtml")
    sys.exit(int(bool(len(outcome.failures) > 0 or len(outcome.errors) > 0)))
def run_unit_tests() -> int:
    """Discover and run all unit tests under coverage.

    Returns the combined number of failures and errors (0 == success).
    """
    cov = Coverage(include="app_google_groups/*")
    cov.start()
    suite = unittest.TestLoader().discover(start_dir=".", pattern="test_*.py")
    res = unittest.TextTestRunner().run(suite)
    cov.stop()
    try:
        cov.save()
        cov.report()
    except CoverageException as ce:
        # coverage raises when nothing was measured — don't fail the run.
        print(str(ce))
        print("Don't forget to add some unit tests!")
    return len(res.failures) + len(res.errors)
def generate_coverage_report():
    """
    Runs all tests in a Code Coverage context and generates a report.

    The HTML report is written to the ``htmlcov`` directory.
    """
    cov = Coverage()
    cov.start()
    # Removed unused `from unittest import TestLoader, TextTestRunner`:
    # test_all() drives the suite itself.
    test_all()
    cov.stop()
    cov.save()
    cov.html_report(directory="htmlcov")
def with_coverage(f, source, *, report=True, data=False):
    """Invoke callable `f` while measuring coverage of `source`.

    On success (falsy exit code) a text/HTML report is printed when `report`
    is true and the raw data is saved when `data` is true.  Returns f()'s
    exit code.
    """
    cov = Coverage(source=[source])
    cov.start()
    try:
        exit_code = f()
    finally:
        cov.stop()
    succeeded = not exit_code
    if succeeded and report:
        print()  # Print blank line.
        cov.report(show_missing=False)
        cov.html_report()
    if succeeded and data:
        cov.save()
    return exit_code
def main():
    # Run the application's full test suite under coverage, omitting
    # third-party and test code from measurement.
    cov = Coverage(
        omit=[
            "*passlib*",
            "*test*",
            "*tornado*",
            "*backports_abc*",
            "*singledispatch*",
            "*six*",
            "*certifi*",
            "*daemon*",
            "*funcsigs*",
            "*mock*",
            "*pbr*",
            "*pkg_resources*",
            "*tablib*",
        ]
    )
    cov.start()
    # Test modules are imported after coverage starts so their imports of
    # application code are measured.
    from app_test import ApplicationTest
    from database_test import DatabaseTest
    from http_test import HTTPTestCase
    from procinfo_test import ProcInfoTest
    from user_test import UserTest
    from token_test import TokenTest
    from ws_test import WebSocketTestCase
    from unittest import TestLoader, TextTestRunner, TestSuite

    loader = TestLoader()
    # Explicit ordering: fast/unit-style cases first, end-to-end cases last.
    suite = TestSuite(
        (
            loader.loadTestsFromTestCase(ProcInfoTest),
            loader.loadTestsFromTestCase(DatabaseTest),
            loader.loadTestsFromTestCase(UserTest),
            loader.loadTestsFromTestCase(TokenTest),
            loader.loadTestsFromTestCase(HTTPTestCase),
            loader.loadTestsFromTestCase(WebSocketTestCase),
            loader.loadTestsFromTestCase(ApplicationTest),
        )
    )
    runner = TextTestRunner(verbosity=2)
    runner.run(suite)
    cov.stop()
    cov.save()
def _bootstrap(self): """Wrapper around _bootstrap to start coverage.""" from coverage import Coverage # avoid circular import rcfile = os.environ[COVERAGE_RCFILE_ENV] cov = Coverage(data_suffix=True, config_file=rcfile) cov.start() debug = cov.debug try: if debug.should("multiproc"): debug.write("Calling multiprocessing bootstrap") return original_bootstrap(self) finally: if debug.should("multiproc"): debug.write("Finished multiprocessing bootstrap") cov.stop() cov.save() if debug.should("multiproc"): debug.write("Saved multiprocessing data")
def _bootstrap(self): """Wrapper around _bootstrap to start coverage.""" from coverage import Coverage # avoid circular import cov = Coverage(data_suffix=True) cov._warn_preimported_source = False cov.start() debug = cov._debug try: if debug.should("multiproc"): debug.write("Calling multiprocessing bootstrap") return original_bootstrap(self) finally: if debug.should("multiproc"): debug.write("Finished multiprocessing bootstrap") cov.stop() cov.save() if debug.should("multiproc"): debug.write("Saved multiprocessing data")
class CodeCoverage(object):
    """
    Code Coverage radish extension
    """
    # CLI options this extension contributes.
    OPTIONS = [
        ("--with-coverage", "enable code coverage"),
        ("--cover-packages=<cover_packages>", "specify source code package")
    ]
    # Only load the extension when --with-coverage was given.
    LOAD_IF = staticmethod(lambda config: config.with_coverage)
    LOAD_PRIORITY = 70

    def __init__(self):
        # Register hooks around the whole feature run.
        before.all(self.coverage_start)
        after.all(self.coverage_stop)
        if world.config.cover_packages:
            cover_packages = world.config.cover_packages.split(",")
        else:
            cover_packages = []
        self.coverage = Coverage(source=cover_packages)

    def coverage_start(self, features, marker):
        """
        Start the coverage measurement
        """
        # Load previously collected data so successive runs accumulate.
        self.coverage.load()
        self.coverage.start()

    def coverage_stop(self, features, marker):
        """
        Stop the coverage measurement and create report
        """
        self.coverage.stop()
        self.coverage.save()
        self.coverage.report(file=sys.stdout)
#!/usr/bin/env python import os import sys from django.core import management # Point to the correct settings for testing os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings' if __name__ == "__main__": testing = 'test' in sys.argv if testing: from coverage import Coverage cov = Coverage() cov.erase() cov.start() management.execute_from_command_line() if testing: cov.stop() cov.save() cov.report()
def run_test_suite(args):
    # dj-stripe CI pipeline: configure a standalone Django settings module,
    # run the nose suite, then apply coverage and pep8 gates.
    skip_utc = args.skip_utc
    enable_coverage = not args.no_coverage
    enable_pep8 = not args.no_pep8

    if enable_coverage:
        # Start coverage before Django/apps import so everything is measured.
        cov = Coverage(config_file=True)
        cov.erase()
        cov.start()

    settings.configure(
        DJSTRIPE_TESTS_SKIP_UTC=skip_utc,
        TIME_ZONE='America/Los_Angeles',
        DEBUG=True,
        USE_TZ=True,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.postgresql_psycopg2",
                "NAME": "djstripe",
                "USER": "",
                "PASSWORD": "",
                "HOST": "",
                "PORT": "",
            },
        },
        ROOT_URLCONF="tests.test_urls",
        INSTALLED_APPS=[
            "django.contrib.admin",
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.sessions",
            "django.contrib.sites",
            "jsonfield",
            "djstripe",
            "tests",
            "tests.apps.testapp"
        ],
        MIDDLEWARE_CLASSES=(
            "django.contrib.sessions.middleware.SessionMiddleware",
            "django.contrib.auth.middleware.AuthenticationMiddleware",
            "django.contrib.messages.middleware.MessageMiddleware"
        ),
        SITE_ID=1,
        STRIPE_PUBLIC_KEY=os.environ.get("STRIPE_PUBLIC_KEY", ""),
        STRIPE_SECRET_KEY=os.environ.get("STRIPE_SECRET_KEY", ""),
        # Fixture plans used across the test suite; prices are in cents.
        DJSTRIPE_PLANS={
            "test0": {
                "stripe_plan_id": "test_id_0",
                "name": "Test Plan 0",
                "description": "A test plan",
                "price": 1000,  # $10.00
                "currency": "usd",
                "interval": "month"
            },
            "test": {
                "stripe_plan_id": "test_id",
                "name": "Test Plan 1",
                "description": "Another test plan",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            },
            "test2": {
                "stripe_plan_id": "test_id_2",
                "name": "Test Plan 2",
                "description": "Yet Another test plan",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_deletion": {
                "stripe_plan_id": "test_id_3",
                "name": "Test Plan 3",
                "description": "Test plan for deletion.",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_trial": {
                "stripe_plan_id": "test_id_4",
                "name": "Test Plan 4",
                "description": "Test plan for trails.",
                "price": 7000,  # $70.00
                "currency": "usd",
                "interval": "month",
                "trial_period_days": 7
            },
            "unidentified_test_plan": {
                "name": "Unidentified Test Plan",
                "description": "A test plan with no ID.",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            }
        },
        DJSTRIPE_PLAN_HIERARCHY = {
            "bronze": {
                "level": 1,
                "plans": [
                    "test0",
                    "test",
                ]
            },
            "silver": {
                "level": 2,
                "plans": [
                    "test2",
                    "test_deletion",
                ]
            },
            "gold": {
                "level": 3,
                "plans": [
                    "test_trial",
                    "unidentified_test_plan",
                ]
            },
        },
        DJSTRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS=(
            "(admin)",
            "test_url_name",
            "testapp_namespaced:test_url_namespaced",
        ),
    )

    # Avoid AppRegistryNotReady exception
    # http://stackoverflow.com/questions/24793351/django-appregistrynotready
    if hasattr(django, "setup"):
        django.setup()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="dj-stripe", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    sys.stdout.write(colored(text="Step 1: Running unit tests.\n\n", color="yellow", attrs=["bold"]))

    # Hack to reset the global argv before nose has a chance to grab it
    # http://stackoverflow.com/a/1718407/1834570
    args = sys.argv[1:]
    sys.argv = sys.argv[0:1]

    from django_nose import NoseTestSuiteRunner
    test_runner = NoseTestSuiteRunner(verbosity=1)
    failures = test_runner.run_tests(["."])

    if failures:
        sys.exit(failures)

    if enable_coverage:
        # Announce coverage run
        sys.stdout.write(colored(text="\nStep 2: Generating coverage results.\n\n", color="yellow", attrs=["bold"]))
        cov.stop()
        percentage = round(cov.report(show_missing=True), 2)
        cov.html_report(directory='cover')
        cov.save()
        if percentage < TESTS_THRESHOLD:
            sys.stderr.write(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " +
                                     "WAS {old}%, IS NOW {new}%.\n\n".format(old=TESTS_THRESHOLD, new=percentage),
                                     color="red", attrs=["bold"]))
            sys.exit(1)
    else:
        # Announce disabled coverage run
        sys.stdout.write(colored(text="\nStep 2: Generating coverage results [SKIPPED].", color="yellow", attrs=["bold"]))

    if enable_pep8:
        # Announce flake8 run
        sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"]))
        print("pep8 errors:")
        print("----------------------------------------------------------------------")
        from subprocess import call
        flake_result = call(["flake8", ".", "--count"])
        if flake_result != 0:
            sys.stderr.write("pep8 errors detected.\n")
            sys.stderr.write(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n\n", color="red", attrs=["bold"]))
            sys.exit(flake_result)
        else:
            print("None")
    else:
        # Announce disabled coverage run
        sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors [SKIPPED].\n", color="yellow", attrs=["bold"]))

    # Announce success
    if enable_coverage and enable_pep8:
        sys.stdout.write(colored(text="\nTests completed successfully with no errors. Congrats!\n", color="green", attrs=["bold"]))
    else:
        sys.stdout.write(colored(text="\nTests completed successfully, but some step(s) were skipped!\n", color="green", attrs=["bold"]))
        sys.stdout.write(colored(text="Don't push without running the skipped step(s).\n", color="red", attrs=["bold"]))
class CoverageScript(object):
    """The command-line interface to coverage.py."""

    def __init__(self):
        # True when the invocation used global-switch syntax ("coverage -x ...")
        # rather than a named sub-command; controls which help text is shown.
        self.global_option = False
        # The Coverage object built from parsed options; created lazily in
        # command_line() so help/version requests never construct one.
        self.coverage = None

    def command_line(self, argv):
        """The bulk of the command line interface to coverage.py.

        `argv` is the argument list to process.

        Returns 0 if all is well, 1 if something went wrong.
        """
        # Collect the command-line options.
        if not argv:
            show_help(topic='minimum_help')
            return OK

        # The command syntax we parse depends on the first argument.  Global
        # switch syntax always starts with an option.
        self.global_option = argv[0].startswith('-')
        if self.global_option:
            parser = GlobalOptionParser()
        else:
            parser = CMDS.get(argv[0])
            if not parser:
                show_help("Unknown command: '%s'" % argv[0])
                return ERR
            # Drop the sub-command name; the rest are its options/arguments.
            argv = argv[1:]

        ok, options, args = parser.parse_args_ok(argv)
        if not ok:
            return ERR

        # Handle help and version.
        if self.do_help(options, args, parser):
            return OK

        # Listify the list options.
        source = unshell_list(options.source)
        omit = unshell_list(options.omit)
        include = unshell_list(options.include)
        debug = unshell_list(options.debug)

        # Do something.
        self.coverage = Coverage(
            data_suffix=options.parallel_mode,
            cover_pylib=options.pylib,
            timid=options.timid,
            branch=options.branch,
            config_file=options.rcfile,
            source=source,
            omit=omit,
            include=include,
            debug=debug,
            concurrency=options.concurrency,
            check_preimported=True,
            context=options.context,
            )

        if options.action == "debug":
            return self.do_debug(args)

        elif options.action == "erase":
            self.coverage.erase()
            return OK

        elif options.action == "run":
            return self.do_run(options, args)

        elif options.action == "combine":
            if options.append:
                # Load existing data first so combine() folds into it.
                self.coverage.load()
            data_dirs = args or None
            self.coverage.combine(data_dirs, strict=True)
            self.coverage.save()
            return OK

        # Remaining actions are reporting, with some common options.
        report_args = dict(
            morfs=unglob_args(args),
            ignore_errors=options.ignore_errors,
            omit=omit,
            include=include,
            )

        # We need to be able to import from the current directory, because
        # plugins may try to, for example, to read Django settings.
        sys.path.insert(0, '')

        self.coverage.load()

        total = None
        if options.action == "report":
            total = self.coverage.report(
                show_missing=options.show_missing,
                skip_covered=options.skip_covered,
                **report_args
                )
        elif options.action == "annotate":
            self.coverage.annotate(directory=options.directory, **report_args)
        elif options.action == "html":
            total = self.coverage.html_report(
                directory=options.directory,
                title=options.title,
                skip_covered=options.skip_covered,
                **report_args
                )
        elif options.action == "xml":
            outfile = options.outfile
            total = self.coverage.xml_report(outfile=outfile, **report_args)

        if total is not None:
            # Apply the command line fail-under options, and then use the config
            # value, so we can get fail_under from the config file.
            if options.fail_under is not None:
                self.coverage.set_option("report:fail_under", options.fail_under)

            fail_under = self.coverage.get_option("report:fail_under")
            precision = self.coverage.get_option("report:precision")
            if should_fail_under(total, fail_under, precision):
                return FAIL_UNDER

        return OK

    def do_help(self, options, args, parser):
        """Deal with help requests.

        Return True if it handled the request, False if not.
        """
        # Handle help.
        if options.help:
            if self.global_option:
                show_help(topic='help')
            else:
                show_help(parser=parser)
            return True

        if options.action == "help":
            if args:
                for a in args:
                    # Each argument may name either a sub-command or a topic.
                    parser = CMDS.get(a)
                    if parser:
                        show_help(parser=parser)
                    else:
                        show_help(topic=a)
            else:
                show_help(topic='help')
            return True

        # Handle version.
        if options.version:
            show_help(topic='version')
            return True

        return False

    def do_run(self, options, args):
        """Implementation of 'coverage run'."""
        if not args:
            if options.module:
                # Specified -m with nothing else.
                show_help("No module specified for -m")
                return ERR
            # No program on the command line; fall back to the one configured
            # in the run:command_line setting, honoring a leading "-m".
            command_line = self.coverage.get_option("run:command_line")
            if command_line is not None:
                args = shlex.split(command_line)
                if args and args[0] == "-m":
                    options.module = True
                    args = args[1:]

        if not args:
            show_help("Nothing to do.")
            return ERR

        if options.append and self.coverage.get_option("run:parallel"):
            show_help("Can't append to data files in parallel mode.")
            return ERR

        if options.concurrency == "multiprocessing":
            # Can't set other run-affecting command line options with
            # multiprocessing.
            for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
                # As it happens, all of these options have no default, meaning
                # they will be None if they have not been specified.
                if getattr(options, opt_name) is not None:
                    show_help(
                        "Options affecting multiprocessing must only be specified "
                        "in a configuration file.\n"
                        "Remove --{} from the command line.".format(opt_name)
                        )
                    return ERR

        runner = PyRunner(args, as_module=bool(options.module))
        runner.prepare()

        if options.append:
            self.coverage.load()

        # Run the script.
        self.coverage.start()
        code_ran = True
        try:
            runner.run()
        except NoSource:
            # Target couldn't be found/imported: re-raise, and remember not
            # to save data for a program that never ran.
            code_ran = False
            raise
        finally:
            self.coverage.stop()
            if code_ran:
                self.coverage.save()

        return OK

    def do_debug(self, args):
        """Implementation of 'coverage debug'."""
        if not args:
            show_help("What information would you like: config, data, sys?")
            return ERR
        for info in args:
            if info == 'sys':
                sys_info = self.coverage.sys_info()
                print(info_header("sys"))
                for line in info_formatter(sys_info):
                    print(" %s" % line)
            elif info == 'data':
                self.coverage.load()
                data = self.coverage.get_data()
                print(info_header("data"))
                print("path: %s" % self.coverage.get_data().data_filename())
                if data:
                    print("has_arcs: %r" % data.has_arcs())
                    summary = line_counts(data, fullpath=True)
                    filenames = sorted(summary.keys())
                    print("\n%d files:" % len(filenames))
                    for f in filenames:
                        line = "%s: %d lines" % (f, summary[f])
                        plugin = data.file_tracer(f)
                        if plugin:
                            line += " [%s]" % plugin
                        print(line)
                else:
                    print("No data collected")
            elif info == 'config':
                print(info_header("config"))
                config_info = self.coverage.config.__dict__.items()
                for line in info_formatter(config_info):
                    print(" %s" % line)
            else:
                show_help("Don't know what you mean by %r" % info)
                return ERR
        return OK
def run_test_suite(args):
    """Configure an in-process Django project and run the dj-stripe test suite.

    Starts coverage measurement (unless ``args.no_coverage`` is set), runs the
    tests named in ``args.tests`` via django-nose, then reports coverage and
    exits the process with a non-zero status on test failures or when coverage
    falls below ``TESTS_THRESHOLD``.
    """
    enable_coverage = not args.no_coverage
    tests = args.tests

    # Start coverage before Django/app modules get imported so import-time
    # statements are measured too.
    if enable_coverage:
        cov = Coverage(config_file=True)  # config_file=True reads .coveragerc
        cov.erase()
        cov.start()

    # Minimal throwaway settings for the test run (no settings module on disk).
    settings.configure(
        DEBUG=True,
        USE_TZ=True,
        TIME_ZONE="UTC",
        SITE_ID=1,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.postgresql_psycopg2",
                "NAME": "djstripe",
                "USER": "******",
                "PASSWORD": "",
                "HOST": "localhost",
                "PORT": "",
            },
        },
        TEMPLATES=[
            {
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [],
                'APP_DIRS': True,
                'OPTIONS': {
                    'context_processors': [
                        'django.contrib.auth.context_processors.auth',
                    ],
                },
            },
        ],
        ROOT_URLCONF="tests.test_urls",
        INSTALLED_APPS=[
            "django.contrib.admin",
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.sessions",
            "django.contrib.sites",
            "jsonfield",
            "djstripe",
            "tests",
            "tests.apps.testapp"
        ],
        MIDDLEWARE=(
            "django.contrib.sessions.middleware.SessionMiddleware",
            "django.contrib.auth.middleware.AuthenticationMiddleware",
            "django.contrib.messages.middleware.MessageMiddleware"
        ),
        STRIPE_PUBLIC_KEY=os.environ.get("STRIPE_PUBLIC_KEY", ""),
        STRIPE_SECRET_KEY=os.environ.get("STRIPE_SECRET_KEY", ""),
        # Fixture plans referenced throughout the test suite; prices are in
        # cents (Stripe convention).
        DJSTRIPE_PLANS={
            "test0": {
                "stripe_plan_id": "test_id_0",
                "name": "Test Plan 0",
                "description": "A test plan",
                "price": 1000,  # $10.00
                "currency": "usd",
                "interval": "month"
            },
            "test": {
                "stripe_plan_id": "test_id",
                "name": "Test Plan 1",
                "description": "Another test plan",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            },
            "test2": {
                "stripe_plan_id": "test_id_2",
                "name": "Test Plan 2",
                "description": "Yet Another test plan",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_deletion": {
                "stripe_plan_id": "test_id_3",
                "name": "Test Plan 3",
                "description": "Test plan for deletion.",
                "price": 5000,  # $50.00
                "currency": "usd",
                "interval": "month"
            },
            "test_trial": {
                "stripe_plan_id": "test_id_4",
                "name": "Test Plan 4",
                "description": "Test plan for trails.",
                "price": 7000,  # $70.00
                "currency": "usd",
                "interval": "month",
                "trial_period_days": 7
            },
            "unidentified_test_plan": {
                # Deliberately has no stripe_plan_id (tests the unidentified case).
                "name": "Unidentified Test Plan",
                "description": "A test plan with no ID.",
                "price": 2500,  # $25.00
                "currency": "usd",
                "interval": "month"
            }
        },
        DJSTRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS=(
            "(admin)",
            "test_url_name",
            "testapp_namespaced:test_url_namespaced",
            "fn:/test_fnmatch*"
        ),
        DJSTRIPE_USE_NATIVE_JSONFIELD=os.environ.get("USE_NATIVE_JSONFIELD", "") == "1",
    )

    # Avoid AppRegistryNotReady exception
    # http://stackoverflow.com/questions/24793351/django-appregistrynotready
    if hasattr(django, "setup"):
        django.setup()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="dj-stripe", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    sys.stdout.write(colored(text="Step 1: Running unit tests.\n\n", color="yellow", attrs=["bold"]))

    # Hack to reset the global argv before nose has a chance to grab it
    # http://stackoverflow.com/a/1718407/1834570
    # NOTE(review): this rebinds the `args` parameter; all of its fields were
    # read above, but `args` must not be used as the parameter past this point.
    args = sys.argv[1:]
    sys.argv = sys.argv[0:1]

    from django_nose import NoseTestSuiteRunner

    test_runner = NoseTestSuiteRunner(verbosity=1, keepdb=True, failfast=True)
    failures = test_runner.run_tests(tests)

    if failures:
        # Propagate the failure count as the process exit status.
        sys.exit(failures)

    if enable_coverage:
        # Announce coverage run
        sys.stdout.write(colored(text="\nStep 2: Generating coverage results.\n\n", color="yellow", attrs=["bold"]))
        cov.stop()
        percentage = round(cov.report(show_missing=True), 2)
        cov.html_report(directory='cover')
        cov.save()

        if percentage < TESTS_THRESHOLD:
            # NOTE(review): "WAS {old}%" formats the threshold, not a previously
            # measured coverage figure -- confirm whether that wording is intended.
            sys.stderr.write(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " +
                                     "WAS {old}%, IS NOW {new}%.\n\n".format(old=TESTS_THRESHOLD, new=percentage),
                                     color="red", attrs=["bold"]))
            sys.exit(1)
    else:
        # Announce disabled coverage run
        sys.stdout.write(colored(text="\nStep 2: Generating coverage results [SKIPPED].", color="yellow", attrs=["bold"]))

    # Announce success
    if enable_coverage:
        sys.stdout.write(colored(text="\nTests completed successfully with no errors. Congrats!\n", color="green", attrs=["bold"]))
    else:
        sys.stdout.write(colored(text="\nTests completed successfully, but some step(s) were skipped!\n", color="green", attrs=["bold"]))
        sys.stdout.write(colored(text="Don't push without running the skipped step(s).\n", color="red", attrs=["bold"]))