def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None,
        profile=False, coverage=False, junit_xml_output=False, ui_tests=False,
        doctype_list_path=None, skip_test_records=False, skip_before_tests=False, failfast=False):
    "Run tests"
    import frappe.test_runner
    tests = test

    site = get_site(context)
    frappe.init(site=site)

    frappe.flags.skip_before_tests = skip_before_tests
    frappe.flags.skip_test_records = skip_test_records

    if coverage:
        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
        cov = Coverage(source=[source_path], omit=['*.html', '*.js', '*.xml', '*.css',
            '*/doctype/*/*_dashboard.py', '*/patches/*'])
        cov.start()

    ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests,
        force=context.force, profile=profile, junit_xml_output=junit_xml_output,
        ui_tests=ui_tests, doctype_list_path=doctype_list_path, failfast=failfast)

    if coverage:
        cov.stop()
        cov.save()

    if len(ret.failures) == 0 and len(ret.errors) == 0:
        ret = 0

    if os.environ.get('CI'):
        sys.exit(ret)
def run_tests(self): " Run coverage on unit test " # need to import here cause we are in a venv import six from coverage import Coverage coverage = Coverage() coverage.start() # Purge modules under test from sys.modules. The test loader will # re-import them from the build location. Required when 2to3 is used # with namespace packages. if six.PY3 and getattr(self.distribution, 'use_2to3', False): module = self.test_suite.split('.')[0] if module in _namespace_packages: del_modules = [] if module in sys.modules: del_modules.append(module) module += '.' for name in sys.modules: if name.startswith(module): del_modules.append(name) list(map(sys.modules.__delitem__, del_modules)) unittest_main( None, None, self._argv, testLoader=self._resolve_as_ep(self.test_loader), testRunner=self._resolve_as_ep(self.test_runner), exit=False, ) coverage.stop() coverage.save() coverage.report(show_missing=False)
def run_tests(self):
    from illume import config
    from coverage import Coverage

    config.setenv("test")

    from pytest import main
    from illume.util import remove_or_ignore_dir
    from logging import basicConfig, DEBUG

    basicConfig(level=DEBUG, filename="illume-test.log")

    project_root = config.get("PROJECT_ROOT")
    data_dir = config.get("DATA_DIR")
    cov_config_dir = os.path.join(project_root, '.coveragerc')
    cov = Coverage(config_file=cov_config_dir)

    # Remove data directory in case tests failed to complete last time.
    remove_or_ignore_dir(data_dir)

    cov.start()
    exit_code = main(shlex.split(self.pytest_args or ""))
    cov.stop()
    cov.xml_report()

    # Remove data directory if tests passed successfully. Keep it around
    # if tests failed so the developer can troubleshoot the problem.
    if exit_code == 0:
        remove_or_ignore_dir(data_dir)

    sys.exit(exit_code)
def test(config, tests=(), fail_fast=False, with_coverage=True, with_lint=True):
    if tests:
        num_tests = len(tests)
        s = '' if num_tests == 1 else 's'
        printer.header('Running {num_tests} test{s}...'.format_map(locals()))
    else:
        coverage_message = ' with coverage' if with_coverage else ''
        printer.header('Running tests{coverage_message}...'.format_map(locals()))

    runner = unittest.TextTestRunner(failfast=fail_fast)
    loader = unittest.TestLoader()

    if with_coverage:
        from coverage import Coverage
        coverage = Coverage(source=['runcommands'])
        coverage.start()

    if tests:
        for name in tests:
            runner.run(loader.loadTestsFromName(name))
    else:
        tests = loader.discover('.')
        result = runner.run(tests)
        if not result.errors:
            if with_coverage:
                coverage.stop()
                coverage.report()
            if with_lint:
                printer.header('Checking for lint...')
                lint(config)
class CodeCoverage():
    def __init__(self, with_coverage, app):
        self.with_coverage = with_coverage
        self.app = app or 'frappe'

    def __enter__(self):
        if self.with_coverage:
            import os
            from coverage import Coverage
            from frappe.utils import get_bench_path

            # Generate coverage report only for app that is being tested
            source_path = os.path.join(get_bench_path(), 'apps', self.app)
            omit = STANDARD_EXCLUSIONS[:]

            if self.app == 'frappe':
                omit.extend(FRAPPE_EXCLUSIONS)

            self.coverage = Coverage(source=[source_path], omit=omit, include=STANDARD_INCLUSIONS)
            self.coverage.start()

    def __exit__(self, exc_type, exc_value, traceback):
        if self.with_coverage:
            self.coverage.stop()
            self.coverage.save()
            self.coverage.xml_report()
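# A minimal usage sketch for the CodeCoverage context manager above. The import
# path, the runner callable and the constants it relies on (STANDARD_EXCLUSIONS,
# FRAPPE_EXCLUSIONS, STANDARD_INCLUSIONS) are assumptions, not part of the original.
from frappe.coverage import CodeCoverage  # assumed module path

def run_app_tests_with_coverage(app, run_tests):
    # coverage.xml is written by __exit__ once the wrapped test run finishes.
    with CodeCoverage(with_coverage=True, app=app):
        run_tests()  # any callable that executes the app's test suite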
def run(self) -> int:
    coverage = Coverage()
    coverage.start()

    exit_code = main(['-v', 'tests'])

    coverage.stop()

    # Early exit if pytest failed
    if exit_code != 0:
        return 0

    # Generate xml report in StringIO file
    coverage.get_data()
    coverage.config.from_args(
        ignore_errors=None,
        report_omit=None,
        report_include=None,
        xml_output=None,
    )
    data = StringIO()
    reporter = XmlReporter(coverage, coverage.config)
    reporter.report(None, data)
    data.seek(0)

    # Check diff cover compared to origin/master using xml
    score = generate_coverage_report(coverage_xml=[data],
                                     compare_branch='origin/master',
                                     exclude=self.exclude_paths)

    return score
def report(testSuite):
    cov = Coverage(branch=True)
    cov.start()
    for test_case in testSuite:
        TriangleApp.TriangleTester(test_case[1][0], test_case[1][1], test_case[1][2])
    cov.stop()
    cov.report(file=open('./tmp/results.txt', 'w'), show_missing=True)

    f = open('./tmp/results.txt', 'r')
    lines = f.readlines()
    imp_elm = []
    br_not_cover = 0
    elements = lines[2].split(' ')
    for e in elements:
        if e != '' and e[0].isdigit():
            e = e.strip(',')
            e = e.strip('\n')
            e = e.strip('%')
            imp_elm.append(e)
    for i in range(5, len(imp_elm)):
        if imp_elm[i].find('->') != -1:
            br_not_cover = br_not_cover + 1
    coverage_report = {'stmt': int(imp_elm[0]), 'miss': int(imp_elm[1]),
                       'branch': int(imp_elm[2]), 'br_par': int(imp_elm[3]),
                       'cover': int(imp_elm[4]), 'br_not_cover': br_not_cover}
    return coverage_report
def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django-graphql.settings')

    try:
        command = sys.argv[1]
    except IndexError:
        command = "help"

    running_tests = (command == 'test')
    if running_tests:
        from coverage import Coverage
        cov = Coverage()
        cov.erase()
        cov.start()

    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?") from exc
    execute_from_command_line(sys.argv)

    if running_tests:
        cov.stop()
        cov.save()
        covered = cov.report()
        if covered < 100:
            sys.exit(1)
def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')
    try:
        from django.core.management import execute_from_command_line
        from django.conf import settings

        # MyProject Customization: run coverage.py around tests automatically
        running_tests = (sys.argv[1] == 'test')
        if running_tests:
            from coverage import Coverage
            cov = Coverage()
            cov.erase()
            cov.start()

        if settings.DEBUG:
            if os.environ.get('RUN_MAIN') or os.environ.get('WERKZEUG_RUN_MAIN'):
                import ptvsd
                ptvsd.enable_attach(address=('0.0.0.0', 3500))
                print("Attached remote debugger")

    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)

    if running_tests:
        cov.stop()
        cov.save()
        covered = cov.report()
        if covered < 100:
            sys.exit(1)
class TestsSuite:
    CONFIG_FILE = ".coveragerc"

    is_tests_suite_run: bool = False

    console_log_level: LogLevelEnum = LogLevelEnum.INFO
    file_log_level: LogLevelEnum = LogLevelEnum.DEBUG
    log_file_path: str = None

    __coverage: Coverage = None
    __is_coverage: bool

    def __init__(self, is_coverage: bool = False):
        self.__coverage = Coverage(config_file=self.CONFIG_FILE)
        self.__is_coverage = is_coverage
        TestsSuite.is_tests_suite_run = True
        init_logger(self.console_log_level, self.file_log_level)

    def run_tests(self):
        if self.__is_coverage:
            self.__coverage.start()

        tests = unittest.TestLoader().discover(start_dir='.', pattern='*_test.py')
        unittest.TextTestRunner(verbosity=2).run(tests)

        if self.__is_coverage:
            self.__coverage.stop()

    def create_report(self):
        self.__coverage.report()
        self.__coverage.json_report()
        self.__coverage.html_report()

    def erase_data(self):
        self.__coverage.erase()
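# A short usage sketch for TestsSuite above; the __main__ guard is an assumption
# about how the class is meant to be invoked.
if __name__ == "__main__":
    suite = TestsSuite(is_coverage=True)
    suite.run_tests()       # discovers *_test.py and measures coverage
    suite.create_report()   # console, JSON and HTML reports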
def run_coverage(self, test_name):
    cov = Coverage(source=self.config.source, omit=self.config.omit)
    cov.start()
    self.map_test(test_name)
    cov.stop()
    return cov.get_data()
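# The CoverageData returned by run_coverage above can be merged across tests; a
# sketch of that roll-up, assuming a hypothetical `mapper` object exposing
# run_coverage() and an iterable of test names.
from coverage import CoverageData

def combine_test_data(mapper, test_names, basename=".coverage.combined"):
    combined = CoverageData(basename=basename)
    for name in test_names:
        # CoverageData.update() merges another data object's measurements.
        combined.update(mapper.run_coverage(name))
    combined.write()
    return combined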
def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None,
        profile=False, coverage=False, junit_xml_output=False, ui_tests=False,
        doctype_list_path=None, skip_test_records=False, skip_before_tests=False, failfast=False):
    "Run tests"
    import frappe.test_runner
    tests = test

    site = get_site(context)
    frappe.init(site=site)

    frappe.flags.skip_before_tests = skip_before_tests
    frappe.flags.skip_test_records = skip_test_records

    if coverage:
        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
        cov = Coverage(source=[source_path], omit=['*.html', '*.js', '*.css'])
        cov.start()

    ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests,
        force=context.force, profile=profile, junit_xml_output=junit_xml_output,
        ui_tests=ui_tests, doctype_list_path=doctype_list_path, failfast=failfast)

    if coverage:
        cov.stop()
        cov.save()

    if len(ret.failures) == 0 and len(ret.errors) == 0:
        ret = 0

    if os.environ.get('CI'):
        sys.exit(ret)
class my_test(unittest.TestCase):

    def setUp(self):
        if not os.path.exists(self._testMethodName):
            os.system('mkdir %s' % self._testMethodName)
        # This relies on the test naming convention: `include` selects the source
        # files under test.
        if self._testMethodName.startswith("test_"):
            include_name = self._testMethodName[5:]
            self.cov = Coverage(include=['comm.py', '%s.py' % include_name])
            self.cov.start()
        return super().setUp()

    def test_add(self):
        cases = deal_case(read_file("add_test.txt", is_line=True))
        for case in cases:
            result = add(case[0], case[1])
            if 0 == result['code']:
                result = result['result']
                self.assertEqual(result, float(case[2]))
            elif 1 == result['code']:
                result = result['msg']
                self.assertEqual(result, case[2])

    def tearDown(self):
        self.cov.stop()
        self.cov.save()
        self.cov.html_report(directory='%s' % self._testMethodName)
        self.cov.erase()
        return super().tearDown()
def test(*tests, coverage=True, verbose=False, fail_fast=False):
    top_level_dir = Path.cwd()
    where = top_level_dir
    for segment in top_level_dir.name.split('.'):
        where = where / segment
    coverage = coverage and not tests
    verbosity = 2 if verbose else 1
    if coverage:
        from coverage import Coverage
        cover = Coverage(branch=True, source=[where])
        cover.start()
    loader = unittest.TestLoader()
    if tests:
        suite = loader.loadTestsFromNames(tests)
    else:
        suite = loader.discover(where, top_level_dir=top_level_dir)
    runner = unittest.TextTestRunner(verbosity=verbosity, failfast=fail_fast)
    runner.run(suite)
    if coverage:
        cover.stop()
        cover.report()
def run_tests():
    config = utils.get_config()
    cov_conf_file = config.get('file_locations', 'coverage_in_conf',
                               fallback='config/.coveragerc')
    cov_out_file = config.get('file_locations', 'coverage_out_dir',
                              fallback='coverage')
    cov = Coverage(config_file=cov_conf_file)
    cov.start()

    result = grab_test_results()
    if not result.wasSuccessful():
        print("Error running unit tests ...")
        exit(1)

    cov.stop()
    cov.save()
    print('Coverage Summary:')
    cov.report()
    cov.html_report(directory=cov_out_file)
    print('HTML version: file://{0}/{1}/index.html'.format(
        os.getcwd(), cov_out_file))
    cov.erase()
    exit(0)
def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings') # MyMoney Customization: run coverage.py around tests automatically try: command = sys.argv[1] except IndexError: command = "help" running_tests = (command == 'test') if running_tests: from coverage import Coverage cov = Coverage() cov.erase() cov.start() try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?") from exc execute_from_command_line(sys.argv) if running_tests: cov.stop() cov.save() covered = cov.report() if covered < 100: sys.exit(0)
def test(package, coverage=True, tests=(), verbose=False, fail_fast=False):
    cwd = os.getcwd()
    where = os.path.join(cwd, package.replace('.', os.sep))
    top_level_dir = cwd
    coverage = coverage and not tests
    verbosity = 2 if verbose else 1
    if coverage:
        from coverage import Coverage
        cover = Coverage(branch=True, source=[where])
        cover.start()
    loader = unittest.TestLoader()
    if tests:
        suite = loader.loadTestsFromNames(tests)
    else:
        suite = loader.discover(where, top_level_dir=top_level_dir)
    runner = unittest.TextTestRunner(verbosity=verbosity, failfast=fail_fast)
    runner.run(suite)
    if coverage:
        cover.stop()
        cover.report()
def _run_avocado(runnable, queue):
    try:
        # This assumes that a proper resolution (see resolver module)
        # was performed, and that a URI contains:
        # 1) path to python module
        # 2) class
        # 3) method
        #
        # To be defined: if the resolution uri should be composed like
        # this, or broken down and stored into other data fields
        module_path, klass_method = runnable.uri.split(":", 1)

        klass, method = klass_method.split(".", 1)

        params = AvocadoInstrumentedTestRunner._create_params(runnable)
        result_dir = runnable.output_dir or tempfile.mkdtemp(prefix=".avocado-task")

        test_factory = [
            klass,
            {
                "name": TestID(1, runnable.uri, runnable.variant),
                "methodName": method,
                "config": runnable.config,
                "modulePath": module_path,
                "params": params,
                "tags": runnable.tags,
                "run.results_dir": result_dir,
            },
        ]

        messages.start_logging(runnable.config, queue)

        if "COVERAGE_RUN" in os.environ:
            from coverage import Coverage

            coverage = Coverage()
            coverage.start()

        instance = loader.load_test(test_factory)
        early_state = instance.get_state()
        early_state["type"] = "early_state"
        queue.put(early_state)
        instance.run_avocado()

        if "COVERAGE_RUN" in os.environ:
            coverage.stop()
            coverage.save()

        state = instance.get_state()
        fail_reason = state.get("fail_reason")
        queue.put(messages.WhiteboardMessage.get(state["whiteboard"]))
        queue.put(
            messages.FinishedMessage.get(
                state["status"].lower(), fail_reason=fail_reason
            )
        )
    except Exception as e:
        queue.put(messages.StderrMessage.get(traceback.format_exc()))
        queue.put(messages.FinishedMessage.get("error", fail_reason=str(e)))
def coverage():
    from coverage import Coverage
    cover = Coverage()
    cover.start()
    failures = run()
    cover.stop()
    cover.save()
    cover.html_report()
    return failures
def _bootstrap(self):
    from coverage import Coverage
    cov = Coverage(data_suffix=True)
    cov.start()
    try:
        return original_bootstrap(self)
    finally:
        cov.stop()
        cov.save()
def _run_avocado(runnable, queue):
    try:
        # This assumes that a proper resolution (see resolver module)
        # was performed, and that a URI contains:
        # 1) path to python module
        # 2) class
        # 3) method
        #
        # To be defined: if the resolution uri should be composed like
        # this, or broken down and stored into other data fields
        module_path, klass_method = runnable.uri.split(':', 1)

        klass, method = klass_method.split('.', 1)

        params = AvocadoInstrumentedTestRunner._create_params(runnable)
        result_dir = (runnable.output_dir or
                      tempfile.mkdtemp(prefix=".avocado-task"))

        test_factory = [
            klass,
            {
                'name': TestID(1, runnable.uri),
                'methodName': method,
                'config': runnable.config,
                'modulePath': module_path,
                'params': params,
                'tags': runnable.tags,
                'run.results_dir': result_dir,
            }
        ]

        messages.start_logging(runnable.config, queue)

        if 'COVERAGE_RUN' in os.environ:
            from coverage import Coverage
            coverage = Coverage()
            coverage.start()

        instance = loader.load_test(test_factory)
        early_state = instance.get_state()
        early_state['type'] = "early_state"
        queue.put(early_state)
        instance.run_avocado()

        if 'COVERAGE_RUN' in os.environ:
            coverage.stop()
            coverage.save()

        state = instance.get_state()
        fail_reason = state.get('fail_reason')
        queue.put(messages.WhiteboardMessage.get(state['whiteboard']))
        queue.put(
            messages.FinishedMessage.get(state['status'].lower(),
                                         fail_reason=fail_reason))
    except Exception as e:
        queue.put(messages.StderrMessage.get(traceback.format_exc()))
        queue.put(messages.FinishedMessage.get('error', fail_reason=str(e)))
def run(self):
    cov = Coverage(data_suffix=True)
    cov._warn_no_data = True
    cov._warn_unimported_source = True
    cov.start()
    try:
        super().run()
    finally:
        cov.stop()
        cov.save()
def run_with_coverage(run):
    from coverage import Coverage
    coverage = Coverage()
    coverage.start()
    run()
    coverage.stop()
    coverage.save()
    # The modern Coverage API has no end()/result(); report on the measured
    # core files instead.
    coverage.report(include=['../core/%s' % prefix])
def coverage_analysis():
    cov = Coverage()
    cov.start()
    try:
        yield
    finally:
        cov.stop()
        cov.report()
        print("------ SAVING COVERAGE REPORTS ------ ")
        cov.xml_report(outfile=os.path.join(".", 'cobertura.xml'))
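# coverage_analysis above is a plain generator; it presumably only works as a
# with-block once wrapped in contextlib.contextmanager. A self-contained sketch of
# that wiring (the decorator and the run_suite() entry point are assumptions):
import os
from contextlib import contextmanager
from coverage import Coverage

@contextmanager
def coverage_analysis():
    cov = Coverage()
    cov.start()
    try:
        yield
    finally:
        cov.stop()
        cov.report()
        cov.xml_report(outfile=os.path.join(".", "cobertura.xml"))

# Usage: everything executed inside the block is measured.
# with coverage_analysis():
#     run_suite()  # hypothetical entry point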
def _bootstrap(self): """Wrapper around _bootstrap to start coverage.""" from coverage import Coverage cov = Coverage(data_suffix=True) cov.start() try: return original_bootstrap(self) finally: cov.stop() cov.save()
def main(
    ctx, port: int, config: str, addresses: str, version: bool, report_coverage: bool
) -> None:
    """run the relay server"""
    # silence warnings from urllib3, see github issue 246
    logging.getLogger("urllib3.connectionpool").setLevel(logging.CRITICAL)

    logger.info("Starting relay server version %s", get_version())

    if report_coverage:
        coverage = Coverage()
        coverage.start()

    try:
        config_dict = load_config(config)
    except ValidationError as error:
        logger.error("Validation error in config: " + validation_error_string(error))
        sys.exit(1)

    configure_logging(config_dict)

    sentry_config = config_dict.get("sentry", None)
    if sentry_config is not None:
        sentry_sdk.init(
            dsn=sentry_config["dsn"],
            integrations=[sentry_sdk.integrations.flask.FlaskIntegration()],
        )

    if addresses is None:
        addresses = config_dict["relay"]["addresses_filepath"]

    trustlines = TrustlinesRelay(config=config_dict, addresses_json_path=addresses)
    trustlines.start()

    rest_config = config_dict["rest"]
    if port is None:
        port = rest_config["port"]
    host = rest_config["host"]
    ipport = (host, port)

    app = ApiApp(trustlines, enabled_apis=select_enabled_apis(config_dict))
    http_server = WSGIServer(ipport, app, log=None, handler_class=WebSocketHandler)

    if report_coverage:

        def shutdown(code, frame):
            logger.info("Relay server is shutting down ...")
            http_server.stop(timeout=60)
            coverage.stop()
            coverage.xml_report(outfile="/end2end-coverage/coverage.xml")
            exit(signal.SIGTERM)

        signal.signal(signal.SIGTERM, shutdown)
        signal.signal(signal.SIGQUIT, shutdown)
        signal.signal(signal.SIGINT, shutdown)

    logger.info("Server is running on {}".format(ipport))
    http_server.serve_forever()
def main(): """ The main function, mainly functioning to do the main functional work (thanks pylint) """ if len(sys.argv) > 1 and sys.argv[1] == 'cover': # FIXME - there are enough args now to need an arg parser cover = Coverage( branch=True, auto_data=True, omit=[ "/usr/share/pyshared/*", "/usr/lib/python3/dist-packages/*", ], ) min_percent = 0 if len(sys.argv) > 2: min_percent = float(sys.argv[2]) else: cover = False loader = unittest.defaultTestLoader runner = unittest.TextTestRunner(verbosity=2) if cover: cover.erase() cover.start() tests = loader.discover('.') # If we ever drop libraries into the 'lib' subdir defined in the above # sys.path.insert then we will need to discover their tests and add # them separately with the following: tests_lib = loader.discover('lib', top_level_dir='lib') tests.addTests(tests_lib) result = runner.run(tests) if cover: cover.stop() # the debian coverage package didnt include jquery.debounce.min.js # (and additionally, that thing is not packaged for debian elsewhere) try: cover.html_report() except Exception: pass percent = cover.report(show_missing=True) if min_percent > percent: err_fmt = "The coverage ({:.1f}% reached) fails to reach the minimum required ({}%)\n" # noqa sys.stderr.write(err_fmt.format(percent, min_percent)) exit(1) if not result.wasSuccessful(): exit(1)
class CoverageContext(object):
    def __init__(self):
        from coverage import Coverage
        self._cov = Coverage(cover_pylib=False)
        self._cov.start()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._cov.stop()
def start(self, with_uuid=True):
    # Prevent starting when already forked and wait wasn't called.
    if self.pid != 0:
        raise AlreadyRunning, 'Already running with PID %r' % (self.pid, )
    command = self.command
    args = []
    self.with_uuid = with_uuid
    for arg, param in self.arg_dict.iteritems():
        args.append('--' + arg)
        if param is not None:
            args.append(str(param))
    if with_uuid:
        args += '--uuid', str(self.uuid)
    global coverage
    if coverage:
        cls = self.__class__
        cls._coverage_index += 1
        coverage_data_path = cls._coverage_prefix + str(cls._coverage_index)
        self._coverage_fd, w = os.pipe()

        def save_coverage(*args):
            if coverage:
                coverage.stop()
                coverage.save()
            if args:
                os.close(w)
                os.kill(os.getpid(), signal.SIGSTOP)

    self.pid = logging.fork()
    if self.pid:
        # Wait that the signal to kill the child is set up.
        os.close(w)
        os.read(self._coverage_fd, 1)
        if coverage:
            coverage.neotestrunner.append(coverage_data_path)
    else:
        # Child
        try:
            signal.signal(signal.SIGTERM, lambda *args: sys.exit())
            if coverage:
                coverage.stop()
                from coverage import Coverage
                coverage = Coverage(coverage_data_path)
                coverage.start()
                signal.signal(signal.SIGUSR2, save_coverage)
                os.close(self._coverage_fd)
                os.write(w, '\0')
            sys.argv = [command] + args
            getattr(neo.scripts, command).main()
            status = 0
        except SystemExit, e:
            status = e.code
            if status is None:
                status = 0
        except KeyboardInterrupt:
            status = 1
def run_tests(context, app=None, module=None, doctype=None, test=(),
        profile=False, coverage=False, junit_xml_output=False, ui_tests=False,
        doctype_list_path=None, skip_test_records=False, skip_before_tests=False, failfast=False):
    "Run tests"
    import frappe.test_runner
    tests = test

    site = get_site(context)

    allow_tests = frappe.get_conf(site).allow_tests
    if not (allow_tests or os.environ.get('CI')):
        click.secho('Testing is disabled for the site!', bold=True)
        click.secho('You can enable tests by entering following command:')
        click.secho('bench --site {0} set-config allow_tests true'.format(site), fg='green')
        return

    frappe.init(site=site)

    frappe.flags.skip_before_tests = skip_before_tests
    frappe.flags.skip_test_records = skip_test_records

    if coverage:
        from coverage import Coverage

        # Generate coverage report only for app that is being tested
        source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
        cov = Coverage(source=[source_path], omit=[
            '*.html',
            '*.js',
            '*.xml',
            '*.css',
            '*.less',
            '*.scss',
            '*.vue',
            '*/doctype/*/*_dashboard.py',
            '*/patches/*'
        ])
        cov.start()

    ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests,
        force=context.force, profile=profile, junit_xml_output=junit_xml_output,
        ui_tests=ui_tests, doctype_list_path=doctype_list_path, failfast=failfast)

    if coverage:
        cov.stop()
        cov.save()

    if len(ret.failures) == 0 and len(ret.errors) == 0:
        ret = 0

    if os.environ.get('CI'):
        sys.exit(ret)
def coverage(): if "--coverage" in sys.argv: cover = Coverage(source=["ls.joyous"]) cover.start() failures = run() cover.stop() cover.save() cover.html_report() else: failures = run() return failures
def _bootstrap(self): """Wrapper around _bootstrap to start coverage.""" from coverage import Coverage # avoid circular import rcfile = os.environ[COVERAGE_RCFILE_ENV] cov = Coverage(data_suffix=True, config_file=rcfile) cov.start() try: return original_bootstrap(self) finally: cov.stop() cov.save()
def run_tests(*here):
    if Coverage is None:
        for x in here:
            _run_tests(x)
    else:
        coverage = Coverage()
        coverage.start()
        for x in here:
            _run_tests(x)
        coverage.stop()
        coverage.save()
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage
    rcfile = getattr(multiprocessing, PATCHED_MARKER)
    cov = Coverage(data_suffix=True, config_file=rcfile)
    cov.start()
    try:
        return original_bootstrap(self)
    finally:
        cov.stop()
        cov.save()
def run(self):
    from coverage import Coverage
    cov = Coverage(source=self.distribution.packages)
    cov.start()
    super().run()
    cov.stop()
    cov.xml_report()
    cov.html_report()
def code_coverage():
    from coverage import Coverage
    cov = Coverage(data_suffix="blender")
    cov.start()
    print("Code coverage started")
    try:
        yield
    finally:
        cov.stop()
        cov.save()
        print("Code coverage stopped and saved")
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage
    coverage_config = getattr(multiprocessing, PATCHED_MARKER)
    coverage_config.parallel = True
    cov = Coverage()
    cov.config = coverage_config
    cov.start()
    try:
        return original_bootstrap(self)
    finally:
        cov.stop()
        cov.save()
def run(self):
    import pytest
    from coverage import Coverage

    cov = Coverage()
    cov.erase()
    cov.start()
    # pytest.main() returns an exit status code (0 means all tests passed),
    # not a result object, so use it directly.
    result = pytest.main()
    cov.stop()
    cov.save()
    cov.html_report(directory="covhtml")
    sys.exit(int(result != 0))
def run_test_suite():
    cov = Coverage(config_file=True)
    cov.erase()
    cov.start()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="python-doc-inherit", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    print(colored(text="Step 1: Running unit tests.\n", color="yellow", attrs=["bold"]))
    test_suite = TestLoader().discover(str(Path("tests").absolute()))
    result = TextTestRunner(verbosity=1).run(test_suite)

    if not result.wasSuccessful():
        sys.exit(len(result.failures) + len(result.errors))

    # Announce coverage run
    print(colored(text="\nStep 2: Generating coverage results.\n", color="yellow", attrs=["bold"]))
    cov.stop()
    percentage = round(cov.report(show_missing=True), 2)
    cov.html_report(directory='cover')
    cov.save()

    if percentage < TESTS_THRESHOLD:
        print(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " +
                           "WAS {old}%, IS NOW {new}%.\n".format(old=TESTS_THRESHOLD, new=percentage),
                      color="red", attrs=["bold"]))
        sys.exit(1)

    # Announce flake8 run
    sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"]))
    print("pep8 errors:")
    print("----------------------------------------------------------------------")

    from subprocess import call
    flake_result = call(["flake8", ".", "--count"])
    if flake_result != 0:
        print("pep8 errors detected.")
        print(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n", color="red", attrs=["bold"]))
        sys.exit(flake_result)
    else:
        print("None")

    # Announce success
    print(colored(text="\nTests completed successfully with no errors. Congrats!", color="green", attrs=["bold"]))
def with_coverage(f, source, *, report=True, data=False):
    cov = Coverage(source=[source])
    cov.start()
    try:
        exit_code = f()
    finally:
        cov.stop()
    if not exit_code:
        if report:
            print()  # Print blank line.
            cov.report(show_missing=False)
            cov.html_report()
        if data:
            cov.save()
    return exit_code
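# A usage sketch for with_coverage above; the package name and the pytest
# invocation are illustrative assumptions.
import sys
import pytest

# Measure the hypothetical package "mypkg" while its pytest suite runs, and keep
# the .coverage data file only when the run succeeds (data=True).
exit_code = with_coverage(lambda: pytest.main(["tests/"]), "mypkg", data=True)
sys.exit(exit_code)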
def main():
    cov = Coverage(
        omit=[
            "*passlib*",
            "*test*",
            "*tornado*",
            "*backports_abc*",
            "*singledispatch*",
            "*six*",
            "*certifi*",
            "*daemon*",
            "*funcsigs*",
            "*mock*",
            "*pbr*",
            "*pkg_resources*",
            "*tablib*",
        ]
    )
    cov.start()

    from app_test import ApplicationTest
    from database_test import DatabaseTest
    from http_test import HTTPTestCase
    from procinfo_test import ProcInfoTest
    from user_test import UserTest
    from token_test import TokenTest
    from ws_test import WebSocketTestCase

    from unittest import TestLoader, TextTestRunner, TestSuite

    loader = TestLoader()
    suite = TestSuite(
        (
            loader.loadTestsFromTestCase(ProcInfoTest),
            loader.loadTestsFromTestCase(DatabaseTest),
            loader.loadTestsFromTestCase(UserTest),
            loader.loadTestsFromTestCase(TokenTest),
            loader.loadTestsFromTestCase(HTTPTestCase),
            loader.loadTestsFromTestCase(WebSocketTestCase),
            loader.loadTestsFromTestCase(ApplicationTest),
        )
    )

    runner = TextTestRunner(verbosity=2)
    runner.run(suite)

    cov.stop()
    cov.save()
def test_does_not_trace_files_outside_inclusion(tmpdir, branch, timid):
    @given(st.booleans())
    def test(a):
        rnd()

    cov = Coverage(
        config_file=False, data_file=str(tmpdir.join('.coverage')),
        branch=branch, timid=timid, include=[__file__],
    )
    cov._warn = escalate_warning
    cov.start()
    test()
    cov.stop()
    data = cov.get_data()
    assert len(list(data.measured_files())) == 1
def test_achieves_full_coverage(tmpdir, branch, timid):
    @given(st.booleans(), st.booleans(), st.booleans())
    def test(a, b, c):
        some_function_to_test(a, b, c)

    cov = Coverage(
        config_file=False, data_file=str(tmpdir.join('.coverage')),
        branch=branch, timid=timid,
    )
    cov._warn = escalate_warning
    cov.start()
    test()
    cov.stop()
    data = cov.get_data()
    lines = data.lines(__file__)
    for i in hrange(LINE_START + 1, LINE_END + 1):
        assert i in lines
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage       # avoid circular import
    cov = Coverage(data_suffix=True)
    cov._warn_preimported_source = False
    cov.start()
    debug = cov._debug
    try:
        if debug.should("multiproc"):
            debug.write("Calling multiprocessing bootstrap")
        return original_bootstrap(self)
    finally:
        if debug.should("multiproc"):
            debug.write("Finished multiprocessing bootstrap")
        cov.stop()
        cov.save()
        if debug.should("multiproc"):
            debug.write("Saved multiprocessing data")
def _bootstrap(self):
    """Wrapper around _bootstrap to start coverage."""
    from coverage import Coverage       # avoid circular import
    rcfile = os.environ[COVERAGE_RCFILE_ENV]
    cov = Coverage(data_suffix=True, config_file=rcfile)
    cov.start()
    debug = cov.debug
    try:
        if debug.should("multiproc"):
            debug.write("Calling multiprocessing bootstrap")
        return original_bootstrap(self)
    finally:
        if debug.should("multiproc"):
            debug.write("Finished multiprocessing bootstrap")
        cov.stop()
        cov.save()
        if debug.should("multiproc"):
            debug.write("Saved multiprocessing data")
def main(): """ The main function, mainly functioning to do the main functional work (thanks pylint) """ if len(sys.argv) > 1 and sys.argv[1] == 'cover': # FIXME - there are enough args now to need an arg parser cover = Coverage(branch=True, auto_data=True) min_percent = 0 if len(sys.argv) > 2: min_percent = float(sys.argv[2]) else: cover = False loader = unittest.defaultTestLoader runner = unittest.TextTestRunner(verbosity=2) if cover: cover.erase() cover.start() tests = loader.discover('.') tests_lib = loader.discover('lib', top_level_dir='lib') tests.addTests(tests_lib) result = runner.run(tests) if cover: cover.stop() # the debian coverage package didnt include jquery.debounce.min.js # (and additionally, that thing is not packaged for debian elsewhere) try: cover.html_report() except: pass percent = cover.report(show_missing=True) if min_percent > percent: print("The coverage ({:.1f}% reached) fails to reach the " "minimum required ({}%)\n".format(percent, min_percent)) exit(1) if not result.wasSuccessful(): exit(1)
def run_unit_tests():
    from datetime import datetime
    from coverage import Coverage
    from io import StringIO
    import unittest
    from tests import testModels

    w = StringIO()
    cov = Coverage(omit=["/usr/*", "*/venv/*", "*-packages*"])
    cov.start()
    runner = unittest.TextTestRunner(stream=w)
    runner.run(unittest.makeSuite(testModels))
    cov.stop()
    cov.report(file=w)
    output = w.getvalue()
    db.app = app
    return ("You ran the tests on: " +
            datetime.now().strftime("%I:%M%p on %B %d, %Y") +
            " GMT\n" + output)
class AutotestConfig(AppConfig):
    name = 'autotest'

    def __init__(self, *args, **kwargs):
        super(AutotestConfig, self).__init__(*args, **kwargs)
        self.coverage = None

    def coverage_start(self):
        if coverage and not self.coverage:
            self.coverage = Coverage()
            self.coverage.start()
        return self.coverage

    def coverage_report(self):
        if coverage and self.coverage:
            self.coverage.stop()
            coverage.stop()
            self.coverage.get_data().update(coverage.get_data())
            self.coverage.html_report()
def stmtCovered(code, test):
    # Replace input lines with values
    code = feedInput(code, test)

    # Initialize the coverage object
    cov = Coverage()

    # Create a temporary file with the code
    fname = "tmp" + str(int(random() * 100000))
    f = open(fname, 'w')
    f.write("\n".join(code))
    f.close()

    # Capture statement coverage
    print("Test case is: " + str(test))
    cov.start()
    # execfile() is Python 2 only; compile with the real filename so coverage
    # attributes the executed lines to the temporary file.
    exec(compile(open(fname).read(), fname, 'exec'))
    cov.stop()

    # Obtain the report of coverage and remove the temporary file
    report = cov.analysis(fname)
    os.remove(fname)

    # Return the executed line numbers
    return set(range(1, len(code) + 1)) - set(report[2])
def run(): print "running all tests for core" from coverage import Coverage coverage = Coverage() coverage.start() #run() sys.stdout.write("Test of Server... control-c to continue other tests.\n") sys.stdout.flush() from test.redfoot.server import all from test.redfoot.rdf.syntax import test from test.redfoot.rdf.syntax import test2 from test.redfoot.rdf.store import test sys.settrace(None)#coverage.end() import os contains = os.path.normpath('redfoot') print "CONTAINS" + contains coverage.result([contains,], open('coverage.html', 'w'))
class CodeCoverage(object):
    """
    Code Coverage radish extension
    """
    OPTIONS = [
        ("--with-coverage", "enable code coverage"),
        ("--cover-packages=<cover_packages>", "specify source code package")
    ]
    LOAD_IF = staticmethod(lambda config: config.with_coverage)
    LOAD_PRIORITY = 70

    def __init__(self):
        before.all(self.coverage_start)
        after.all(self.coverage_stop)

        if world.config.cover_packages:
            cover_packages = world.config.cover_packages.split(",")
        else:
            cover_packages = []

        self.coverage = Coverage(source=cover_packages)

    def coverage_start(self, features, marker):
        """
        Start the coverage measurement
        """
        self.coverage.load()
        self.coverage.start()

    def coverage_stop(self, features, marker):
        """
        Stop the coverage measurement and create report
        """
        self.coverage.stop()
        self.coverage.save()
        self.coverage.report(file=sys.stdout)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from flask_script import Manager, Shell, Server
from flask_migrate import Migrate, MigrateCommand
from unittest import TestLoader, TextTestRunner
from coverage import Coverage
from datetime import datetime
import os
import sys

cov = None
if os.environ.get('FLASK_COVERAGE'):
    cov = Coverage(branch=True, include=['app/*'])
    cov.start()

from app import create_app, db

app = create_app()
manager = Manager(app)
migrate = Migrate(app, db)


def make_shell_context():
    return dict(app=app, db=db)


class GeventServer(Server):
    help = description = 'Runs the Flask development gevent server.'
def run_test_suite(args): enable_coverage = not args.no_coverage tests = args.tests if enable_coverage: cov = Coverage(config_file=True) cov.erase() cov.start() settings.configure( DEBUG=True, USE_TZ=True, TIME_ZONE="UTC", SITE_ID=1, DATABASES={ "default": { "ENGINE": "django.db.backends.postgresql_psycopg2", "NAME": "djstripe", "USER": "******", "PASSWORD": "", "HOST": "localhost", "PORT": "", }, }, TEMPLATES=[ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.contrib.auth.context_processors.auth', ], }, }, ], ROOT_URLCONF="tests.test_urls", INSTALLED_APPS=[ "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.sites", "jsonfield", "djstripe", "tests", "tests.apps.testapp" ], MIDDLEWARE=( "django.contrib.sessions.middleware.SessionMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware" ), STRIPE_PUBLIC_KEY=os.environ.get("STRIPE_PUBLIC_KEY", ""), STRIPE_SECRET_KEY=os.environ.get("STRIPE_SECRET_KEY", ""), DJSTRIPE_PLANS={ "test0": { "stripe_plan_id": "test_id_0", "name": "Test Plan 0", "description": "A test plan", "price": 1000, # $10.00 "currency": "usd", "interval": "month" }, "test": { "stripe_plan_id": "test_id", "name": "Test Plan 1", "description": "Another test plan", "price": 2500, # $25.00 "currency": "usd", "interval": "month" }, "test2": { "stripe_plan_id": "test_id_2", "name": "Test Plan 2", "description": "Yet Another test plan", "price": 5000, # $50.00 "currency": "usd", "interval": "month" }, "test_deletion": { "stripe_plan_id": "test_id_3", "name": "Test Plan 3", "description": "Test plan for deletion.", "price": 5000, # $50.00 "currency": "usd", "interval": "month" }, "test_trial": { "stripe_plan_id": "test_id_4", "name": "Test Plan 4", "description": "Test plan for trails.", "price": 7000, # $70.00 "currency": "usd", "interval": "month", "trial_period_days": 7 }, "unidentified_test_plan": { "name": "Unidentified Test Plan", "description": "A test plan with no ID.", "price": 2500, # $25.00 "currency": "usd", "interval": "month" } }, DJSTRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS=( "(admin)", "test_url_name", "testapp_namespaced:test_url_namespaced", "fn:/test_fnmatch*" ), DJSTRIPE_USE_NATIVE_JSONFIELD=os.environ.get("USE_NATIVE_JSONFIELD", "") == "1", ) # Avoid AppRegistryNotReady exception # http://stackoverflow.com/questions/24793351/django-appregistrynotready if hasattr(django, "setup"): django.setup() # Announce the test suite sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"])) sys.stdout.write(colored(text="dj-stripe", color="green", attrs=["bold"])) sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"])) # Announce test run sys.stdout.write(colored(text="Step 1: Running unit tests.\n\n", color="yellow", attrs=["bold"])) # Hack to reset the global argv before nose has a chance to grab it # http://stackoverflow.com/a/1718407/1834570 args = sys.argv[1:] sys.argv = sys.argv[0:1] from django_nose import NoseTestSuiteRunner test_runner = NoseTestSuiteRunner(verbosity=1, keepdb=True, failfast=True) failures = test_runner.run_tests(tests) if failures: sys.exit(failures) if enable_coverage: # Announce coverage run sys.stdout.write(colored(text="\nStep 2: Generating coverage results.\n\n", color="yellow", attrs=["bold"])) cov.stop() percentage = round(cov.report(show_missing=True), 2) 
cov.html_report(directory='cover') cov.save() if percentage < TESTS_THRESHOLD: sys.stderr.write(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " + "WAS {old}%, IS NOW {new}%.\n\n".format(old=TESTS_THRESHOLD, new=percentage), color="red", attrs=["bold"])) sys.exit(1) else: # Announce disabled coverage run sys.stdout.write(colored(text="\nStep 2: Generating coverage results [SKIPPED].", color="yellow", attrs=["bold"])) # Announce success if enable_coverage: sys.stdout.write(colored(text="\nTests completed successfully with no errors. Congrats!\n", color="green", attrs=["bold"])) else: sys.stdout.write(colored(text="\nTests completed successfully, but some step(s) were skipped!\n", color="green", attrs=["bold"])) sys.stdout.write(colored(text="Don't push without running the skipped step(s).\n", color="red", attrs=["bold"]))
def run_test_suite(args): skip_utc = args.skip_utc enable_coverage = not args.no_coverage enable_pep8 = not args.no_pep8 if enable_coverage: cov = Coverage(config_file=True) cov.erase() cov.start() settings.configure( DJSTRIPE_TESTS_SKIP_UTC=skip_utc, TIME_ZONE='America/Los_Angeles', DEBUG=True, USE_TZ=True, DATABASES={ "default": { "ENGINE": "django.db.backends.postgresql_psycopg2", "NAME": "djstripe", "USER": "", "PASSWORD": "", "HOST": "", "PORT": "", }, }, ROOT_URLCONF="tests.test_urls", INSTALLED_APPS=[ "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.sites", "jsonfield", "djstripe", "tests", "tests.apps.testapp" ], MIDDLEWARE_CLASSES=( "django.contrib.sessions.middleware.SessionMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware" ), SITE_ID=1, STRIPE_PUBLIC_KEY=os.environ.get("STRIPE_PUBLIC_KEY", ""), STRIPE_SECRET_KEY=os.environ.get("STRIPE_SECRET_KEY", ""), DJSTRIPE_PLANS={ "test0": { "stripe_plan_id": "test_id_0", "name": "Test Plan 0", "description": "A test plan", "price": 1000, # $10.00 "currency": "usd", "interval": "month" }, "test": { "stripe_plan_id": "test_id", "name": "Test Plan 1", "description": "Another test plan", "price": 2500, # $25.00 "currency": "usd", "interval": "month" }, "test2": { "stripe_plan_id": "test_id_2", "name": "Test Plan 2", "description": "Yet Another test plan", "price": 5000, # $50.00 "currency": "usd", "interval": "month" }, "test_deletion": { "stripe_plan_id": "test_id_3", "name": "Test Plan 3", "description": "Test plan for deletion.", "price": 5000, # $50.00 "currency": "usd", "interval": "month" }, "test_trial": { "stripe_plan_id": "test_id_4", "name": "Test Plan 4", "description": "Test plan for trails.", "price": 7000, # $70.00 "currency": "usd", "interval": "month", "trial_period_days": 7 }, "unidentified_test_plan": { "name": "Unidentified Test Plan", "description": "A test plan with no ID.", "price": 2500, # $25.00 "currency": "usd", "interval": "month" } }, DJSTRIPE_PLAN_HIERARCHY = { "bronze": { "level": 1, "plans": [ "test0", "test", ] }, "silver": { "level": 2, "plans": [ "test2", "test_deletion", ] }, "gold": { "level": 3, "plans": [ "test_trial", "unidentified_test_plan", ] }, }, DJSTRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS=( "(admin)", "test_url_name", "testapp_namespaced:test_url_namespaced", ), ) # Avoid AppRegistryNotReady exception # http://stackoverflow.com/questions/24793351/django-appregistrynotready if hasattr(django, "setup"): django.setup() # Announce the test suite sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"])) sys.stdout.write(colored(text="dj-stripe", color="green", attrs=["bold"])) sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"])) # Announce test run sys.stdout.write(colored(text="Step 1: Running unit tests.\n\n", color="yellow", attrs=["bold"])) # Hack to reset the global argv before nose has a chance to grab it # http://stackoverflow.com/a/1718407/1834570 args = sys.argv[1:] sys.argv = sys.argv[0:1] from django_nose import NoseTestSuiteRunner test_runner = NoseTestSuiteRunner(verbosity=1) failures = test_runner.run_tests(["."]) if failures: sys.exit(failures) if enable_coverage: # Announce coverage run sys.stdout.write(colored(text="\nStep 2: Generating coverage results.\n\n", color="yellow", attrs=["bold"])) cov.stop() percentage = round(cov.report(show_missing=True), 2) 
cov.html_report(directory='cover') cov.save() if percentage < TESTS_THRESHOLD: sys.stderr.write(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " + "WAS {old}%, IS NOW {new}%.\n\n".format(old=TESTS_THRESHOLD, new=percentage), color="red", attrs=["bold"])) sys.exit(1) else: # Announce disabled coverage run sys.stdout.write(colored(text="\nStep 2: Generating coverage results [SKIPPED].", color="yellow", attrs=["bold"])) if enable_pep8: # Announce flake8 run sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"])) print("pep8 errors:") print("----------------------------------------------------------------------") from subprocess import call flake_result = call(["flake8", ".", "--count"]) if flake_result != 0: sys.stderr.write("pep8 errors detected.\n") sys.stderr.write(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n\n", color="red", attrs=["bold"])) sys.exit(flake_result) else: print("None") else: # Announce disabled coverage run sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors [SKIPPED].\n", color="yellow", attrs=["bold"])) # Announce success if enable_coverage and enable_pep8: sys.stdout.write(colored(text="\nTests completed successfully with no errors. Congrats!\n", color="green", attrs=["bold"])) else: sys.stdout.write(colored(text="\nTests completed successfully, but some step(s) were skipped!\n", color="green", attrs=["bold"])) sys.stdout.write(colored(text="Don't push without running the skipped step(s).\n", color="red", attrs=["bold"]))
from django.apps import AppConfig

try:
    from coverage import Coverage
    coverage = Coverage()
    coverage.start()
except ImportError:
    coverage = None


class AutotestConfig(AppConfig):
    name = 'autotest'

    def __init__(self, *args, **kwargs):
        super(AutotestConfig, self).__init__(*args, **kwargs)
        self.coverage = None

    def coverage_start(self):
        if coverage and not self.coverage:
            self.coverage = Coverage()
            self.coverage.start()
        return self.coverage

    def coverage_report(self):
        if coverage and self.coverage:
            self.coverage.stop()
            coverage.stop()
            self.coverage.get_data().update(coverage.get_data())
            self.coverage.html_report()
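# A sketch of how AutotestConfig's reporting might be triggered once measurement
# has been started elsewhere (e.g. coverage_start() called from AppConfig.ready()).
# The view and its wiring are assumptions, not part of the original app.
from django.apps import apps
from django.http import HttpResponse

def coverage_report_view(request):
    config = apps.get_app_config('autotest')
    # Stops both Coverage objects, merges their data and writes the HTML report.
    config.coverage_report()
    return HttpResponse("coverage report written")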