def pytest(line, cell):
    # Write the cell body to a temporary test module and run pytest on it,
    # passing any arguments from `line` through to pytest.
    with tempfile.TemporaryDirectory() as root:
        oldcwd = os.getcwd()
        os.chdir(root)
        tests_module_path = '{}.py'.format(TEST_MODULE_NAME)
        try:
            Path(tests_module_path).write_text(cell)
            args = shlex.split(line)
            os.environ['COLUMNS'] = '80'
            pytest_main(args + [tests_module_path])
            # Drop the module so a later run re-imports fresh code.
            if TEST_MODULE_NAME in sys.modules:
                del sys.modules[TEST_MODULE_NAME]
        finally:
            os.chdir(oldcwd)

def pytest(line, cell):
    with TemporaryDirectory() as root:
        oldcwd = os.getcwd()
        os.chdir(root)
        tests_module_path = '{}.py'.format(TEST_MODULE_NAME)
        try:
            with open(tests_module_path, 'w') as test_file:
                test_file.write(cell)
            # args = shlex.split(line)
            os.environ['COLUMNS'] = '80'
            pytest_main([tests_module_path])
            if TEST_MODULE_NAME in sys.modules:
                del sys.modules[TEST_MODULE_NAME]
        finally:
            os.chdir(oldcwd)

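The two `pytest(line, cell)` functions above use the IPython cell-magic signature: `line` carries the arguments after the magic name, `cell` the cell body. A minimal sketch of how such a function could be wired up, assuming an active IPython/Jupyter session; the registration call and the example cell are illustrative and not part of the snippets above.

# A minimal sketch, assuming an active IPython/Jupyter session; `pytest` below
# refers to a `pytest(line, cell)` helper like the ones defined above.
from IPython.core.magic import register_cell_magic

register_cell_magic(pytest)  # makes the helper available as the %%pytest cell magic

# A notebook cell could then look like (illustrative example):
#
#   %%pytest -q
#   def test_addition():
#       assert 1 + 1 == 2
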
def handle(self, *, verbosity, **options):
    self.prepare_global_state()
    # '-s' is to see print output.
    # '--tb=short' shows short tracebacks, which is more expected and less
    # verbose: with pytest's default long tracebacks, the code that gets
    # printed is often in otree-core, which is not relevant.
    # It is also better than '--tb=native', which loses line breaks when the
    # output contains a unicode char, and doesn't get color coded with
    # colorama the way short tracebacks do.
    argv = [otree.bots.runner.__file__, '-s', '--tb', 'short']
    if verbosity == 0:
        argv.append('--quiet')
    if verbosity == 2:
        argv.append('--verbose')
    for k in ['session_config_name', 'num_participants', 'export_path']:
        v = options[k]
        if v:
            argv.extend([f'--{k}', v])
    exit_code = pytest_main(argv)
    if not options['export_path']:
        logger.info('Tip: Run this command with the --export flag'
                    ' to save the data generated by bots.')
    # Exit with the exit code, so that CI systems can know whether
    # the tests succeeded or failed.
    sys_exit(exit_code)

def test_pytest(capsys):
    class CollectResults:
        def pytest_sessionfinish(self, session):
            self.session = session

    results = CollectResults()
    return_code = pytest_main([join(example_dir, 'docs')], plugins=[results])
    assert return_code == 0
    assert results.session.testsfailed == 0
    assert results.session.testscollected == 3

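The `CollectResults` object above works because every object passed via `plugins=` to `pytest.main` is registered as a plugin for that run, so hook methods defined on it, such as `pytest_sessionfinish`, are called in-process and can capture state for later assertions. A self-contained sketch of the same pattern, with `'tests'` as a placeholder target directory:

# A self-contained sketch of the plugins= pattern; 'tests' is a placeholder path.
from pytest import main as pytest_main


class CollectResults:
    """Ad-hoc plugin: pytest calls any hook methods defined on this object."""

    def pytest_sessionfinish(self, session):
        # Keep a reference to the finished session for inspection after the run.
        self.session = session


results = CollectResults()
return_code = pytest_main(['tests'], plugins=[results])
print(return_code, results.session.testscollected, results.session.testsfailed)
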
def run_tests(args): self.print("Running test") # Must be imported within coverage from noc.config import config db_name = "test_%d" % time.time() # Override database names config.pg.db = db_name config.mongo.db = db_name config.clickhouse.db = db_name return pytest_main(args)
def run_tests(args): self.print("Running test") # Must be imported within coverage from noc.config import config if test_db: db_name = test_db else: db_name = "test_%d" % time.time() # Generate unique database name # Override database names config.pg.db = db_name config.mongo.db = db_name config.clickhouse.db = db_name exit_code = pytest_main(args) if idea_bookmarks: self.dump_idea_bookmarks(idea_bookmarks) return exit_code
def test_pytest(capsys):
    class CollectResults:
        def pytest_sessionfinish(self, session):
            self.session = session

    results = CollectResults()
    return_code = pytest_main(['-vvs', join(functional_test_dir, 'pytest')],
                              plugins=[results])
    assert return_code == 1
    assert results.session.testsfailed == 4
    assert results.session.testscollected == 10

    out, err = capsys.readouterr()

    # check we're trimming tracebacks:
    index = out.find('sybil/example.py')
    if index > -1:  # pragma: no cover
        raise AssertionError('\n' + out[index - 500:index + 500])

    out = Finder(out)

    out.then_find('fail.rst::line:1,column:1')
    out.then_find('fail.rst::line:1,column:1 sybil setup session_fixture setup\n'
                  'module_fixture setup\n'
                  'class_fixture setup\n'
                  'function_fixture setup\n'
                  'x is currently: 0\n'
                  'FAILED function_fixture teardown\n'
                  'class_fixture teardown')
    out.then_find('fail.rst::line:6,column:1')
    out.then_find('fail.rst::line:6,column:1 class_fixture setup\n'
                  'function_fixture setup\n'
                  '0smcf PASSED function_fixture teardown\n'
                  'class_fixture teardown')
    out.then_find('fail.rst::line:8,column:1')
    out.then_find('fail.rst::line:8,column:1 class_fixture setup\n'
                  'function_fixture setup\n'
                  '1smcf FAILED function_fixture teardown\n'
                  'class_fixture teardown')
    out.then_find('fail.rst::line:10,column:1')
    out.then_find('fail.rst::line:10,column:1 class_fixture setup\n'
                  'function_fixture setup\n'
                  '2smcf FAILED function_fixture teardown\n'
                  'class_fixture teardown')
    out.then_find('fail.rst::line:12,column:1')
    out.then_find('fail.rst::line:12,column:1 class_fixture setup\n'
                  'function_fixture setup\n'
                  '3smcf PASSED function_fixture teardown\n'
                  'class_fixture teardown')
    out.then_find('fail.rst::line:14,column:1')
    out.then_find('fail.rst::line:14,column:1 class_fixture setup\n'
                  'function_fixture setup\n'
                  'FAILED function_fixture teardown\n'
                  'class_fixture teardown\n'
                  'module_fixture teardown\n'
                  'sybil teardown 5')
    out.then_find('pass.rst::line:1,column:1')
    out.then_find('pass.rst::line:1,column:1 sybil setup module_fixture setup\n'
                  'class_fixture setup\n'
                  'function_fixture setup\n'
                  '0smcf PASSED function_fixture teardown\n'
                  'class_fixture teardown')
    out.then_find('pass.rst::line:3,column:1')
    out.then_find('pass.rst::line:3,column:1 class_fixture setup\n'
                  'function_fixture setup\n'
                  '1smcf PASSED function_fixture teardown\n'
                  'class_fixture teardown')
    out.then_find('pass.rst::line:5,column:1')
    out.then_find('pass.rst::line:5,column:1 class_fixture setup\n'
                  'function_fixture setup\n'
                  '2smcf PASSED function_fixture teardown\n'
                  'class_fixture teardown')
    out.then_find('pass.rst::line:7,column:1')
    out.then_find('pass.rst::line:7,column:1 class_fixture setup\n'
                  'function_fixture setup\n'
                  '3smcf PASSED function_fixture teardown\n'
                  'class_fixture teardown\n'
                  'module_fixture teardown\n'
                  'sybil teardown 4\n'
                  'session_fixture teardown')
    out.then_find('_ fail.rst line=1 column=1 _')
    out.then_find("> raise Exception('the start!')")
    out.then_find('_ fail.rst line=8 column=1 _')
    out.then_find('Y count was 3 instead of 2')
    out.then_find('fail.rst:8: SybilFailure')
    out.then_find('_ fail.rst line=10 column=1 _')
    out.then_find('ValueError: X count was 3 instead of 4')
    out.then_find('_ fail.rst line=14 column=1 _')
    out.then_find("> raise Exception('boom!')")
    out.then_find('fail.rst:18: Exception')

#! /usr/bin/env python
import sys

if __name__ == '__main__':
    from pytest import main as pytest_main
    sys.exit(pytest_main())

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2009-2018 Joshua Bronson. All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# First run all tests that pytest discovers.
from pytest import main as pytest_main
exit_code = pytest_main()

# pytest's doctest support doesn't support Sphinx extensions
# (see https://www.sphinx-doc.org/en/latest/usage/extensions/doctest.html)
# so test the code in the Sphinx docs using Sphinx's own doctest support.
from sphinx.cmd.build import main as sphinx_main
exit_code = sphinx_main(
    '-b doctest -d docs/_build/doctrees docs docs/_build/doctest'.split()
) or exit_code
exit(exit_code)

#! /usr/bin/env python

if __name__ == '__main__':
    from pytest import main as pytest_main
    pytest_main()

        stream=stdout,
        format="%(levelname)s:%(module)s:%(funcName)s: %(message)s"
    )
    logging.getLogger().setLevel(logging.INFO)
    logging.getLogger('PySide2').setLevel(logging.CRITICAL)
else:
    # Disable logs (only show critical)
    logging.basicConfig(level=logging.CRITICAL)

resolve_conflicts(ARGS)  # Checks for conflicting options

# TEST PHASE ==================================================
if ARGS.run_tests:
    logging.info("Running pre-launch tests...")
    # -x = stop at first failure
    EXIT_CODE = pytest_main(["tests", "-x", "--tb=no", "-s"])
    if EXIT_CODE != 0:  # pytest returns 0 only when all tests pass
        logging.error("Pre-launch tests failed! Aborting...")
        logging.shutdown()
        exit(EXIT_CODE)
    logging.info("All pre-launch tests succeeded...")
elif ARGS.only_run_tests:
    logging.info("Running tests with detailed feedback...")
    logging.shutdown()
    exit(pytest_main(["tests", "--tb=long", "--durations=3", "-s"]))

# BENCHMARK ===========================================================
if ARGS.benchmark_n: