def monkeypatch_module(request: FixtureRequest) -> MonkeyPatch:
    """Provide a MonkeyPatch whose patches are reverted when this scope ends."""
    patcher = MonkeyPatch()
    # Undo all patches during fixture teardown.
    request.addfinalizer(patcher.undo)
    return patcher
def monkeysessioncontext():
    """Yield a MonkeyPatch instance; revert every patch afterwards."""
    from _pytest.monkeypatch import MonkeyPatch

    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def monkeysession(request):
    """Yield a MonkeyPatch instance, undoing its patches on teardown.

    ``request`` is accepted for fixture-signature compatibility; it is unused.
    """
    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def mp() -> Generator[MonkeyPatch, None, None]:
    """Yield a fresh MonkeyPatch, restoring cwd and sys.path afterwards."""
    original_cwd = os.getcwd()
    saved_sys_path = sys.path[:]
    yield MonkeyPatch()
    # Revert interpreter state the test may have mutated.
    sys.path[:] = saved_sys_path
    os.chdir(original_cwd)
def setUp(self):
    """Create a MonkeyPatch instance for the test methods to use."""
    # NOTE(review): patches applied via this instance are never undone;
    # consider self.addCleanup(self.monkeypatch.undo) — confirm intent.
    self.monkeypatch = MonkeyPatch()
def monkeymodule():
    """Module-scoped MonkeyPatch fixture; all patches are undone at teardown."""
    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def monkeysession():
    """Session-scoped MonkeyPatch fixture; all patches are undone at teardown."""
    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def mp():
    """Yield a fresh MonkeyPatch; afterwards restore cwd and sys.path."""
    saved_cwd = os.getcwd()
    saved_path = sys.path[:]
    yield MonkeyPatch()
    # Put the interpreter state back the way the test found it.
    sys.path[:] = saved_path
    os.chdir(saved_cwd)
def pytest_generate_tests(metafunc):
    """Pytest hook: set up the Slack secret environment before tests are generated.

    Points SLACK_SECRET_NAME at the test secret and creates it via create_secret().
    """
    # NOTE(review): this MonkeyPatch is never undone, so the os.environ change
    # persists for the entire session — confirm that is intended.
    monkeypatch = MonkeyPatch()
    monkeypatch.setitem(os.environ, 'SLACK_SECRET_NAME', 'slack-security-bot')
    create_secret()
# -*- coding: utf-8 -*- ''' Test for hash_utils. ''' import base64 import bcrypt import pytest import steganosaurus.hash_utils as hash_utils import steganosaurus.constants as constants from _pytest.monkeypatch import MonkeyPatch monkeypatch = MonkeyPatch() def test_calculate_hash(): ''' Can calculate bcrypt hash of password. ''' salts = [ bytes.fromhex('2432622430382458533954464d334e525361394a594f4c2e76706a634f'), bytes.fromhex('2432622430382454613037774f566c4230396e7a47563953615671494f'), bytes.fromhex('24326224303824726b71795166354d566331485869387568317874662e') ] passwords = [ 'secret', '日本語',
class ShellFlagsTests(CompileShellTests):
    """TestCase class for functions in shell_flags.py"""

    # Shared MonkeyPatch used by the tests to stub js.shell_flags.chance.
    # NOTE(review): the patches are never undone, so they persist for the session.
    monkeypatch = MonkeyPatch()

    @pytest.mark.slow
    def test_add_random_arch_flags(self):
        """Test that we are able to obtain add shell runtime flags related to architecture."""
        # Make chance() deterministic so flag selection is predictable.
        ShellFlagsTests.monkeypatch.setattr(js.shell_flags, "chance", mock_chance)
        all_flags = js.shell_flags.add_random_arch_flags(
            self.test_shell_compile(), [])
        assert "--enable-avx" in all_flags
        assert "--no-sse3" in all_flags
        # ARM-simulator-only flags; skipped on other build configurations.
        if js.inspect_shell.queryBuildConfiguration(self.test_shell_compile(), "arm-simulator"):
            assert "--arm-sim-icache-checks" in all_flags
        if js.inspect_shell.queryBuildConfiguration(self.test_shell_compile(), "arm-simulator"):
            assert "--arm-asm-nop-fill=1" in all_flags
        if js.inspect_shell.queryBuildConfiguration(self.test_shell_compile(), "arm-simulator"):
            assert "--arm-hwcap=vfp" in all_flags

    @pytest.mark.slow
    def test_add_random_ion_flags(self):
        """Test that we are able to obtain add shell runtime flags related to IonMonkey."""
        ShellFlagsTests.monkeypatch.setattr(js.shell_flags, "chance", mock_chance)
        all_flags = js.shell_flags.add_random_ion_flags(
            self.test_shell_compile(), [])
        assert "--cache-ir-stubs=on" in all_flags
        assert "--ion-pgo=on" in all_flags
        assert "--ion-sincos=on" in all_flags
        assert "--ion-instruction-reordering=on" in all_flags
        assert "--ion-shared-stubs=on" in all_flags
        assert "--ion-regalloc=testbed" in all_flags
        assert '--execute="setJitCompilerOption(\\"ion.forceinlineCaches\\",1)"' in all_flags
        assert "--ion-extra-checks" in all_flags
        # assert "--ion-sink=on" in all_flags
        assert "--ion-warmup-threshold=100" in all_flags
        assert "--ion-loop-unrolling=on" in all_flags
        assert "--ion-scalar-replacement=on" in all_flags
        assert "--ion-check-range-analysis" in all_flags
        # assert "--ion-regalloc=stupid" in all_flags
        assert "--ion-range-analysis=on" in all_flags
        assert "--ion-edgecase-analysis=on" in all_flags
        assert "--ion-limit-script-size=on" in all_flags
        assert "--ion-osr=on" in all_flags
        assert "--ion-inlining=on" in all_flags
        assert "--ion-eager" in all_flags
        assert "--ion-gvn=on" in all_flags
        assert "--ion-licm=on" in all_flags

    @pytest.mark.slow
    def test_add_random_wasm_flags(self):
        """Test that we are able to obtain add shell runtime flags related to WebAssembly (wasm)."""
        ShellFlagsTests.monkeypatch.setattr(js.shell_flags, "chance", mock_chance)
        all_flags = js.shell_flags.add_random_wasm_flags(
            self.test_shell_compile(), [])
        assert "--wasm-gc" in all_flags
        assert "--no-wasm-baseline" in all_flags
        assert "--no-wasm-ion" in all_flags
        assert "--test-wasm-await-tier2" in all_flags

    @pytest.mark.slow
    def test_basic_flag_sets(self):
        """Test that we are able to obtain a basic set of shell runtime flags for fuzzing."""
        important_flag_set = ["--fuzzing-safe", "--no-threads", "--ion-eager"]
        # Important flag set combination
        assert important_flag_set in js.shell_flags.basic_flag_sets(
            self.test_shell_compile())

    def test_chance(self):
        """Test that the chance function works as intended."""
        ShellFlagsTests.monkeypatch.setattr(js.shell_flags, "chance", mock_chance)
        assert js.shell_flags.chance(0.6)
        assert js.shell_flags.chance(0.1)
        self.assertFalse(js.shell_flags.chance(0))
        self.assertFalse(js.shell_flags.chance(-0.2))

    @pytest.mark.slow
    def test_random_flag_set(self):
        """Test runtime flags related to SpiderMonkey."""
        ShellFlagsTests.monkeypatch.setattr(js.shell_flags, "chance", mock_chance)
        all_flags = js.shell_flags.random_flag_set(self.test_shell_compile())
        assert "--fuzzing-safe" in all_flags
        assert "--nursery-strings=on" in all_flags
        assert "--spectre-mitigations=on" in all_flags
        assert "--ion-offthread-compile=on" in all_flags
        # assert "--enable-streams" in all_flags
        assert "--no-unboxed-objects" in all_flags
        assert "--no-cgc" in all_flags
        assert "--gc-zeal=4,999" in all_flags
        assert "--no-incremental-gc" in all_flags
        assert "--no-threads" in all_flags
        assert "--no-native-regexp" in all_flags
        assert "--no-ggc" in all_flags
        assert "--no-baseline" in all_flags
        assert "--no-asmjs" in all_flags
        assert "--dump-bytecode" in all_flags

    @pytest.mark.slow
    def test_shell_supports_flag(self):
        """Test that the shell does support flags as intended."""
        assert js.shell_flags.shell_supports_flag(self.test_shell_compile(), "--fuzzing-safe")
def setUp(self):
    """Mock the global variable ``utils.flex.NAME`` for the duration of the test."""
    # Bug fix: the original created a throwaway MonkeyPatch() and discarded it,
    # so the patch could never be undone. Keep a reference (consistent with the
    # other setUp methods in this codebase) and restore the global after each test.
    self.monkeypatch = MonkeyPatch()
    self.monkeypatch.setattr('utils.flex.NAME', "Moon")
    self.addCleanup(self.monkeypatch.undo)
def monkeypatch_session():
    """Session-wide MonkeyPatch fixture; every patch is reverted on teardown."""
    from _pytest.monkeypatch import MonkeyPatch

    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def setUp(self) -> None:
    """Create a MonkeyPatch instance for the test methods to use."""
    # NOTE(review): no undo is registered; patches persist past each test.
    self.monkeypatch = MonkeyPatch()
def patch_module(monkeypatch=None):
    """Patch ``apps.get_installation_access_token`` with ``mock_return``.

    Yields the MonkeyPatch in use and undoes the patch afterwards.

    :param monkeypatch: optional MonkeyPatch instance; a fresh one is created
        when omitted.
    """
    # Bug fix: the original used ``monkeypatch=MonkeyPatch()`` — a mutable
    # default evaluated once at import time and shared by every invocation.
    if monkeypatch is None:
        monkeypatch = MonkeyPatch()
    monkeypatch.setattr(apps, "get_installation_access_token", mock_return)
    yield monkeypatch
    monkeypatch.undo()
def monkeymodule(request):
    """Module-scoped MonkeyPatch fixture; reverts all patches on teardown."""
    # Imported lazily to avoid a hard dependency at module import time.
    from _pytest.monkeypatch import MonkeyPatch  # type: ignore[import] # pylint: disable=import-outside-toplevel

    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def setUp(self):
    """Locate the Ecl100Config source directory and prepare a MonkeyPatch."""
    # Directory containing the module that defines Ecl100Config.
    self.ecl_config_path = os.path.dirname(
        inspect.getsourcefile(Ecl100Config))
    self.monkeypatch = MonkeyPatch()
def monkeymodule():
    """Module-scoped MonkeyPatch fixture; undoes every patch at teardown."""
    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def monkeyclass():
    """Class-scoped MonkeyPatch fixture; undoes every patch at teardown."""
    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def inline_run(self, *args, **kwargs):
    """Run ``pytest.main()`` in-process, returning a HookRecorder.

    Runs the :py:func:`pytest.main` function to run all of pytest inside
    the test process itself.  This means it can return a
    :py:class:`HookRecorder` instance which gives more detailed results
    from that run than can be done by matching stdout/stderr from
    :py:meth:`runpytest`.

    :param args: command line arguments to pass to :py:func:`pytest.main`

    :param plugin: (keyword-only) extra plugin instances the
        ``pytest.main()`` instance should use

    :return: a :py:class:`HookRecorder` instance
    """
    # Every piece of global state mutated below registers an undo callable
    # here; the finally block runs them all even if pytest.main() raises.
    finalizers = []
    try:
        # Do not load user config (during runs only).
        mp_run = MonkeyPatch()
        mp_run.setenv("HOME", str(self.tmpdir))
        mp_run.setenv("USERPROFILE", str(self.tmpdir))
        finalizers.append(mp_run.undo)

        # When running pytest inline any plugins active in the main test
        # process are already imported. So this disables the warning which
        # will trigger to say they can no longer be rewritten, which is
        # fine as they have already been rewritten.
        orig_warn = AssertionRewritingHook._warn_already_imported

        def revert_warn_already_imported():
            # Restore the original warning hook on teardown.
            AssertionRewritingHook._warn_already_imported = orig_warn

        finalizers.append(revert_warn_already_imported)
        AssertionRewritingHook._warn_already_imported = lambda *a: None

        # Any sys.module or sys.path changes done while running pytest
        # inline should be reverted after the test run completes to avoid
        # clashing with later inline tests run within the same pytest test,
        # e.g. just because they use matching test module names.
        finalizers.append(self.__take_sys_modules_snapshot().restore)
        finalizers.append(SysPathsSnapshot().restore)

        # Important note:
        # - our tests should not leave any other references/registrations
        #   laying around other than possibly loaded test modules
        #   referenced from sys.modules, as nothing will clean those up
        #   automatically

        rec = []

        # Tiny plugin whose only job is to capture a HookRecorder once the
        # plugin manager is configured. ``x`` is the plugin instance; the
        # enclosing ``self`` is captured by closure.
        class Collect(object):
            def pytest_configure(x, config):
                rec.append(self.make_hook_recorder(config.pluginmanager))

        plugins = kwargs.get("plugins") or []
        plugins.append(Collect())
        ret = pytest.main(list(args), plugins=plugins)
        if len(rec) == 1:
            reprec = rec.pop()
        else:
            # pytest_configure never fired (e.g. a usage error); fall back
            # to a bare namespace that carries only the exit code.

            class reprec(object):
                pass

        reprec.ret = ret

        # typically we reraise keyboard interrupts from the child run
        # because it's our user requesting interruption of the testing
        if ret == EXIT_INTERRUPTED and not kwargs.get("no_reraise_ctrlc"):
            calls = reprec.getcalls("pytest_keyboard_interrupt")
            if calls and calls[-1].excinfo.type == KeyboardInterrupt:
                raise KeyboardInterrupt()
        return reprec
    finally:
        for finalizer in finalizers:
            finalizer()
def monkeysession(request):
    """Return a MonkeyPatch whose patches are undone when this scope ends."""
    patcher = MonkeyPatch()
    # Register undo as a teardown finalizer.
    request.addfinalizer(patcher.undo)
    return patcher
def setUp(self):
    """Patch DatabaseConnection so tests target self.db_name and never commit."""
    self.monkeypatch = MonkeyPatch()
    # Point the connection at the test database.
    self.monkeypatch.setattr(
        "databaseConnection.DatabaseConnection.db_name", self.db_name)
    # Make commit a no-op so tests cannot persist changes.
    self.monkeypatch.setattr(
        "databaseConnection.DatabaseConnection.commit", lambda x: None)
def web2py_server(runestone_name, web2py_server_address, pytestconfig):
    """Initialize the test database, then run a web2py server plus a celery worker.

    Yields the web2py server ``Popen`` object (with a ``password`` attribute
    attached); terminates both child processes on teardown.
    """
    password = "******"
    os.environ["WEB2PY_CONFIG"] = "test"
    # HINT: make sure that ``0.py`` has something like the following, that reads this environment variable:
    #
    # .. code:: Python
    #    :number-lines:
    #
    #    config = environ.get("WEB2PY_CONFIG","production")
    #
    #    if config == "production":
    #        settings.database_uri = environ["DBURL"]
    #    elif config == "development":
    #        settings.database_uri = environ.get("DEV_DBURL")
    #    elif config == "test":
    #        settings.database_uri = environ.get("TEST_DBURL")
    #    else:
    #        raise ValueError("unknown value for WEB2PY_CONFIG")

    # HINT: make sure that you export ``TEST_DBURL`` in your environment; it is
    # not set here because it's specific to the local setup, possibly with a
    # password, and thus can't be committed to the repo.
    assert os.environ["TEST_DBURL"]

    # Extract the components of the DBURL. The expected format is ``postgresql://user:password@netloc/dbname``, a simplified form of the `connection URI <https://www.postgresql.org/docs/9.6/static/libpq-connect.html#LIBPQ-CONNSTRING>`_.
    empty1, postgres_ql, pguser, pgpassword, pgnetloc, dbname, empty2 = re.split(
        "^postgres(ql)?://(.*):(.*)@(.*)/(.*)$", os.environ["TEST_DBURL"])
    assert (not empty1) and (not empty2)
    os.environ["PGPASSWORD"] = pgpassword
    os.environ["PGUSER"] = pguser
    os.environ["DBHOST"] = pgnetloc
    # Assume we are running with working directory in tests.
    rs_path = "applications/{}".format(runestone_name)

    if pytestconfig.getoption("skipdbinit"):
        print("Skipping DB initialization.")
    else:
        # In the future, to print the output of the init/build process, see `pytest #1599 <https://github.com/pytest-dev/pytest/issues/1599>`_ for code to enable/disable output capture inside a test.
        #
        # Make sure runestone_test is nice and clean -- this will remove many
        # tables that web2py will then re-create.
        xqt("rsmanage --verbose initdb --reset --force")
        # Copy the test book to the books directory.
        rmtree("{}/books/test_course_1".format(rs_path), ignore_errors=True)
        # Sometimes this fails for no good reason on Windows. Retry.
        for retry in range(100):
            try:
                copytree(
                    "{}/tests/test_course_1".format(rs_path),
                    "{}/books/test_course_1".format(rs_path),
                )
                break
            except OSError:
                if retry == 99:
                    raise
        # Build the test book to add in db fields needed.
        with pushd("{}/books/test_course_1".format(
                rs_path)), MonkeyPatch().context() as m:
            # The runestone build process only looks at ``DBURL``.
            m.setenv("DBURL", os.environ["TEST_DBURL"])
            xqt(
                "{} -m runestone build --all".format(sys.executable),
                "{} -m runestone deploy".format(sys.executable),
            )

    # Start coverage measurement from a clean slate; the server below runs
    # under ``coverage run --append``.
    xqt("{} -m coverage erase".format(sys.executable))

    # For debug:
    #
    # #. Uncomment the next three lines.
    # #. Set ``WEB2PY_CONFIG`` to ``test``; all the other usual Runestone environment variables must also be set.
    # #. Run ``python -m celery --app=scheduled_builder worker --pool=gevent --concurrency=4 --loglevel=info`` from ``applications/runestone/modules`` to use the scheduler. I'm assuming the redis server (which the tests needs regardless of debug) is also running.
    # #. Run a test (in a separate window). When the debugger stops at the lines below:
    #
    #    #. Run web2py manually to see all debug messages. Use a command line like ``python web2py.py -a pass``.
    #    #. After web2py is started, type "c" then enter to continue the debugger and actually run the tests.
    ##import pdb; pdb.set_trace()
    ##yield DictToObject(dict(password=password))
    ##return

    # Start the web2py server and the `web2py scheduler <http://web2py.com/books/default/chapter/29/04/the-core#Scheduler-Deployment>`_.
    web2py_server = subprocess.Popen(
        [
            sys.executable,
            "-m",
            "coverage",
            "run",
            "--append",
            "--source=" + COVER_DIRS,
            "web2py.py",
            "-a",
            password,
            "--no_gui",
            "--minthreads=10",
            "--maxthreads=20",
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        # Produce text (not binary) output for nice output in ``echo()`` below.
        universal_newlines=True,
    )

    # Wait for the webserver to come up.
    for tries in range(50):
        try:
            urlopen(web2py_server_address, timeout=5)
        except URLError:
            # Wait for the server to come up.
            time.sleep(0.1)
        else:
            # The server is up. We're done.
            break

    # Run Celery. Per https://github.com/celery/celery/issues/3422, it sounds like celery doesn't support coverage, so omit it.
    celery_process = subprocess.Popen(
        [
            sys.executable,
            "-m",
            "celery",
            "--app=scheduled_builder",
            "worker",
            "--pool=gevent",
            "--concurrency=4",
            "--loglevel=info",
        ],
        # Celery must be run in the ``modules`` directory, where the worker is defined.
        cwd="{}/modules".format(rs_path),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        # Produce text (not binary) output for nice output in ``echo()`` below.
        universal_newlines=True,
    )

    # Start a thread to read web2py output and echo it.
    def echo(popen_obj, description_str):
        # communicate() blocks until the child exits, so each echo runs in
        # its own thread and prints once the process terminates.
        stdout, stderr = popen_obj.communicate()
        print("\n"
              "{} stdout\n"
              "--------------------\n".format(description_str))
        print(stdout)
        print("\n"
              "{} stderr\n"
              "--------------------\n".format(description_str))
        print(stderr)

    echo_threads = [
        Thread(target=echo, args=(web2py_server, "web2py server")),
        Thread(target=echo, args=(celery_process, "celery process")),
    ]
    # TODO: Redis for Windows.
    for echo_thread in echo_threads:
        echo_thread.start()

    # Save the password used.
    web2py_server.password = password

    # Wait for the server to come up. The delay varies; this is a guess.

    # After this comes the `teardown code <https://docs.pytest.org/en/latest/fixture.html#fixture-finalization-executing-teardown-code>`_.
    yield web2py_server

    # Terminate the server and schedulers to give web2py time to shut down gracefully.
    web2py_server.terminate()
    celery_process.terminate()
    for echo_thread in echo_threads:
        echo_thread.join()
def monkey_class():
    """Class-scoped MonkeyPatch fixture; reverts every patch on teardown."""
    from _pytest.monkeypatch import MonkeyPatch

    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def setUp(self):
    """Pin the timezone to CET and locate the snake_oil test configuration."""
    self.monkeypatch = MonkeyPatch()
    self.monkeypatch.setenv(
        "TZ", "CET")  # The ert_statoil case was generated in CET
    self.config = self.createTestPath("local/snake_oil/snake_oil.ert")
def inline_run(self, *args, plugins=(), no_reraise_ctrlc=False):
    """Run ``pytest.main()`` in-process, returning a HookRecorder.

    Runs the :py:func:`pytest.main` function to run all of pytest inside
    the test process itself.  This means it can return a
    :py:class:`HookRecorder` instance which gives more detailed results
    from that run than can be done by matching stdout/stderr from
    :py:meth:`runpytest`.

    :param args: command line arguments to pass to :py:func:`pytest.main`

    :kwarg plugins: extra plugin instances the ``pytest.main()`` instance should use.

    :kwarg no_reraise_ctrlc: typically we reraise keyboard interrupts from the child run. If
        True, the KeyboardInterrupt exception is captured.

    :return: a :py:class:`HookRecorder` instance
    """
    # (maybe a cpython bug?) the importlib cache sometimes isn't updated
    # properly between file creation and inline_run (especially if imports
    # are interspersed with file creation)
    importlib.invalidate_caches()

    plugins = list(plugins)
    # Undo callables for every piece of global state mutated below; the
    # finally block runs them all even if pytest.main() raises.
    finalizers = []
    try:
        # Do not load user config (during runs only).
        mp_run = MonkeyPatch()
        for k, v in self._env_run_update.items():
            mp_run.setenv(k, v)
        finalizers.append(mp_run.undo)

        # Any sys.module or sys.path changes done while running pytest
        # inline should be reverted after the test run completes to avoid
        # clashing with later inline tests run within the same pytest test,
        # e.g. just because they use matching test module names.
        finalizers.append(self.__take_sys_modules_snapshot().restore)
        finalizers.append(SysPathsSnapshot().restore)

        # Important note:
        # - our tests should not leave any other references/registrations
        #   laying around other than possibly loaded test modules
        #   referenced from sys.modules, as nothing will clean those up
        #   automatically

        rec = []

        # Tiny plugin whose only job is to capture a HookRecorder once the
        # plugin manager is configured. ``x`` is the plugin instance; the
        # enclosing ``self`` is captured by closure.
        class Collect:
            def pytest_configure(x, config):
                rec.append(self.make_hook_recorder(config.pluginmanager))

        plugins.append(Collect())
        ret = pytest.main(list(args), plugins=plugins)
        if len(rec) == 1:
            reprec = rec.pop()
        else:
            # pytest_configure never fired (e.g. a usage error); fall back
            # to a bare namespace that carries only the exit code.

            class reprec:  # type: ignore
                pass

        reprec.ret = ret

        # typically we reraise keyboard interrupts from the child run
        # because it's our user requesting interruption of the testing
        if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc:
            calls = reprec.getcalls("pytest_keyboard_interrupt")
            if calls and calls[-1].excinfo.type == KeyboardInterrupt:
                raise KeyboardInterrupt()
        return reprec
    finally:
        for finalizer in finalizers:
            finalizer()
def monkeypatch_session(request):
    """Session-scoped MonkeyPatch fixture; all patches are undone at teardown.

    ``request`` is accepted for fixture-signature compatibility; it is unused.
    """
    from _pytest.monkeypatch import MonkeyPatch

    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def setUp(self):
    """Build a 1100-object mock queryset and a MonkeyPatch for the tests."""
    self.queryset = MockQueryset(
        [MockObject(pk=i, name=str(i)) for i in range(0, 1100)])
    self.monkeypatch = MonkeyPatch()
def monkeymodule(self):
    """Module-scoped MonkeyPatch fixture; reverts every patch on teardown."""
    from _pytest.monkeypatch import MonkeyPatch

    patcher = MonkeyPatch()
    yield patcher
    patcher.undo()
def main():
    """Entry point: configure the environment, then run pytest with the Yandex plugins.

    Exits the process with pytest's return code (massaged for the ya test runner).
    """
    import library.python.pytest.context as context

    # Record when the pytest run started (consumed elsewhere via the context).
    context.Ctx["YA_PYTEST_START_TIMESTAMP"] = time.time()

    profile = None
    if '--profile-pytest' in sys.argv:
        # Strip our private flag so pytest doesn't see it.
        sys.argv.remove('--profile-pytest')

        import pstats
        import cProfile
        profile = cProfile.Profile()
        profile.enable()

    # Reset influencing env. vars
    # For more info see library/python/testing/yatest_common/yatest/common/errors.py
    if FORCE_EXIT_TESTSFAILED_ENV in os.environ:
        del os.environ[FORCE_EXIT_TESTSFAILED_ENV]

    if "Y_PYTHON_CLEAR_ENTRY_POINT" in os.environ:
        if "Y_PYTHON_ENTRY_POINT" in os.environ:
            del os.environ["Y_PYTHON_ENTRY_POINT"]
        del os.environ["Y_PYTHON_CLEAR_ENTRY_POINT"]

    listing_mode = '--collect-only' in sys.argv
    yatest_runner = os.environ.get('YA_TEST_RUNNER') == '1'

    import pytest

    import library.python.pytest.plugins.collection as collection
    import library.python.pytest.plugins.ya as ya
    import library.python.pytest.plugins.conftests as conftests

    import _pytest.assertion
    from _pytest.monkeypatch import MonkeyPatch

    from . import rewrite
    # Replace pytest's assertion-rewriting hook with the custom one.
    # NOTE(review): this MonkeyPatch is never undone — acceptable since the
    # process exits at the end of main().
    m = MonkeyPatch()
    m.setattr(_pytest.assertion.rewrite, "AssertionRewritingHook", rewrite.AssertionRewritingHook)

    prefix = '__tests__.'

    # Test modules are registered under the ``__tests__.`` prefix; conftests
    # are collected separately.
    test_modules = [
        name[len(prefix):] for name in sys.extra_modules
        if name.startswith(prefix) and not name.endswith('.conftest')
    ]

    doctest_packages = __res.find("PY_DOCTEST_PACKAGES") or ""
    if isinstance(doctest_packages, bytes):
        doctest_packages = doctest_packages.decode('utf-8')
    doctest_packages = doctest_packages.split()

    def is_doctest_module(name):
        # True when ``name`` is one of the doctest packages or inside one.
        for package in doctest_packages:
            if name == package or name.startswith(str(package) + "."):
                return True
        return False

    doctest_modules = [
        name for name in sys.extra_modules if is_doctest_module(name)
    ]

    def remove_user_site(paths):
        # Drop user site-package directories so only bundled modules are used.
        site_paths = ('site-packages', 'site-python')

        def is_site_path(path):
            for p in site_paths:
                if path.find(p) != -1:
                    return True
            return False

        new_paths = list(paths)
        for p in paths:
            if is_site_path(p):
                new_paths.remove(p)

        return new_paths

    sys.path = remove_user_site(sys.path)

    rc = pytest.main(plugins=[
        collection.CollectionPlugin(test_modules, doctest_modules),
        ya,
        conftests,
    ])

    if rc == 5:
        # don't care about EXIT_NOTESTSCOLLECTED
        rc = 0

    if rc == 1 and yatest_runner and not listing_mode and not os.environ.get(
            FORCE_EXIT_TESTSFAILED_ENV) == '1':
        # XXX it's place for future improvements
        # Test wrapper should terminate with 0 exit code if there are common test failures
        # and report it with trace-file machinery.
        # However, there are several case when we don't want to suppress exit_code:
        # - listing machinery doesn't use trace-file currently and rely on stdout and exit_code
        # - RestartTestException and InfrastructureException required non-zero exit_code to be processes correctly
        rc = 0

    if profile:
        profile.disable()
        ps = pstats.Stats(profile, stream=sys.stderr).sort_stats('cumulative')
        ps.print_stats()

    sys.exit(rc)