def enable_requests_mock(
    monkeypatch: MonkeyPatch,
    requests_mock: Mocker,
) -> None:
    """
    Enable a mock service backed by the Flask applications.
    """
    # Route each in-memory Flask app under the base URL it emulates.
    app_routes = (
        (VWS_FLASK_APP, 'https://vws.vuforia.com'),
        (CLOUDRECO_FLASK_APP, 'https://cloudreco.vuforia.com'),
        (TARGET_MANAGER_FLASK_APP, _EXAMPLE_URL_FOR_TARGET_MANAGER),
    )
    for flask_app, base_url in app_routes:
        add_flask_app_to_mock(
            mock_obj=requests_mock,
            flask_app=flask_app,
            base_url=base_url,
        )
    # Point consumers of the target manager at the mocked base URL.
    monkeypatch.setenv(
        name='TARGET_MANAGER_BASE_URL',
        value=_EXAMPLE_URL_FOR_TARGET_MANAGER,
    )
def test_ls_cli(monkeypatch: pytest.MonkeyPatch, tmp_path: pathlib.Path) -> None:
    """``ls`` lists config-file stems, skipping directories and unknown extensions."""
    monkeypatch.setenv("HOME", str(tmp_path))
    monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path / ".config"))
    filenames = [
        ".git/",
        ".gitignore/",
        "session_1.yaml",
        "session_2.yaml",
        "session_3.json",
        "session_4.txt",
    ]
    # should ignore:
    # - directories should be ignored
    # - extensions not covered in VALID_CONFIG_DIR_FILE_EXTENSIONS
    ignored_filenames = [".git/", ".gitignore/", "session_4.txt"]
    stems = [
        os.path.splitext(f)[0] for f in filenames if f not in ignored_filenames
    ]
    for filename in filenames:
        # BUG FIX: the f-string previously contained no placeholder, so every
        # entry mapped onto the same literal path and the fixture files were
        # never actually created; interpolate the loop variable.
        location = tmp_path / f".tmuxp/{filename}"
        if filename.endswith("/"):
            location.mkdir(parents=True)
        else:
            location.touch()
    runner = CliRunner()
    cli_output = runner.invoke(command_ls).output
    assert cli_output == "\n".join(stems) + "\n"
def test_user_cache_dir_linux_home_slash(
    self, monkeypatch: pytest.MonkeyPatch) -> None:
    """A HOME of "/" must not produce a doubled-slash cache path."""
    # Verify that we are not affected by https://bugs.python.org/issue14768
    monkeypatch.delenv("XDG_CACHE_HOME", raising=False)
    monkeypatch.setenv("HOME", "/")
    expected = "/.cache/pip"
    assert appdirs.user_cache_dir("pip") == expected
def default_config(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
    """Redirect config discovery to *tmp_path* and write a minimal config file."""
    # Cover both POSIX (HOME / XDG) and Windows (LOCALAPPDATA) lookups.
    monkeypatch.setenv("HOME", str(tmp_path))
    monkeypatch.delenv("XDG_CONFIG_HOME", raising=False)
    monkeypatch.setenv("LOCALAPPDATA", str(tmp_path))
    config_path = get_default_configpath()
    config_path.parent.mkdir(parents=True, exist_ok=True)
    config_path.write_text('[outgoing]\nmethod = "command"\n')
def test_missing_enable_ext(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
    """Test missing enable-ext option fails.

    Check that a workflow that needs `--enable-ext` and `--enable-dev`
    fails without those options and passes with them.
    """
    monkeypatch.delenv("CWLTOOL_OPTIONS", raising=False)
    workflow_args = [
        "--outdir",
        str(tmp_path),
        get_data("tests/wf/generator/zing.cwl"),
        "--zing",
        "zipper",
    ]
    # Without the extension flags the workflow must fail.
    assert main(workflow_args) == 1
    # Supplying the flags on the command line succeeds.
    assert main(["--debug", "--enable-ext", "--enable-dev"] + workflow_args) == 0
    # The flags may also come from the CWLTOOL_OPTIONS environment variable.
    monkeypatch.setenv("CWLTOOL_OPTIONS", "--enable-ext --enable-dev")
    assert main(workflow_args) == 0
def test_valid_home(ee_enabled, tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
    """Confirm a valid .ansible.cfg is parsed when in the home directory.

    When EE support is enabled, the .ansible.cfg file is not used
    When EE support is disabled the .ansible.cfg file is used

    :param ee_enabled: Indicate if EE support is enabled
    :param tmp_path: The path to a test temporary directory
    :param monkeypatch: The monkeypatch fixture
    """
    cfg_path = tmp_path / ".ansible.cfg"
    cfg_path.write_text(ANSIBLE_CFG_VALID)
    monkeypatch.chdir(tmp_path)
    monkeypatch.setenv("HOME", str(tmp_path))
    parsed_cfg = parse_ansible_cfg(ee_enabled=ee_enabled)
    if ee_enabled:
        # With EE support the config file is ignored entirely.
        assert parsed_cfg.config.contents is Constants.NONE
        assert parsed_cfg.config.path is Constants.NONE
        assert parsed_cfg.config.text is Constants.NONE
    else:
        # Without EE support the file's contents are fully parsed.
        assert parsed_cfg.config.contents == {
            "defaults": {"cow_selection": "milk", "inventory": "inventory.yml"},
        }
        assert parsed_cfg.config.path == cfg_path
        assert parsed_cfg.config.text == ANSIBLE_CFG_VALID.splitlines()
def test_handle_backend_remote_name(
    self,
    caplog: LogCaptureFixture,
    monkeypatch: MonkeyPatch,
    runway_context: MockRunwayContext,
    tmp_path: Path,
) -> None:
    """Test handle_backend for remote backend with workspace prefix."""
    caplog.set_level(LogLevels.DEBUG, logger=MODULE)
    # Pre-set TF_WORKSPACE so we can verify handle_backend removes it.
    monkeypatch.setenv("TF_WORKSPACE", "anything")
    mock_get_full_configuration = MagicMock(return_value={})
    # Remote backend with a named workspace block.
    backend = {"type": "remote", "config": {"workspaces": {"name": "test"}}}
    obj = Terraform(runway_context, module_root=tmp_path)
    # Replace tfenv and the backend-config accessor with mocks.
    monkeypatch.setattr(obj, "tfenv", MagicMock(backend=backend))
    monkeypatch.setattr(
        obj.options.backend_config,
        "get_full_configuration",
        mock_get_full_configuration,
    )
    assert not obj.handle_backend()
    mock_get_full_configuration.assert_called_once_with()
    # TF_WORKSPACE must be scrubbed from the module's environment,
    # and the workspace forced to "default".
    assert "TF_WORKSPACE" not in obj.ctx.env.vars
    assert obj.required_workspace == "default"
    assert 'forcing use of static workspace "default"' in "\n".join(
        caplog.messages)
def test_logging_filters(
    self,
    capfd: pytest.CaptureFixture,
    log_filters_input: str,
    log_filters_output: set[str],
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test that log message filters are applied as expected."""
    # Rebuild the module-level filter set from the patched env var.
    monkeypatch.setenv("LOG_FILTERS", log_filters_input)
    mocker.patch.object(logging_conf, "LOG_FILTERS",
                        logging_conf.LogFilter.set_filters())
    # Install the rebuilt filter set into the dictConfig filter entry.
    mocker.patch.dict(
        logging_conf.LOGGING_CONFIG["filters"]["filter_log_message"],
        {
            "()": logging_conf.LogFilter,
            "filters": logging_conf.LOG_FILTERS
        },
        clear=True,
    )
    path_to_log = "/status"
    logger = logging_conf.logging.getLogger(
        "test.logging_conf.output.filters")
    logging_conf.configure_logging(logger=logger)
    # An unfiltered access-log line should appear in the output...
    logger.info(*self._uvicorn_access_log_args(path_to_log))
    logger.info(log_filters_input)
    # ...while lines matching each configured filter should be dropped.
    for log_filter in log_filters_output:
        logger.info(*self._uvicorn_access_log_args(log_filter))
    captured = capfd.readouterr()
    assert logging_conf.LOG_FILTERS == log_filters_output
    assert path_to_log in captured.out
    for log_filter in log_filters_output:
        assert log_filter not in captured.out
def test_env_var_integrate_cli(self, option: str, value: int,
                               monkeypatch: pytest.MonkeyPatch) -> None:
    """A PIP_<OPTION> env var combines with the matching CLI flag."""
    env_var = "PIP_" + option.upper()
    monkeypatch.setenv(env_var, str(value))
    # FakeCommand intentionally returns the wrong type.
    options, args = cast(Tuple[Values, List[str]],
                         main(["fake", "--" + option]))
    assert getattr(options, option) == value + 1
def test_move_local_delete_empty_dirs(
    monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
    """Moving every file out of a local directory removes that directory."""
    starting_assets = list(moving_dandiset.dandiset.get_assets())
    monkeypatch.chdir(moving_dandiset.dspath / "subdir4")
    monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
    move(
        "../subdir1/apple.txt",
        "../subdir2/banana.txt",
        "foo.json",
        dest="../subdir3",
        work_on="local",
        devel_debug=True,
    )
    expected_moves = {
        "subdir1/apple.txt": "subdir3/apple.txt",
        "subdir2/banana.txt": "subdir3/banana.txt",
        "subdir4/foo.json": "subdir3/foo.json",
    }
    check_assets(moving_dandiset, starting_assets, "local", expected_moves)
    # subdir1 was emptied by the move and should be pruned...
    assert not (moving_dandiset.dspath / "subdir1").exists()
    # ...while directories that still hold files must remain.
    assert (moving_dandiset.dspath / "subdir2").exists()
    assert (moving_dandiset.dspath / "subdir4").exists()
def test_move_path_to_self(
    caplog: pytest.LogCaptureFixture,
    monkeypatch: pytest.MonkeyPatch,
    moving_dandiset: SampleDandiset,
    work_on: str,
) -> None:
    """Moving an asset onto its own path is a logged no-op."""
    (moving_dandiset.dspath / "newdir").mkdir()
    starting_assets = list(moving_dandiset.dandiset.get_assets())
    monkeypatch.chdir(moving_dandiset.dspath / "subdir1")
    monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
    move(
        "apple.txt",
        dest="../subdir1",
        work_on=work_on,
        devel_debug=True,
        dandi_instance=moving_dandiset.api.instance_id,
    )
    sides = ["local", "remote"] if work_on == "both" else [work_on]
    for where in sides:
        expected_record = (
            "dandi",
            logging.DEBUG,
            f"Would move {where} asset 'subdir1/apple.txt' to itself; ignoring",
        )
        assert expected_record in caplog.record_tuples
    assert ("dandi", logging.INFO, "Nothing to move") in caplog.record_tuples
    # No assets should have changed.
    check_assets(moving_dandiset, starting_assets, work_on, {})
def test_move_dandiset_url(
    monkeypatch: pytest.MonkeyPatch,
    moving_dandiset: SampleDandiset,
    tmp_path: Path,
    work_on: str,
) -> None:
    """Moving assets works from outside the Dandiset via an explicit URL."""
    starting_assets = list(moving_dandiset.dandiset.get_assets())
    # Run from an unrelated directory; the Dandiset is addressed by URL.
    monkeypatch.chdir(tmp_path)
    monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
    move(
        "file.txt",
        "subdir2/banana.txt",
        dest="subdir1",
        work_on=work_on,
        dandiset=moving_dandiset.dandiset.api_url,
        devel_debug=True,
    )
    expected_moves = {
        "file.txt": "subdir1/file.txt",
        "subdir2/banana.txt": "subdir1/banana.txt",
    }
    check_assets(moving_dandiset, starting_assets, "remote", expected_moves)
def test_move_regex_some_to_self(
    caplog: pytest.LogCaptureFixture,
    monkeypatch: pytest.MonkeyPatch,
    moving_dandiset: SampleDandiset,
    work_on: str,
) -> None:
    """A regex move that maps some assets onto themselves skips those assets."""
    starting_assets = list(moving_dandiset.dandiset.get_assets())
    monkeypatch.chdir(moving_dandiset.dspath)
    monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
    move(
        r"(.+[123])/([^.]+)\.(.+)",
        dest=r"\1/\2.dat",
        regex=True,
        work_on=work_on,
        dandi_instance=moving_dandiset.api.instance_id,
        devel_debug=True,
    )
    sides = ["local", "remote"] if work_on == "both" else [work_on]
    # Files already named *.dat map onto themselves and must be skipped.
    for path in ["subdir3/red.dat", "subdir3/green.dat", "subdir3/blue.dat"]:
        for where in sides:
            assert (
                "dandi",
                logging.DEBUG,
                f"Would move {where} asset {path!r} to itself; ignoring",
            ) in caplog.record_tuples
    expected_moves = {
        "subdir1/apple.txt": "subdir1/apple.dat",
        "subdir2/banana.txt": "subdir2/banana.dat",
        "subdir2/coconut.txt": "subdir2/coconut.dat",
    }
    check_assets(moving_dandiset, starting_assets, work_on, expected_moves)
def test_start_server_uvicorn(
    self,
    app_module: str,
    logging_conf_dict: dict,
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test `start.start_server` with Uvicorn."""
    # Mock out the root logger and the actual server launch.
    logger = mocker.patch.object(start.logging, "root", autospec=True)
    run = mocker.patch("inboard.start.uvicorn.run", autospec=True)
    monkeypatch.setenv("PROCESS_MANAGER", "uvicorn")
    start.start_server(
        str(os.getenv("PROCESS_MANAGER")),
        app_module=app_module,
        logger=logger,
        logging_conf_dict=logging_conf_dict,
    )
    logger.debug.assert_called_once_with("Running Uvicorn without Gunicorn.")
    # Uvicorn must be launched exactly once with the full default settings.
    run.assert_called_once_with(
        app_module,
        host="0.0.0.0",
        port=80,
        log_config=logging_conf_dict,
        log_level="info",
        reload=False,
        reload_delay=None,
        reload_dirs=None,
        reload_excludes=None,
        reload_includes=None,
    )
def test_envset_error(monkeypatch: pytest.MonkeyPatch) -> None:
    """envset must restore the previous value even when the body raises."""
    monkeypatch.setenv(ENVVAR, "foo")
    # Entering raises-context first, envset second: on the exception,
    # envset exits (restoring the variable) before pytest.raises catches.
    with pytest.raises(RuntimeError, match="Catch this!"), envset(ENVVAR, "bar"):
        assert os.environ[ENVVAR] == "bar"
        raise RuntimeError("Catch this!")
    assert os.environ[ENVVAR] == "foo"
def test_config_file_true(self, option: str, value: str,
                          monkeypatch: pytest.MonkeyPatch) -> None:
    """An option set in a config file is honored via PIP_CONFIG_FILE."""
    with tmpconfig(option, value) as name:
        monkeypatch.setenv("PIP_CONFIG_FILE", name)
        # FakeCommand intentionally returns the wrong type.
        parsed = cast(Tuple[Values, List[str]], main(["fake"]))
        options, args = parsed
        assert getattr(options, option) == 1
def root_url(monkeypatch: pytest.MonkeyPatch) -> str:
    """Fixture: resolve the MLHub root URL and pin it in the environment."""
    url = os.getenv(
        "MLHUB_ROOT_URL", "https://staging.api.radiant.earth/mlhub/v1/"
    )
    monkeypatch.setenv("MLHUB_ROOT_URL", url)
    return url
def test_config_stdout(
    self,
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
    comment: str,
    cli_entry: str,
    config_fixture: str,
    expected: Dict[str, str],
):
    # pylint: disable=too-many-arguments
    # pylint: disable=unused-argument
    """Test using config, stdout.

    :param mocker: The mocker fixture
    :param monkeypatch: The monkeypatch fixture
    :param tmp_path: A test specific temporary path
    :param comment: The test comment
    :param cli_entry: The CLI entry to set as ``sys.argv``
    :param config_fixture: The settings fixture
    :param expected: the expected return value
    """
    # Force the runner to raise so the traceback handling path runs.
    mocked_runner = mocker.patch(
        target="ansible_navigator.runner.command.run_command",
        side_effect=RunnerTestException,
    )
    monkeypatch.setenv("ANSIBLE_NAVIGATOR_ALLOW_UI_TRACEBACK", "true")
    # Expand the template into the full "config ... stdout" invocation.
    cli_entry = self.cli_entry.format(self.STDOUT["config"], cli_entry, "stdout")
    self.run_test(mocked_runner, monkeypatch, tmp_path, cli_entry,
                  config_fixture, expected)
def test_application_doesnt_validate_document_due_to_env_var(
    self, check_integrity: MagicMock, monkeypatch: pytest.MonkeyPatch
) -> None:
    """BOKEH_VALIDATE_DOC=false must skip document validation."""
    monkeypatch.setenv("BOKEH_VALIDATE_DOC", "false")
    application = baa.Application()
    document = Document()
    document.add_root(figure())
    application.initialize_document(document)
    # With validation disabled, the integrity check is never invoked.
    assert not check_integrity.called
def test_builtin_importable_with_env(self, monkeypatch: pytest.MonkeyPatch, env) -> None:
    """bokeh.resources must import cleanly with the given env var set."""
    monkeypatch.setenv(env, "foo")
    command = [sys.executable, "-c", "import bokeh.resources"]
    try:
        # Run in a fresh interpreter so the env var affects module import.
        subprocess.check_call(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        pytest.fail(f"resources import failed with {env} set")
def test_gets_with_basic_auth_incorrect_credentials(
    self,
    basic_auth_incorrect: tuple,
    client: TestClient,
    endpoint: str,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test `GET` requests with incorrect HTTP Basic auth credentials."""
    # Configure the expected (correct) credentials via the environment.
    monkeypatch.setenv("BASIC_AUTH_USERNAME", "test_user")
    monkeypatch.setenv("BASIC_AUTH_PASSWORD", "r4ndom_bUt_memorable")
    error_response = client.get(endpoint, auth=basic_auth_incorrect)
    error_response_json = error_response.json()
    # Either 401 (unauthorized) or 403 (forbidden) is acceptable.
    assert error_response.status_code in {401, 403}
    # The error payload differs between FastAPI and Starlette apps.
    if isinstance(client.app, FastAPI):
        expected_json = {
            "detail": "HTTP Basic auth credentials not correct"
        }
    elif isinstance(client.app, Starlette):
        expected_json = {
            "detail": "HTTP Basic auth credentials not correct",
            "error": "Incorrect username or password",
        }
    else:  # pragma: no cover
        raise AssertionError(
            "TestClient should have a FastAPI or Starlette app.")
    assert error_response_json == expected_json
def test_doesnt_validate_doc_due_to_env_var(
    self, check_integrity, monkeypatch: pytest.MonkeyPatch, test_plot
) -> None:
    """BOKEH_VALIDATE_DOC=false must disable validation in OutputDocumentFor."""
    monkeypatch.setenv("BOKEH_VALIDATE_DOC", "false")
    with beu.OutputDocumentFor([test_plot]):
        pass
    # Validation was skipped, so the integrity check never ran.
    assert not check_integrity.called
async def test_run_with_type(
    monkeypatch: pytest.MonkeyPatch,
    mocker: MockerFixture,
    root: Path,
    config: Config,
) -> None:
    """
    Test creating a new post with a valid type.
    """
    # An empty EDITOR skips opening an interactive editor.
    monkeypatch.setenv("EDITOR", "")
    config.templates = {"book": ["title", "author"]}
    mocker.patch("nefelibata.cli.new.get_config", return_value=config)
    await new.run(root, "A book I liked", "book")
    post_path = root / "posts/a_book_i_liked/index.mkd"
    content = post_path.read_text(encoding="utf-8")
    expected = (
        "subject: A book I liked\n"
        "summary: \n"
        "keywords: \n"
        "type: book\n"
        "book-title: \n"
        "book-author: \n"
        "\n\n"
    )
    assert content == expected
def test_launch_exec(tmp_path: Path, monkeypatch: pytest.MonkeyPatch,
                     mocker: MockerFixture) -> None:
    """PoolLauncher.exec behaves differently inside vs. outside Taskcluster."""
    # Start with taskcluster detection disabled, even on CI
    monkeypatch.delenv("TASK_ID", raising=False)
    monkeypatch.delenv("TASKCLUSTER_ROOT_URL", raising=False)
    exec_mock = mocker.patch("os.execvpe")
    dup2_mock = mocker.patch("os.dup2")
    pool = PoolLauncher(["cmd"], "testpool")
    assert pool.in_taskcluster is False
    pool.log_dir = tmp_path / "logs"
    pool.exec()
    # Outside Taskcluster: no fd redirection, direct exec, no log dir created.
    dup2_mock.assert_not_called()
    exec_mock.assert_called_once_with("cmd", ["cmd"], pool.environment)
    assert not pool.log_dir.is_dir()
    # Then enable taskcluster detection
    monkeypatch.setenv("TASK_ID", "someTask")
    monkeypatch.setenv("TASKCLUSTER_ROOT_URL", "http://fakeTaskcluster")
    assert pool.in_taskcluster is True
    exec_mock.reset_mock()
    pool.exec()
    # In Taskcluster: two dup2 calls (presumably stdout and stderr —
    # confirm against PoolLauncher.exec) before exec, and the log
    # directory is created.
    assert dup2_mock.call_count == 2
    exec_mock.assert_called_once_with("cmd", ["cmd"], pool.environment)
    assert pool.log_dir.is_dir()
def test_cancel_zarr_upload(
    monkeypatch: pytest.MonkeyPatch, new_dandiset: SampleDandiset
) -> None:
    """The cancel-zarr-upload script aborts an in-progress Zarr upload."""
    client = new_dandiset.client
    asset_path = "foo/bar/baz.zarr"
    # Create a Zarr and register it as an asset of the Dandiset.
    r = client.post(
        "/zarr/", json={"name": asset_path, "dandiset": new_dandiset.dandiset_id}
    )
    zarr_id = r["zarr_id"]
    client.post(
        f"{new_dandiset.dandiset.version_api_path}assets/",
        json={"metadata": {"path": asset_path}, "zarr_id": zarr_id},
    )
    # Begin uploading two entries so the Zarr is marked in-progress.
    client.post(
        f"/zarr/{zarr_id}/upload/",
        json=[
            {"path": "0.dat", "etag": "0" * 32},
            {"path": "1.dat", "etag": "1" * 32},
        ],
    )
    r = client.get(f"/zarr/{zarr_id}/")
    assert r["upload_in_progress"] is True
    # Invoke the CLI from a subdirectory with a path relative to it.
    (new_dandiset.dspath / "foo").mkdir()
    monkeypatch.chdir(new_dandiset.dspath / "foo")
    monkeypatch.setenv("DANDI_API_KEY", new_dandiset.api.api_key)
    r = CliRunner().invoke(
        service_scripts,
        ["cancel-zarr-upload", "-i", new_dandiset.api.instance_id, "bar/baz.zarr"],
    )
    assert r.exit_code == 0
    # The upload must no longer be in progress after cancellation.
    r = client.get(f"/zarr/{zarr_id}/")
    assert r["upload_in_progress"] is False
def test_environment_var_does_not_load_lowercase(
    self, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Lowercase "pip_"-prefixed env vars must not be loaded as config."""
    monkeypatch.setenv("pip_hello", "5")
    self.configuration.load()
    # Only uppercase PIP_* variables are recognized, so the lookup fails.
    with pytest.raises(ConfigurationError):
        self.configuration.get_value(":env:.hello")
def test_site_config_dirs_osx(self, monkeypatch: pytest.MonkeyPatch) -> None:
    """Site config dirs on macOS come from /Library, independent of HOME."""
    monkeypatch.setenv("HOME", "/home/test")
    expected = ["/Library/Application Support/pip"]
    assert appdirs.site_config_dirs("pip") == expected
def test(
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
    data: StdoutCliTest,
    exec_env: bool,
) -> None:
    """Test doc using subcommand.

    :param monkeypatch: The monkeypatch fixture
    :param tmp_path: The temporary path to use
    :param data: The test data
    :param exec_env: Whether to use the exec environment
    :raises AssertionError: When test fails
    """
    log_file = str(tmp_path / "log.txt")
    # A trivial pager and no color keep the captured output deterministic.
    monkeypatch.setenv("PAGER", "cat")
    monkeypatch.setenv("NO_COLOR", "true")
    extra_args = ("--lf", log_file, "--ee", str(exec_env), "--set-env", "PAGER=cat")
    command = shlex_join(data.command + extra_args)
    proc_out = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=False,
        universal_newlines=True,
        shell=True,
    )
    assert all(fragment in proc_out.stdout for fragment in data.expected)
class SummaryCollectorTest(ResTest):
    """Integration tests for SummaryCollector.loadAllSummaryData."""

    def setUp(self):
        # Pin the timezone so date-indexed results match the fixture data.
        self.monkeypatch = MonkeyPatch()
        self.monkeypatch.setenv(
            "TZ", "CET"
        )  # The ert_statoil case was generated in CET
        self.config = self.createTestPath("local/snake_oil/snake_oil.ert")

    def tearDown(self):
        # Undo the TZ override set in setUp.
        self.monkeypatch.undo()

    def test_summary_collector(self):
        with ErtTestContext(
            "python/enkf/export/summary_collector", self.config
        ) as context:
            ert = context.getErt()
            # Unfiltered load: spot-check known values from the fixture.
            data = SummaryCollector.loadAllSummaryData(ert, "default_0")
            self.assertFloatEqual(data["WWCT:OP2"][0]["2010-01-10"], 0.385549)
            self.assertFloatEqual(data["WWCT:OP2"][24]["2010-01-10"], 0.498331)
            self.assertFloatEqual(data["FOPR"][0]["2010-01-10"], 0.118963)
            self.assertFloatEqual(data["FOPR"][0]["2015-06-23"], 0.133601)
            # Realization 20 exists; realization 60 does not.
            realization_20 = data.loc[20]
            with self.assertRaises(KeyError):
                realization_60 = data.loc[60]
            # Restricting to specific keys drops all other columns.
            data = SummaryCollector.loadAllSummaryData(
                ert, "default_0", ["WWCT:OP1", "WWCT:OP2"]
            )
            self.assertFloatEqual(data["WWCT:OP1"][0]["2010-01-10"], 0.352953)
            self.assertFloatEqual(data["WWCT:OP2"][0]["2010-01-10"], 0.385549)
            with self.assertRaises(KeyError):
                data["FOPR"]
            # A realization_index limits the result to that realization.
            realization_index = 10
            data = SummaryCollector.loadAllSummaryData(
                ert,
                "default_0",
                ["WWCT:OP1", "WWCT:OP2"],
                realization_index=realization_index,
            )
            assert data.index.levels[0] == [realization_index]
            assert len(data.index.levels[1]) == 200
            assert list(data.columns) == ["WWCT:OP1", "WWCT:OP2"]
            # An out-of-range realization_index raises IndexError.
            non_existing_realization_index = 150
            with pytest.raises(IndexError):
                data = SummaryCollector.loadAllSummaryData(
                    ert,
                    "default_0",
                    ["WWCT:OP1", "WWCT:OP2"],
                    realization_index=non_existing_realization_index,
                )
def test_custom(
    self,
    high_quality_image: io.BytesIO,
    monkeypatch: MonkeyPatch,
) -> None:
    """
    It is possible to set a custom amount of time that it takes for the
    Query API on the mock to process that a target has been deleted.
    """
    # We choose a low time for a quick test.
    query_processes_deletion = 0.1
    database = VuforiaDatabase()
    databases_url = _EXAMPLE_URL_FOR_TARGET_MANAGER + '/databases'
    requests.post(url=databases_url, json=database.to_dict())
    monkeypatch.setenv(
        name='DELETION_PROCESSING_SECONDS',
        value=str(query_processes_deletion),
    )
    measured_seconds = process_deletion_seconds(
        high_quality_image=high_quality_image,
        vuforia_database=database,
    )
    # The measured processing time should match the configured value.
    assert abs(query_processes_deletion - measured_seconds) < self.LEEWAY