def test_check_link_requires_python__invalid_requires(
    caplog: pytest.LogCaptureFixture,
) -> None:
    """
    Test the log message for an invalid Requires-Python.
    """
    link = Link("https://example.com", requires_python="invalid")
    caplog.set_level(logging.DEBUG)
    actual = _check_link_requires_python(link, version_info=(3, 6, 5))
    assert actual

    expected_message = (
        "Ignoring invalid Requires-Python ('invalid') for link: https://example.com"
    )
    check_caplog(caplog, "DEBUG", expected_message)

def test_plan(
    self,
    caplog: LogCaptureFixture,
    runway_context: MockRunwayContext,
    tmp_path: Path,
) -> None:
    """Test plan."""
    caplog.set_level(logging.INFO, logger="runway")
    obj = Serverless(runway_context, module_root=tmp_path)
    assert not obj.plan()
    assert [
        f"{tmp_path.name}:plan not currently supported for Serverless"
    ] == caplog.messages

def test_get_expected_output(caplog: LogCaptureFixture) -> None:
    """Test get_expected_output error handling for missing or malformed .out files."""
    caplog.set_level(logging.INFO)
    exit_code, _ = get_expected_output(DATA_DIRECTORY / "t.toml", USER_SPECIFIC_PATH)
    assert "Too much .out files" in str(caplog.text)
    assert exit_code == -1
    exit_code, _ = get_expected_output(DATA_DIRECTORY / "u.toml", USER_SPECIFIC_PATH)
    assert exit_code == -1
    assert "Wrong format for .out file name" in str(caplog.text)
    exit_code, _ = get_expected_output(DATA_DIRECTORY / "v.toml", USER_SPECIFIC_PATH)
    assert exit_code == 0
    assert ".out file does not exists" in str(caplog.text)

def test_kbenv_install(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None:
    """Test ``runway kbenv install`` reading version from a file.

    For best results, remove any existing installs.

    """
    caplog.set_level(logging.DEBUG, logger="runway.cli.commands.kbenv")
    (cd_tmp_path / ".kubectl-version").write_text("v1.14.1")
    runner = CliRunner()
    result = runner.invoke(cli, ["kbenv", "install"])
    assert result.exit_code == 0

    kb_bin = Path(caplog.messages[-1].strip("kubectl path: "))
    assert kb_bin.exists()

def prepare_call(
    self,
    caplog: pytest.LogCaptureFixture,
    log_level: int,
    command: Optional[str] = None,
) -> Tuple[List[str], FakeSpinner]:
    """Set the caplog level and return (args, spinner) for running a small Python command."""
    if command is None:
        command = 'print("Hello"); print("world")'
    caplog.set_level(log_level)
    spinner = FakeSpinner()
    args = [sys.executable, "-c", command]
    return (args, spinner)

def test_autodetection_fail(
    self, caplog: LogCaptureFixture, cd_tmp_path: Path
) -> None:
    """Test autodetection fail."""
    caplog.set_level(logging.ERROR, logger="runway")
    with pytest.raises(SystemExit) as excinfo:
        assert not RunwayModuleType(cd_tmp_path)
    assert excinfo.value.code == 1
    assert (
        'module class could not be determined from path "{}"'.format(cd_tmp_path.name)
        in caplog.messages
    )

def test_log_name(
    self,
    derived_from: str,
    expected: List[str],
    caplog: LogCaptureFixture,
    mocker: MockerFixture,
) -> None:
    """Test log_name."""
    caplog.set_level(logging.INFO, logger="runway")
    mocker.patch.object(DeployEnvironment, "name", "test")
    obj = DeployEnvironment()
    obj.name_derived_from = derived_from
    obj.log_name()
    assert caplog.messages == expected

def test_test_no_tests(
    self,
    caplog: LogCaptureFixture,
    runway_config: MockRunwayConfig,
    runway_context: MockRunwayContext,
) -> None:
    """Test test with no tests defined."""
    caplog.set_level(logging.ERROR, logger="runway")
    obj = Runway(runway_config, runway_context)  # type: ignore
    obj.tests = []
    with pytest.raises(SystemExit) as excinfo:
        assert obj.test()
    assert excinfo.value.code == 1
    assert "no tests defined in runway.yml" in caplog.messages[0]

def test_get_cut_v2(
    self,
    caplog: pytest.LogCaptureFixture,
    temp_event_loop: asyncio.AbstractEventLoop,
):
    """Test that a PROXY v2 header cut short before its tail part is rejected."""
    caplog.set_level(logging.DEBUG)
    mock_reader = self.MockAsyncReader(TEST_V2_DATA1_EXACT[0:20])
    reslt = temp_event_loop.run_until_complete(get_proxy(mock_reader))
    assert isinstance(reslt, ProxyData)
    assert not reslt.valid
    expect_msg = "PROXY exception: Connection lost while waiting for tail part"
    assert reslt.error == expect_msg
    expect = ("mail.debug", 30, expect_msg)
    assert expect in caplog.record_tuples

def test_skip_invalid_wheel_link(
    self, caplog: pytest.LogCaptureFixture, data: TestData
) -> None:
    """
    Test that PackageFinder skips invalid wheel filenames.
    """
    caplog.set_level(logging.DEBUG)

    req = install_req_from_line("invalid")
    # data.find_links contains "invalid.whl", which is an invalid wheel
    finder = make_test_finder(find_links=[data.find_links])
    with pytest.raises(DistributionNotFound):
        finder.find_requirement(req, True)

    assert "Skipping link: invalid wheel filename:" in caplog.text

def test_collect_sources(
    self, caplog: pytest.LogCaptureFixture, data: TestData
) -> None:
    """Test LinkCollector.collect_sources, including de-duplication of index URLs."""
    caplog.set_level(logging.DEBUG)

    link_collector = make_test_link_collector(
        find_links=[data.find_links],
        # Include two copies of the URL to check that the second one
        # is skipped.
        index_urls=[PyPI.simple_url, PyPI.simple_url],
    )
    collected_sources = link_collector.collect_sources(
        "twine",
        candidates_from_page=lambda link: [
            InstallationCandidate("twine", "1.0", link)
        ],
    )

    files_it = itertools.chain.from_iterable(
        source.file_links()
        for sources in collected_sources
        for source in sources
        if source is not None
    )
    pages_it = itertools.chain.from_iterable(
        source.page_candidates()
        for sources in collected_sources
        for source in sources
        if source is not None
    )
    files = list(files_it)
    pages = list(pages_it)

    # Spot-check the returned sources.
    assert len(files) > 20
    check_links_include(files, names=["simple-1.0.tar.gz"])

    assert [page.link for page in pages] == [Link("https://pypi.org/simple/twine/")]
    # Check that index URLs are marked as *un*cacheable.
    assert not pages[0].link.cache_link_parsing

    expected_message = dedent(
        """\
        1 location(s) to search for versions of twine:
        * https://pypi.org/simple/twine/"""
    )
    assert caplog.record_tuples == [
        ("pip._internal.index.collector", logging.DEBUG, expected_message),
    ]

def test_k8s_cfn_repo(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None:
    """Test ``runway gen-sample k8s-cfn-repo`` command."""
    caplog.set_level(logging.INFO, logger="runway.cli")
    runner = CliRunner()
    result = runner.invoke(cli, ["gen-sample", "k8s-cfn-repo"])
    assert result.exit_code == 0

    files = [
        "aws-auth-cm.k8s/base/kustomization.yaml",
        "aws-auth-cm.k8s/overlays/template/.kubectl-version",
        "aws-auth-cm.k8s/overlays/template/kustomization.yaml",
        "k8s-master.cfn/k8s_hooks/__init__.py",
        "k8s-master.cfn/k8s_hooks/auth_map.py",
        "k8s-master.cfn/k8s_hooks/aws-auth-cm.yaml",
        "k8s-master.cfn/k8s_hooks/awscli.py",
        "k8s-master.cfn/k8s_hooks/bootstrap.py",
        "k8s-master.cfn/templates/k8s_iam.yaml",
        "k8s-master.cfn/templates/k8s_master.yaml",
        "k8s-master.cfn/stacks.yaml",
        "k8s-workers.cfn/local_lookups/__init__.py",
        "k8s-workers.cfn/local_lookups/bootstrap_value.py",
        "k8s-workers.cfn/templates/k8s_workers.yaml",
        "k8s-workers.cfn/stacks.yaml",
        "service-hello-world.k8s/base/configMap.yaml",
        "service-hello-world.k8s/base/deployment.yaml",
        "service-hello-world.k8s/base/kustomization.yaml",
        "service-hello-world.k8s/base/service.yaml",
        "service-hello-world.k8s/overlays/prod/.kubectl-version",
        "service-hello-world.k8s/overlays/prod/deployment.yaml",
        "service-hello-world.k8s/overlays/prod/kustomization.yaml",
        "service-hello-world.k8s/overlays/template/.kubectl-version",
        "service-hello-world.k8s/overlays/template/kustomization.yaml",
        "service-hello-world.k8s/overlays/template/map.yaml",
        "service-hello-world.k8s/README.md",
        ".gitignore",
        "README.md",
        "runway.yml",
    ]

    repo = cd_tmp_path / "k8s-cfn-infrastructure"
    assert repo.is_dir()
    for file_ in files:
        assert (repo / file_).is_file()
    assert caplog.messages == [
        "Sample k8s infrastructure repo created at {}".format(str(repo)),
        "See the README for setup and deployment instructions.",
    ]

def test_get_invalid_sig(
    self,
    caplog: pytest.LogCaptureFixture,
    temp_event_loop: asyncio.AbstractEventLoop,
):
    """Test that an unrecognized PROXY signature is rejected."""
    caplog.set_level(logging.DEBUG)
    mock_reader = self.MockAsyncReader(b"PROXI TCP4 1.2.3.4 5.6.7.8 9 10\r\n")
    reslt = temp_event_loop.run_until_complete(get_proxy(mock_reader))
    assert isinstance(reslt, ProxyData)
    assert not reslt.valid
    expect_msg = "PROXY unrecognized signature"
    assert reslt.error == expect_msg
    expect = ("mail.debug", 30, "PROXY error: " + expect_msg)
    assert expect in caplog.record_tuples

def test_run_build_steps_empty(
    self,
    caplog: LogCaptureFixture,
    fake_process: FakeProcess,
    runway_context: RunwayContext,
    tmp_path: Path,
) -> None:
    """Test run_build_steps with an empty build_steps list."""
    caplog.set_level(logging.INFO, logger=MODULE)
    obj = CloudDevelopmentKit(
        runway_context, module_root=tmp_path, options={"build_steps": []}
    )
    assert not obj.run_build_steps()
    logs = "\n".join(caplog.messages)
    assert "build steps (in progress)" not in logs
    assert "build steps (complete)" not in logs

def test_projects(caplog: LogCaptureFixture) -> None:
    """Test that unknown project names are reported while known ones still run."""
    with event_loop():
        runner = CliRunner()
        result = runner.invoke(cli.main, ["--projects=STDIN,asdf"])
        assert result.exit_code == 0
        assert "1 / 1 succeeded" in result.output
        assert "Projects not found: {'asdf'}" in caplog.text

    caplog.clear()

    with event_loop():
        runner = CliRunner()
        result = runner.invoke(cli.main, ["--projects=fdsa,STDIN"])
        assert result.exit_code == 0
        assert "1 / 1 succeeded" in result.output
        assert "Projects not found: {'fdsa'}" in caplog.text

def test_load(
    caplog: pytest.LogCaptureFixture,
    client: happi.client.Client,
    happi_cfg: str,
    runner: CliRunner,
):
    """Test loading an added item into an IPython shell via the CLI."""
    item_info = "\n".join(
        [
            "HappiItem",
            "happi_name",
            "types.SimpleNamespace",
            "",
            "name",
            "my_name",
            "",
            "y",
            "docs",
            "y",
        ]
    )
    # add item first
    add_result = runner.invoke(
        happi_cli, ['--path', happi_cfg, 'add'], input=item_info
    )
    assert add_result.exit_code == 0

    # try to load the item
    devices = {}
    devices['happi_name'] = client.load_device(name='happi_name')
    with mock.patch.object(IPython, 'start_ipython') as m:
        _ = runner.invoke(
            happi_cli, ['--path', happi_cfg, 'load', 'happi_name']
        )
        m.assert_called_once_with(argv=['--quick'], user_ns=devices)
    with caplog.at_level(logging.INFO):
        assert "Creating shell with devices" in caplog.text

def test_matrix_unused(caplog: pytest.LogCaptureFixture) -> None:
    """Test that unused CI matrix dimensions trigger a warning."""
    obj = yaml_load((FIXTURES / "matrix06" / "matrix.yaml").read_text())

    mtx = CIMatrix(obj, "master", False)
    assert not mtx.secrets
    assert not mtx.jobs
    assert any(rec.levelname == "WARNING" for rec in caplog.get_records("call"))

    caplog.clear()
    del obj["language"]

    mtx = CIMatrix(obj, "master", False)
    assert not mtx.secrets
    assert not mtx.jobs
    assert any(rec.levelname == "WARNING" for rec in caplog.get_records("call"))

def test_warn_about_depr_platform(
    runtime_cfg: Dict[str, Any],
    fail_expected: bool,
    expected_warning: Optional[str],
    caplog: pytest.LogCaptureFixture,
):
    """Test warn_about_depr_platform()"""
    caplog.set_level(logging.WARNING, CYLC_LOG)
    cfg = {'runtime': runtime_cfg}
    if fail_expected:
        with pytest.raises(PlatformLookupError):
            warn_about_depr_platform(cfg)
    else:
        warn_about_depr_platform(cfg)
        if expected_warning:
            assert expected_warning in caplog.text
        else:
            assert caplog.record_tuples == []

def test_extra_referrer_error(self, caplog: pytest.LogCaptureFixture) -> None:
    d = document.Document()
    assert not d.roots

    class FakeMod:
        __name__ = 'junkjunkjunk'

    mod = FakeMod()

    import sys
    assert 'junkjunkjunk' not in sys.modules
    sys.modules['junkjunkjunk'] = mod
    d._modules.append(mod)
    assert 'junkjunkjunk' in sys.modules

    # add an extra referrer for delete_modules to complain about
    extra.append(mod)

    import gc
    # get_referrers behavior changed in Python 3.7, see https://github.com/bokeh/bokeh/issues/8221
    assert len(gc.get_referrers(mod)) in (3, 4)

    with caplog.at_level(logging.ERROR):
        d.delete_modules()

        assert "Module %r has extra unexpected referrers! This could indicate a serious memory leak. Extra referrers:" % mod in caplog.text
        assert len(caplog.records) == 1

    assert 'junkjunkjunk' not in sys.modules
    assert d._modules == []

async def test_check_ws_no_ws(
    protect_client: ProtectApiClient, caplog: pytest.LogCaptureFixture
):
    """Test check_ws when the websocket connection is not active."""
    caplog.set_level(logging.DEBUG)

    protect_client._last_websocket_check = time.monotonic()
    protect_client.reset_ws()

    active_ws = await protect_client.check_ws()
    assert active_ws is False

    expected_logs = [
        "Unifi OS: Websocket connection not active, failing back to polling"
    ]
    assert expected_logs == [rec.message for rec in caplog.records]
    assert caplog.records[0].levelname == "DEBUG"

def test_should_detect_new_reload_dirs(
    self, caplog: pytest.LogCaptureFixture, tmp_path: Path
) -> None:
    """Test that a newly created directory matching reload_includes is watched."""
    app_dir = tmp_path / "app"
    app_file = app_dir / "file.py"
    app_dir.mkdir()
    app_file.touch()
    app_first_dir = tmp_path / "app_first"
    app_first_file = app_first_dir / "file.py"

    with as_cwd(tmp_path), caplog.at_level(INFO):
        config = Config(
            app="tests.test_config:asgi_app", reload=True, reload_includes=["app*"]
        )
        reloader = self._setup_reloader(config)
        assert self._reload_tester(reloader, app_file)

        app_first_dir.mkdir()
        assert self._reload_tester(reloader, app_first_file)

        assert caplog.records[-2].levelno == INFO
        assert (
            caplog.records[-2].message == "WatchGodReload detected a new reload "
            f"dir '{app_first_dir.name}' in '{tmp_path}'; Adding to watch list."
        )

        reloader.shutdown()

def send_response_after_eof_should_fail(
    app: Sanic,
    caplog: LogCaptureFixture,
    message_in_records: Callable[[List[LogRecord], str], bool],
):
    """Sending to a response stream after eof() should fail and log both errors."""

    @app.get("/")
    async def handler(request: Request):
        response = await request.respond()
        await response.send("foo, ")
        await response.eof()
        await response.send("bar")

    error_msg1 = (
        "The error response will not be sent to the client for the following "
        'exception:"Second respond call is not allowed.". A previous '
        "response has at least partially been sent."
    )
    error_msg2 = (
        "Response stream was ended, no more "
        "response data is allowed to be sent."
    )

    with caplog.at_level(ERROR):
        _, response = app.test_client.get("/")
        assert "foo, " in response.text
        assert message_in_records(caplog.records, error_msg1)
        assert message_in_records(caplog.records, error_msg2)

def test_context_manager_magic(
    self, caplog: LogCaptureFixture, monkeypatch: MonkeyPatch
) -> None:
    """Test the context manager methods (``__enter__``/``__exit__``)."""
    mock_reset_all = MagicMock()
    caplog.set_level(logging.DEBUG, "runway.SafeHaven")
    monkeypatch.setattr(MODULE + ".os", MagicMock())
    monkeypatch.setattr(MODULE + ".sys", MagicMock())
    monkeypatch.setattr(SafeHaven, "reset_all", mock_reset_all)

    with SafeHaven() as result:
        assert isinstance(result, SafeHaven)
    mock_reset_all.assert_called_once()
    assert caplog.messages == [
        "entering a safe haven...",
        "leaving the safe haven...",
    ]

def test_latest_prerelease_install_message(
    caplog: pytest.LogCaptureFixture, monkeypatch: pytest.MonkeyPatch
) -> None:
    """
    Test that documentation for installing pre-release packages is displayed.
    """
    hits: List["TransformedHit"] = [
        {
            "name": "ni",
            "summary": "For knights who say Ni!",
            "versions": ["1.0.0", "1.0.1a"],
        }
    ]

    installed_package = mock.Mock(project_name="ni")
    monkeypatch.setattr("pip._vendor.pkg_resources.working_set", [installed_package])

    get_dist = mock.Mock()
    get_dist.return_value = mock.Mock(version="1.0.0")
    monkeypatch.setattr("pip._internal.commands.search.get_distribution", get_dist)

    with caplog.at_level(logging.INFO):
        print_results(hits)

    message = caplog.records[-1].getMessage()
    assert 'pre-release; install with "pip install --pre"' in message
    assert get_dist.call_args_list == [mock.call("ni")]

def test_intercept(caplog: LogCaptureFixture) -> None:
    """Test interceptor."""
    caplog.set_level(logging.INFO, logger="root")
    interceptor = AccessLogInterceptor(
        name="root",
        handlers=(
            lambda _: "this",
            lambda _: "that",
        ),
        propagate=True,
    )
    interceptor.intercept(Mock(), Mock(), Mock(), "/abc/Test")
    assert "this that" in caplog.text

def test_set_finder_trusted_host(
    self,
    line_processor: LineProcessor,
    caplog: pytest.LogCaptureFixture,
    session: PipSession,
    finder: PackageFinder,
) -> None:
    """Test that ``--trusted-host`` options configure the finder and session."""
    with caplog.at_level(logging.INFO):
        line_processor(
            "--trusted-host=host1 --trusted-host=host2:8080",
            "file.txt",
            1,
            finder=finder,
            session=session,
        )
    assert list(finder.trusted_hosts) == ["host1", "host2:8080"]
    session = finder._link_collector.session
    assert session.adapters["https://host1/"] is session._trusted_host_adapter
    assert session.adapters["https://host2:8080/"] is session._trusted_host_adapter

    # Test the log message.
    actual = [(r.levelname, r.message) for r in caplog.records]
    expected = ("INFO", "adding trusted host: 'host1' (from line 1 of file.txt)")
    assert expected in actual

async def example_flow(
    flow: Callable,
    scheduler: Callable,
    caplog: pytest.LogCaptureFixture,
) -> Scheduler:
    """Return a scheduler for interrogating its task pool.

    This is function-scoped, so it is slower than mod_example_flow; only use
    it when the test mutates the scheduler or task pool.
    """
    # The run(schd) fixture doesn't work for modifying the DB, so have to
    # set up caplog and do schd.install()/.initialise()/.configure() instead
    caplog.set_level(logging.INFO, CYLC_LOG)
    reg = flow(EXAMPLE_FLOW_CFG)
    schd: Scheduler = scheduler(reg)
    await schd.install()
    await schd.initialise()
    await schd.configure()
    return schd

def test_extra_referrer_error(self, caplog: pytest.LogCaptureFixture) -> None:
    d = Document()
    dm = bdm.DocumentModuleManager(d)

    mod = FakeMod()
    assert 'FakeMod' not in sys.modules

    dm.add(mod)

    assert 'FakeMod' in sys.modules
    assert len(dm) == 1

    # add an extra referrer for Document.destroy to complain about
    extra.append(mod)

    import gc
    # get_referrers behavior changed in Python 3.7, see https://github.com/bokeh/bokeh/issues/8221
    assert len(gc.get_referrers(mod)) in (3, 4)

    with caplog.at_level(logging.ERROR):
        dm.destroy()

        assert "Module %r has extra unexpected referrers! This could indicate a serious memory leak. Extra referrers:" % mod in caplog.text
        assert len(caplog.records) == 1

    assert 'FakeMod' not in sys.modules
    assert len(dm) == 0

def test_unicode_decode_error(caplog: pytest.LogCaptureFixture) -> None:
    """Test that undecodable subprocess output is logged using backslash escapes."""
    if locale.getpreferredencoding() != "UTF-8":
        pytest.skip("locale.getpreferredencoding() is not UTF-8")
    caplog.set_level(INFO)
    call_subprocess(
        [
            sys.executable,
            "-c",
            "import sys; sys.stdout.buffer.write(b'\\xff')",
        ],
        show_stdout=True,
    )

    assert len(caplog.records) == 2
    # First log record is "Running command ..."
    assert caplog.record_tuples[1] == ("pip.subprocessor", INFO, "\\xff")

async def test_owserver_switch(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    owproxy: MagicMock,
    device_id: str,
    caplog: pytest.LogCaptureFixture,
):
    """Test for 1-Wire switch.

    This test forces all entities to be enabled.
    """
    device_registry = mock_device_registry(hass)
    entity_registry = mock_registry(hass)

    mock_device = MOCK_OWPROXY_DEVICES[device_id]
    expected_entities = mock_device.get(Platform.SWITCH, [])
    expected_devices = ensure_list(mock_device.get(ATTR_DEVICE_INFO))

    setup_owproxy_mock_devices(owproxy, Platform.SWITCH, [device_id])
    with caplog.at_level(logging.WARNING, logger="homeassistant.components.onewire"):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        if mock_device.get(ATTR_UNKNOWN_DEVICE):
            assert "Ignoring unknown device family/type" in caplog.text
        else:
            assert "Ignoring unknown device family/type" not in caplog.text

    check_device_registry(device_registry, expected_devices)
    assert len(entity_registry.entities) == len(expected_entities)
    check_and_enable_disabled_entities(entity_registry, expected_entities)

    setup_owproxy_mock_devices(owproxy, Platform.SWITCH, [device_id])
    await hass.config_entries.async_reload(config_entry.entry_id)
    await hass.async_block_till_done()

    check_entities(hass, entity_registry, expected_entities)

    # Test TOGGLE service
    for expected_entity in expected_entities:
        entity_id = expected_entity[ATTR_ENTITY_ID]

        if expected_entity[ATTR_STATE] == STATE_ON:
            owproxy.return_value.read.side_effect = [b" 0"]
            expected_entity[ATTR_STATE] = STATE_OFF
        elif expected_entity[ATTR_STATE] == STATE_OFF:
            owproxy.return_value.read.side_effect = [b" 1"]
            expected_entity[ATTR_STATE] = STATE_ON

        await hass.services.async_call(
            SWITCH_DOMAIN,
            SERVICE_TOGGLE,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()

        state = hass.states.get(entity_id)
        assert state.state == expected_entity[ATTR_STATE]