def stats_length(self):
    """Return the length of a ``ResultStats`` built from a null snapshot.

    NOTE(review): does not read any instance state; the value is derived
    purely from an empty/null ``Snapshot``.
    """
    empty_stats = ResultStats(Snapshot.null())
    return len(empty_stats)
def duration(self):
    """Return the duration, which is always the null ``Snapshot``.

    NOTE(review): ignores instance state entirely — presumably a stub or
    default implementation; confirm against callers.
    """
    null_duration = Snapshot.null()
    return null_duration
def _run_fetchers_from_file(
    config_path: ConfigPath,
    host_name: HostName,
    timeout: int,
    mode: Mode,
) -> None:
    """Writes to the stdio next data:

    Count   Answer        Content               Action
    -----   ------        -------               ------
    1       Result        Fetcher Blob          Send to the checker
    0..n    Log           Message to be logged  Log
    1       End of reply  empty                 End IO
    """
    # Collected fetcher replies; one entry per fetcher on success, padded
    # with timeout messages if the overall deadline fires mid-run.
    messages: List[protocol.FetcherMessage] = []
    # CPUTracker measures the CPU time of the whole fetch phase; Timeout
    # raises MKTimeout (via a signal, judging by `signaled` below — TODO
    # confirm) once `timeout` seconds elapse.
    with CPUTracker() as cpu_tracker, Timeout(
            timeout,
            message=f'Fetcher for host "{host_name}" timed out after {timeout} seconds',
    ) as timeout_manager:
        fetchers = tuple(_parse_config(config_path, host_name))
        try:
            # fill as many messages as possible before timeout exception raised
            for fetcher in fetchers:
                messages.append(_run_fetcher(fetcher, mode))
        except MKTimeout as exc:
            # fill missing entries with timeout errors
            # (fetchers[len(messages):] are exactly the ones not yet run,
            # plus the one that was interrupted).
            messages.extend(
                protocol.FetcherMessage.timeout(
                    FetcherType.from_fetcher(fetcher),
                    exc,
                    Snapshot.null(),
                ) for fetcher in fetchers[len(messages):])

    # Outside the Timeout context: if the deadline signal fired, rewrite
    # messages whose timeout may have been masked (net-snmp specific —
    # see _replace_netsnmp_obfuscated_timeout).
    if timeout_manager.signaled:
        messages = _replace_netsnmp_obfuscated_timeout(messages,
                                                      timeout_manager.message)

    logger.debug("Produced %d messages", len(messages))
    # Serialize the complete answer (all fetcher messages + timing) to
    # stdout for the checker in one write.
    write_bytes(
        bytes(
            protocol.CMCMessage.result_answer(
                messages,
                timeout,
                cpu_tracker.duration,
            )))
    # Log every error-type message, including its full traceback at debug
    # level.
    for msg in filter(
            lambda msg: msg.header.payload_type is protocol.PayloadType.ERROR,
            messages,
    ):
        logger.log(
            msg.header.status,
            "Error in %s fetcher: %r",
            msg.header.fetcher_type.name,
            msg.raw_data.error,
        )
        logger.debug("".join(
            traceback.format_exception(
                msg.raw_data.error.__class__,
                msg.raw_data.error,
                msg.raw_data.error.__traceback__,
            )))
def test_get_host_sections_cluster(monkeypatch, mocker):
    """Collecting host sections for a 3-node cluster yields one section set
    per node and cleans up each node's piggyback status file."""
    hostname = HostName("testhost")
    # Cluster node names mapped to their (fake) IP addresses.
    hosts = {
        HostName("host0"): "10.0.0.0",
        HostName("host1"): "10.0.0.1",
        HostName("host2"): "10.0.0.2",
    }
    address = "1.2.3.4"
    tags = {"agent": "no-agent"}
    section_name = SectionName("test_section")
    config_cache = make_scenario(hostname, tags).apply(monkeypatch)
    host_config = config.HostConfig.make_host_config(hostname)

    # Resolve node IPs from the `hosts` dict instead of doing real DNS.
    def fake_lookup_ip_address(host_config, family=None):
        return hosts[host_config.hostname]

    # Stand-in for Source.parse: every source "parses" to the same single
    # test section regardless of its raw data.
    def check(_, *args, **kwargs):
        return result.OK(
            AgentHostSections(sections={section_name: [[str(section_name)]]}))

    monkeypatch.setattr(
        config,
        "lookup_ip_address",
        fake_lookup_ip_address,
    )
    monkeypatch.setattr(
        Source,
        "parse",
        check,
    )
    # Mock the piggyback file-system side effects so we can assert on them.
    mocker.patch.object(
        cmk.utils.piggyback,
        "remove_source_status_file",
        autospec=True,
    )
    mocker.patch.object(
        cmk.utils.piggyback,
        "_store_status_file_of",
        autospec=True,
    )

    # Create a cluster
    host_config.nodes = list(hosts.keys())

    nodes = make_nodes(
        config_cache,
        host_config,
        address,
        sources=make_sources(host_config, address),
    )

    # Pre-built OK fetcher messages (one per source per node) so no real
    # fetching happens; only the aggregation logic is exercised.
    host_sections = _collect_host_sections(
        nodes=nodes,
        file_cache_max_age=host_config.max_cachefile_age,
        fetcher_messages=[
            FetcherMessage.from_raw_data(
                result.OK(source.default_raw_data),
                Snapshot.null(),
                source.fetcher_type,
            ) for _h, _i, sources in nodes for source in sources
        ],
        selected_sections=NO_SELECTION,
    )[0]
    # One host-section entry per cluster node.
    assert len(host_sections) == len(hosts) == 3

    # Status files must be removed (once per node), never (re)stored.
    cmk.utils.piggyback._store_status_file_of.assert_not_called(
    )  # type: ignore[attr-defined]
    assert cmk.utils.piggyback.remove_source_status_file.call_count == 3  # type: ignore[attr-defined]

    for host, addr in hosts.items():
        remove_source_status_file = cmk.utils.piggyback.remove_source_status_file
        remove_source_status_file.assert_any_call(
            host)  # type: ignore[attr-defined]
        key = HostKey(host, addr, SourceType.HOST)
        assert key in host_sections
        section = host_sections[key]
        # Exactly the single mocked section, with no cache metadata and no
        # piggybacked payload.
        assert len(section.sections) == 1
        assert next(iter(section.sections)) == section_name
        assert not section.cache_info
        assert not section.piggybacked_raw_data
def l3stats(self):
    """Return a fresh ``ResultStats`` wrapping the null ``Snapshot``.

    NOTE(review): independent of instance state; a new object is built on
    every call.
    """
    null_snapshot = Snapshot.null()
    return ResultStats(null_snapshot)