def test__perform_host_label_discovery_on_cluster(cluster_scenario, discovery_test_case):
    """Host label analysis on a cluster matches the per-case expectations.

    Checks the vanished/old/new classification returned by the analysis as
    well as the label files persisted for the cluster host and both nodes.
    """
    scenario = cluster_scenario
    expected = discovery_test_case.on_cluster

    with cmk_debug_enabled():
        result = discovery._host_labels.analyse_cluster_host_labels(
            host_config=scenario.host_config,
            ipaddress=scenario.ipaddress,
            parsed_sections_broker=scenario.parsed_sections_broker,
            discovery_parameters=discovery_test_case.parameters,
        )

    assert result.vanished == expected.expected_vanished_host_labels
    assert result.old == expected.expected_old_host_labels
    assert result.new == expected.expected_new_host_labels

    # The analysis also writes the label stores: cluster host and both nodes.
    assert (DiscoveredHostLabelsStore(scenario.host_config.hostname).load() ==
            expected.expected_stored_labels_cluster)
    assert (DiscoveredHostLabelsStore(scenario.node1_hostname).load() ==
            expected.expected_stored_labels_node1)
    assert (DiscoveredHostLabelsStore(scenario.node2_hostname).load() ==
            expected.expected_stored_labels_node2)
def test_discovered_host_labels_store_save(labels, discovered_host_labels_dir):
    """A saved label dict creates the store file and round-trips via load()."""
    store = DiscoveredHostLabelsStore("host")

    labels["xyz"] = "äbc"
    saved = labels.to_dict()

    assert not store.file_path.exists()  # pylint: disable=no-member
    store.save(saved)
    assert store.file_path.exists()  # pylint: disable=no-member
    assert store.load() == saved
def test_discovered_host_labels_store_save(discovered_host_labels_dir):
    """save() materializes the store file; load() returns the saved dict."""
    store = DiscoveredHostLabelsStore("host")
    saved = DiscoveredHostLabels(HostLabel(u"xyz", u"äbc")).to_dict()

    assert not store.file_path.exists()
    store.save(saved)
    assert store.file_path.exists()
    assert store.load() == saved
def test_discovered_host_labels_store_save(discovered_host_labels_dir):
    """A label dict (including its section name) round-trips through the store."""
    store = DiscoveredHostLabelsStore(HostName("host"))
    saved = DiscoveredHostLabels(
        HostLabel("xyz", "äbc", SectionName("sectionname"))).to_dict()

    assert not store.file_path.exists()
    store.save(saved)
    assert store.file_path.exists()
    assert store.load() == saved
def _load_existing_host_labels(
    *,
    host_name: HostName,
    discovery_parameters: DiscoveryParameters,
) -> Sequence[HostLabel]:
    """Return the persisted host labels, or nothing if loading is disabled.

    When ``discovery_parameters.load_labels`` is unset (e.g. a tabula-rasa
    discovery), previously stored labels are not taken over.
    """
    if not discovery_parameters.load_labels:
        return []

    stored = DiscoveredHostLabelsStore(host_name).load()
    return [HostLabel.from_dict(name, raw) for name, raw in stored.items()]
def test_discovered_host_labels_path(discovered_host_labels_dir):
    """Saving labels creates <labels_dir>/<hostname>.mk."""
    hostname = "test.host.de"
    config.get_config_cache().initialize()

    assert not (discovered_host_labels_dir / hostname).exists()

    label_dict = DiscoveredHostLabels(HostLabel("foo", "1.5")).to_dict()
    DiscoveredHostLabelsStore(hostname).save(label_dict)

    assert (discovered_host_labels_dir / f"{hostname}.mk").exists()
def test_discovered_host_labels_store_save(
        discovered_host_labels_dir: Path) -> None:
    """save() creates the store file and load() returns exactly what was saved."""
    store = DiscoveredHostLabelsStore(HostName("host"))

    # save below expects Dict[Any, Any] :-|
    saved: Dict[str, HostLabelValueDict] = {
        "xyz": {
            "value": "äbc",
            "plugin_name": "sectionname",
        },
    }

    assert not store.file_path.exists()
    store.save(saved)
    assert store.file_path.exists()
    assert store.load() == saved
def test_do_discovery(monkeypatch):
    """A full discovery run on a faked linux agent host finds the expected
    services (with their service labels) and the expected host labels."""
    ts = Scenario().add_host("test-host", ipaddress="127.0.0.1")
    ts.fake_standard_linux_agent_output("test-host")
    ts.apply(monkeypatch)

    with cmk_debug_enabled():
        discovery.do_discovery(
            arg_hostnames={"test-host"},
            selected_sections=NO_SELECTION,
            run_only_plugin_names=None,
            arg_only_new=False,
        )

    # Compare discovered services (keyed by plugin name and item) against
    # the module-level expectation.
    discovered = autochecks.parse_autochecks_file("test-host", config.service_description)
    by_id = {(entry.check_plugin_name, entry.item): entry.service_labels.to_dict()
             for entry in discovered}
    assert by_id == _expected_services

    label_store = DiscoveredHostLabelsStore("test-host")
    assert label_store.load() == _expected_host_labels
def test_discovered_host_labels_path(discovered_host_labels_dir: Path) -> None:
    """Saving a label dict creates <labels_dir>/<hostname>.mk."""
    hostname = "test.host.de"
    config.get_config_cache().initialize()

    assert not (discovered_host_labels_dir / hostname).exists()

    label_dict = {
        "something": {
            "value": "wonderful",
            "plugin_name": "norris",
        },
    }
    DiscoveredHostLabelsStore(HostName(hostname)).save(label_dict)

    assert (discovered_host_labels_dir / f"{hostname}.mk").exists()
def _analyse_host_labels(
    *,
    host_name: HostName,
    discovered_host_labels: Sequence[HostLabel],
    existing_host_labels: Sequence[HostLabel],
    discovery_parameters: DiscoveryParameters,
) -> QualifiedDiscovery[HostLabel]:
    """Classify host labels into vanished/old/new and optionally persist them.

    Args:
        host_name: Host whose labels are analysed.
        discovered_host_labels: Labels found in the current discovery run.
        existing_host_labels: Labels loaded from the label store beforehand.
        discovery_parameters: Controls whether the result is saved.

    Returns:
        A QualifiedDiscovery comparing existing vs. currently discovered
        labels, keyed by the labels' ``label`` attribute.
    """
    section.section_step("Analyse discovered host labels")

    host_labels = QualifiedDiscovery(
        preexisting=existing_host_labels,
        current=discovered_host_labels,
        key=lambda hl: hl.label,
    )

    if discovery_parameters.save_labels:
        # Persist vanished AND present labels (present entries win on key clash).
        DiscoveredHostLabelsStore(host_name).save({
            # TODO (mo): Im not sure this is desired. If it is, it should be explained.
            **{l.name: l.to_dict() for l in host_labels.vanished},
            **{l.name: l.to_dict() for l in host_labels.present},
        })

    if host_labels.new:
        # Some check plugins like 'df' may discover services based on host labels.
        # A rule may look like:
        # [{
        #     'value': {
        #         'ignore_fs_types': ['tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660'],
        #         'never_ignore_mountpoints': ['~.*/omd/sites/[^/]+/tmp$']
        #     },
        #     'condition': {
        #         'host_labels': {
        #             'cmk/check_mk_server': 'yes'
        #         }
        #     }
        # }]
        # In the first step '_discover_host_labels' the ruleset optimizer caches the
        # result of the evaluation of these rules. Contemporary we may find new host
        # labels which are not yet taken into account by the ruleset optimizer.
        # In the next step '_discover_services' we want to discover new services
        # based on these new host labels but we only got the cached result.
        # If we found new host labels, we have to evaluate these rules again in order
        # to find new services, eg. in 'inventory_df'. Thus we have to clear these caches.
        config.get_config_cache().ruleset_matcher.ruleset_optimizer.clear_caches()

    return host_labels
def test_perform_host_label_discovery(discovered_host_labels_dir, existing_labels, new_labels,
                                      expected_labels, load_labels):
    """Discovery combines stored and newly found labels as parametrized.

    The ``load_labels`` flag controls whether the pre-saved labels are taken
    into account; the result must match ``expected_labels``.
    """
    hostname = "testhost"
    config.get_config_cache().initialize()

    # Pre-populate the label store with the "existing" labels.
    store = DiscoveredHostLabelsStore(hostname)
    store.save(DiscoveredHostLabels(*[HostLabel(*raw) for raw in existing_labels]).to_dict())

    discovery_parameters = DiscoveryParameters(
        on_error="raise",
        load_labels=load_labels,
        save_labels=False,
    )

    freshly_discovered = DiscoveredHostLabels(*[HostLabel(*raw) for raw in new_labels])
    new_host_labels, _host_labels_per_plugin = _perform_host_label_discovery(
        hostname, freshly_discovered, discovery_parameters)

    expected = DiscoveredHostLabels(*[HostLabel(*raw) for raw in expected_labels])
    assert new_host_labels.to_dict() == expected.to_dict()
def test__perform_host_label_discovery_on_realhost(realhost_scenario, discovery_test_case):
    """Host label analysis on a real (non-cluster) host matches expectations,
    both for the returned classification and the persisted label store."""
    scenario = realhost_scenario
    expected = discovery_test_case.on_realhost

    with cmk_debug_enabled():
        result = discovery.analyse_host_labels(
            host_name=scenario.hostname,
            ipaddress=scenario.ipaddress,
            parsed_sections_broker=scenario.parsed_sections_broker,
            discovery_parameters=discovery_test_case.parameters,
        )

    assert result.vanished == expected.expected_vanished_host_labels
    assert result.old == expected.expected_old_host_labels
    assert result.new == expected.expected_new_host_labels

    assert DiscoveredHostLabelsStore(scenario.hostname).load() == expected.expected_stored_labels
def _do_inv_for(sources, multi_host_sections, host_config, ipaddress, do_status_data_inv):
    # type: (data_sources.DataSources, data_sources.MultiHostSections, config.HostConfig, Optional[str], bool) -> Tuple[Optional[float], StructuredDataTree, StructuredDataTree, DiscoveredHostLabels]
    """Run HW/SW inventory for one host and persist the resulting trees.

    Fills the global inventory tree (cluster or real-host path), saves it,
    runs the export hooks, optionally persists discovered host labels and the
    status data tree, and returns the previous save timestamp together with
    the inventory tree, the status data tree and the discovered host labels.
    """
    hostname = host_config.hostname

    # NOTE(review): the inventory tree is module-global state (g_inv_tree),
    # (re)initialized here — this function is not reentrant.
    _initialize_inventory_tree()
    inventory_tree = g_inv_tree
    status_data_tree = StructuredDataTree()
    discovered_host_labels = DiscoveredHostLabels(inventory_tree)

    node = inventory_tree.get_dict("software.applications.check_mk.cluster.")
    if host_config.is_cluster:
        node["is_cluster"] = True
        _do_inv_for_cluster(host_config, inventory_tree)
    else:
        node["is_cluster"] = False
        _do_inv_for_realhost(host_config, sources, multi_host_sections, hostname, ipaddress,
                             inventory_tree, status_data_tree, discovered_host_labels)

    inventory_tree.normalize_nodes()
    # old_timestamp is the timestamp of the previously saved tree (if any).
    old_timestamp = _save_inventory_tree(hostname, inventory_tree)
    _run_inventory_export_hooks(host_config, inventory_tree)

    success_msg = [
        "Found %s%s%d%s inventory entries" %
        (tty.bold, tty.yellow, inventory_tree.count_entries(), tty.normal)
    ]

    if host_config.do_host_label_discovery:
        # Host labels are only persisted when label discovery is enabled.
        DiscoveredHostLabelsStore(hostname).save(discovered_host_labels.to_dict())
        success_msg.append("and %s%s%d%s host labels" %
                           (tty.bold, tty.yellow, len(discovered_host_labels), tty.normal))

    console.section_success(", ".join(success_msg))

    if do_status_data_inv:
        status_data_tree.normalize_nodes()
        _save_status_data_tree(hostname, status_data_tree)
        console.section_success(
            "Found %s%s%d%s status entries" %
            (tty.bold, tty.yellow, status_data_tree.count_entries(), tty.normal))

    return old_timestamp, inventory_tree, status_data_tree, discovered_host_labels
def test_discovered_host_labels_store_file_path(discovered_host_labels_dir):
    """The store file for a host is <labels_dir>/<host>.mk."""
    expected_path = discovered_host_labels_dir / "host.mk"
    assert DiscoveredHostLabelsStore("host").file_path == expected_path
def test_discovered_host_labels_store_file_path(discovered_host_labels_dir: Path) -> None:
    """The store file for a host is <labels_dir>/<host>.mk."""
    store = DiscoveredHostLabelsStore(HostName("host"))
    assert store.file_path == discovered_host_labels_dir / "host.mk"
def _discovered_labels_of_host(hostname: HostName) -> Labels:
    """Map each stored label name of *hostname* to its plain value."""
    stored = DiscoveredHostLabelsStore(hostname).load()
    return {name: entry["value"] for name, entry in stored.items()}
def test_discovered_host_labels_store_load_default(discovered_host_labels_dir):
    """load() on a store without an existing file yields an empty dict."""
    label_store = DiscoveredHostLabelsStore("host")
    assert not label_store.file_path.exists()
    assert label_store.load() == {}
def _cluster_scenario(monkeypatch):
    """Build a two-node cluster test scenario for host label discovery.

    Sets up a cluster of two nodes with a df inventory rule conditioned on a
    host label, pre-populates the label stores of the cluster host and node1,
    and provides a ParsedSectionsBroker with 'labels' and 'df' agent sections
    for both nodes.
    """
    hostname = "test-clusterhost"
    ipaddress = "1.2.3.4"
    node1_hostname = 'test-node1'
    node2_hostname = 'test-node2'

    # Avoid real DNS lookups during the test.
    def fake_lookup_ip_address(*_a, **_kw):
        return ipaddress

    monkeypatch.setattr(ip_lookup, "lookup_ip_address", fake_lookup_ip_address)

    ts = Scenario()
    ts.add_host(node1_hostname)
    ts.add_host(node2_hostname)
    ts.add_cluster(hostname, nodes=[node1_hostname, node2_hostname])
    # df discovery is gated on the 'cmk/check_mk_server' host label.
    ts.set_ruleset("inventory_df_rules", [{
        'value': {
            'ignore_fs_types': ['tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660'],
            'never_ignore_mountpoints': ['~.*/omd/sites/[^/]+/tmp$']
        },
        'condition': {
            'host_labels': {
                'cmk/check_mk_server': 'yes'
            }
        }
    }])
    ts.set_ruleset("clustered_services", [([], [node1_hostname], ['fs_'])])
    host_config = ts.apply(monkeypatch).get_host_config(hostname)

    # Pre-existing labels: one on node1, two on the cluster host itself.
    DiscoveredHostLabelsStore(node1_hostname).save({
        'node1_existing_label': {
            'plugin_name': 'node1_plugin',
            'value': 'true',
        }
    })

    DiscoveredHostLabelsStore(hostname).save({
        'existing_label': {
            'plugin_name': 'foo',
            'value': 'bar',
        },
        'another_label': {
            'plugin_name': 'labels',
            'value': 'true',
        }
    })

    # Agent data for both nodes: a 'labels' section and two df lines each.
    broker = ParsedSectionsBroker({
        HostKey(hostname=node1_hostname, ipaddress=ipaddress, source_type=SourceType.HOST):
            SectionsParser(host_sections=AgentHostSections(
                sections={
                    SectionName("labels"): [
                        [
                            '{"cmk/check_mk_server":"yes"}',
                        ],
                    ],
                    SectionName("df"): [
                        [
                            '/dev/sda1',
                            'vfat',
                            '523248',
                            '3668',
                            '519580',
                            '1%',
                            '/boot/test-efi',
                        ],
                        [
                            'tmpfs',
                            'tmpfs',
                            '8152916',
                            '244',
                            '8152672',
                            '1%',
                            '/opt/omd/sites/test-heute/tmp',
                        ],
                    ],
                },
            ),
            ),
        HostKey(hostname=node2_hostname, ipaddress=ipaddress, source_type=SourceType.HOST):
            SectionsParser(host_sections=AgentHostSections(
                sections={
                    SectionName("labels"): [
                        [
                            '{"node2_live_label":"true"}',
                        ],
                    ],
                    SectionName("df"): [
                        [
                            '/dev/sda1',
                            'vfat',
                            '523248',
                            '3668',
                            '519580',
                            '1%',
                            '/boot/test-efi',
                        ],
                        [
                            'tmpfs',
                            'tmpfs',
                            '8152916',
                            '244',
                            '8152672',
                            '1%',
                            '/opt/omd/sites/test-heute2/tmp',
                        ],
                    ],
                },
            ),
            ),
    })

    return ClusterScenario(
        host_config,
        ipaddress,
        broker,
        node1_hostname,
        node2_hostname,
    )
def _load_existing_host_labels(host_name: HostName) -> Sequence[HostLabel]:
    """Read the persisted host labels of *host_name* from the label store."""
    return [
        HostLabel.from_dict(name, raw)
        for name, raw in DiscoveredHostLabelsStore(host_name).load().items()
    ]
def _realhost_scenario(monkeypatch):
    """Build a single (non-cluster) host test scenario for label discovery.

    Configures one host with a df inventory rule conditioned on a host label,
    pre-populates its label store with two labels, and provides a
    ParsedSectionsBroker with 'labels' and 'df' agent sections.
    """
    hostname = "test-realhost"
    ipaddress = "1.2.3.4"
    ts = Scenario().add_host(hostname, ipaddress=ipaddress)
    # df discovery is gated on the 'cmk/check_mk_server' host label.
    ts.set_ruleset("inventory_df_rules", [{
        'value': {
            'ignore_fs_types': ['tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660'],
            'never_ignore_mountpoints': ['~.*/omd/sites/[^/]+/tmp$']
        },
        'condition': {
            'host_labels': {
                'cmk/check_mk_server': 'yes'
            }
        }
    }])
    ts.apply(monkeypatch)

    # Avoid real DNS lookups during the test.
    def fake_lookup_ip_address(*_a, **_kw):
        return ipaddress

    monkeypatch.setattr(ip_lookup, "lookup_ip_address", fake_lookup_ip_address)

    # Pre-existing labels in the host's label store.
    DiscoveredHostLabelsStore(hostname).save({
        'existing_label': {
            'plugin_name': 'foo',
            'value': 'bar',
        },
        'another_label': {
            'plugin_name': 'labels',
            'value': 'true',
        }
    })

    # Agent data: a 'labels' section and two df lines.
    broker = ParsedSectionsBroker({
        HostKey(hostname=hostname, ipaddress=ipaddress, source_type=SourceType.HOST):
            AgentHostSections(
                sections={
                    SectionName("labels"): [
                        [
                            '{"cmk/check_mk_server":"yes"}',
                        ],
                    ],
                    SectionName("df"): [
                        [
                            '/dev/sda1',
                            'vfat',
                            '523248',
                            '3668',
                            '519580',
                            '1%',
                            '/boot/test-efi',
                        ],
                        [
                            'tmpfs',
                            'tmpfs',
                            '8152916',
                            '244',
                            '8152672',
                            '1%',
                            '/opt/omd/sites/test-heute/tmp',
                        ],
                    ],
                })
    })

    return RealHostScenario(hostname, ipaddress, broker)