def scenario_fixture(monkeypatch):
    """Build a scenario with two standalone hosts and a two-node cluster.

    All hosts resolve to 127.0.0.1, read their "agent" data from a
    per-host TCP cache file via a datasource program, and the standard
    Linux agent output is written into each cache file.
    """
    hosts = ["ds-test-host1", "ds-test-host2", "ds-test-node1", "ds-test-node2"]

    scenario = Scenario()
    for name in hosts:
        scenario.add_host(name)
    scenario.set_option("ipaddresses", {name: "127.0.0.1" for name in hosts})
    scenario.add_cluster("ds-test-cluster1", nodes=["ds-test-node1", "ds-test-node2"])

    # Replace real agent access: every host "runs" cat on its cache file.
    scenario.set_ruleset(
        "datasource_programs",
        [
            ("cat %s/<HOST>" % cmk.utils.paths.tcp_cache_dir, [], hosts, {}),
        ],
    )

    agent_output = get_standard_linux_agent_output()
    for name in hosts:
        cache_file = Path(cmk.utils.paths.tcp_cache_dir, name)
        cache_file.parent.mkdir(parents=True, exist_ok=True)
        with cache_file.open("w", encoding="utf-8") as stream:
            stream.write(agent_output)

    return scenario.apply(monkeypatch)
def test_get_section_content(monkeypatch, hostname, nodes, host_entries, cluster_mapping,
                             service_descr, expected_result):
    """Verify MultiHostSections.get_section_content for hosts and clusters.

    `nodes is None` means a plain host; otherwise a cluster with those
    nodes is set up.  `cluster_mapping` stubs out which host "owns" a
    clustered service.
    """
    scenario = Scenario()
    if nodes is None:
        scenario.add_host(hostname)
    else:
        scenario.add_cluster(hostname, nodes=nodes)
    for node in nodes or []:
        scenario.add_host(node)
    config_cache = scenario.apply(monkeypatch)

    def host_of_clustered_service(hostname, service_description):
        return cluster_mapping[hostname]

    multi_host_sections = MultiHostSections()
    for nodename, node_section_content in host_entries:
        multi_host_sections.add_or_get_host_sections(
            nodename, "127.0.0.1",
            AgentHostSections(sections={"check_plugin_name": node_section_content}))

    monkeypatch.setattr(ip_lookup, "lookup_ip_address", lambda h: "127.0.0.1")
    monkeypatch.setattr(config_cache, "host_of_clustered_service", host_of_clustered_service)

    section_content = multi_host_sections.get_section_content(hostname,
                                                              "127.0.0.1",
                                                              "check_plugin_name",
                                                              False,
                                                              service_description=service_descr)
    assert section_content == expected_result, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, section_content)
def test_cfg(monkeypatch):
    """Build a scenario with test hosts + a cluster reading cached agent data.

    The canned Linux agent output from the repository's test files is
    written into each host's TCP cache file, and a datasource program
    rule makes every host read its data back from that cache.
    Returns the applied config cache.
    """
    test_hosts = ["ds-test-host1", "ds-test-host2", "ds-test-node1", "ds-test-node2"]

    ts = Scenario()
    for h in test_hosts:
        ts.add_host(h)
    ts.set_option("ipaddresses", dict((h, "127.0.0.1") for h in test_hosts))
    ts.add_cluster("ds-test-cluster1", nodes=["ds-test-node1", "ds-test-node2"])
    ts.set_ruleset("datasource_programs", [
        ('cat %s/<HOST>' % cmk.utils.paths.tcp_cache_dir, [], test_hosts, {}),
    ])

    # BUG FIX: the file was opened in text mode, so read() already yields
    # str; calling .decode("utf-8") on it raises AttributeError on
    # Python 3 (Python-2 leftover).  Open with an explicit encoding and
    # drop the decode.
    with open("%s/tests/integration/cmk/base/test-files/linux-agent-output" % repo_path(),
              encoding="utf-8") as f:
        linux_agent_output = f.read()

    for h in test_hosts:
        cache_path = Path(cmk.utils.paths.tcp_cache_dir, h)
        cache_path.parent.mkdir(parents=True, exist_ok=True)  # pylint: disable=no-member
        with cache_path.open("w", encoding="utf-8") as f:
            f.write(linux_agent_output)

    return ts.apply(monkeypatch)
def test_create_nagios_host_spec(hostname, result, monkeypatch):
    """Check the Nagios host definition generated for various host setups."""
    ts = Scenario().add_host("localhost")
    ts.add_host("host2")
    ts.add_cluster("cluster1")
    ts.add_cluster("cluster2", nodes=["node1", "node2"])
    ts.add_host("node1")
    ts.add_host("node2")
    ts.add_host("switch")
    ts.set_option("ipaddresses", {
        "node1": "127.0.0.1",
        "node2": "127.0.0.2",
    })
    # NOTE(review): this first extra_host_conf value is immediately
    # overwritten by the second set_option call below — confirm whether
    # the first call is intentional.
    ts.set_option("extra_host_conf", {
        "alias": [(u'lOCALhost', ['localhost']),],
    })
    ts.set_option(
        "extra_host_conf", {
            "alias": [
                (u'lOCALhost', ['host2']),
                (u'CLUSTer', ['cluster2']),
            ],
            "parents": [('switch', ['node1', 'node2']),],
        })

    outfile = StringIO()
    cfg = core_nagios.NagiosConfig(outfile, [hostname])

    config_cache = ts.apply(monkeypatch)
    host_attrs = core_config.get_host_attributes(hostname, config_cache)
    host_spec = core_nagios._create_nagios_host_spec(cfg, config_cache, hostname, host_attrs)
    assert host_spec == result
def test_get_check_table(monkeypatch, hostname, expected_result):
    """Check how static checks, autochecks and clustering combine into the check table."""
    # Canned autochecks per host; get_autochecks_of is patched to serve these.
    autochecks = {
        "ping-host": [Service("smart.temp", "bla", u'Temperature SMART bla', {})],
        "autocheck-overwrite": [
            Service('smart.temp', '/dev/sda', u'Temperature SMART /dev/sda',
                    {"is_autocheck": True}),
            Service('smart.temp', '/dev/sdb', u'Temperature SMART /dev/sdb',
                    {"is_autocheck": True}),
        ],
        "ignore-not-existing-checks": [
            Service("bla.blub", "ITEM", u'Blub ITEM', {}),
        ],
        "node1": [
            Service("smart.temp", "auto-clustered", u"Temperature SMART auto-clustered", {}),
            Service("smart.temp", "auto-not-clustered", u'Temperature SMART auto-not-clustered',
                    {}),
        ],
    }

    scenario = Scenario().add_host(hostname, tags={"criticality": "test"})
    scenario.add_host("ping-host", tags={"agent": "no-agent"})
    scenario.add_host("node1")
    scenario.add_cluster("cluster1", nodes=["node1"])
    scenario.set_option(
        "static_checks",
        {
            "temperature": [
                (('smart.temp', '/dev/sda', {}), [], ["no-autochecks", "autocheck-overwrite"]),
                (('blub.bla', 'ITEM', {}), [], ["ignore-not-existing-checks"]),
                (('smart.temp', 'ITEM1', {}), [], ["ignore-disabled-rules"], {
                    "disabled": True
                }),
                (('smart.temp', 'ITEM2', {}), [], ["ignore-disabled-rules"]),
                (('smart.temp', '/dev/sda', {
                    "rule": 1
                }), [], ["static-check-overwrite"]),
                (('smart.temp', '/dev/sda', {
                    "rule": 2
                }), [], ["static-check-overwrite"]),
                (('smart.temp', 'static-node1', {}), [], ["node1"]),
                (('smart.temp', 'static-cluster', {}), [], ["cluster1"]),
            ]
        },
    )
    # Move the matching autocheck from node1 onto the cluster.
    scenario.set_ruleset("clustered_services", [
        ([], ['node1'], [u'Temperature SMART auto-clustered$']),
    ])
    config_cache = scenario.apply(monkeypatch)
    monkeypatch.setattr(config_cache, "get_autochecks_of",
                        lambda h: autochecks.get(h, []))

    CheckManager().load(["smart"])
    assert check_table.get_check_table(hostname) == expected_result
def test_all_active_hosts(monkeypatch):
    """Only hosts/clusters assigned to this site (or untagged) count as active."""
    scenario = Scenario(site_id="site1")
    scenario.add_host("real1", tags={"site": "site1"})
    scenario.add_host("real2", tags={"site": "site2"})  # other site: inactive here
    scenario.add_host("real3")  # no site tag: belongs to the local site
    scenario.add_cluster("cluster1", {"site": "site1"}, nodes=["node1"])
    scenario.add_cluster("cluster2", {"site": "site2"}, nodes=["node2"])
    scenario.add_cluster("cluster3", nodes=["node3"])

    config_cache = scenario.apply(monkeypatch)
    assert config_cache.all_active_clusters() == {"cluster1", "cluster3"}
    assert config_cache.all_active_realhosts() == {"real1", "real3"}
    assert config_cache.all_active_hosts() == {"cluster1", "cluster3", "real1", "real3"}
def scenario_fixture(monkeypatch):
    """Scenario with four hosts and a two-node cluster, faked agent output.

    The monitoring core is chosen depending on whether the enterprise
    repo is available (cmc) or not (nagios).
    """
    hosts = ["ds-test-host1", "ds-test-host2", "ds-test-node1", "ds-test-node2"]

    scenario = Scenario()
    core = "cmc" if is_enterprise_repo() else "nagios"
    scenario.set_option("monitoring_core", core)

    for name in hosts:
        scenario.add_host(name)
    scenario.set_option("ipaddresses", {name: "127.0.0.1" for name in hosts})
    scenario.add_cluster("ds-test-cluster1", nodes=["ds-test-node1", "ds-test-node2"])
    scenario.fake_standard_linux_agent_output(*hosts)

    return scenario.apply(monkeypatch)
def _set_up(monkeypatch, hostname, nodes, cluster_mapping):
    """Prepare a host or cluster scenario with stubbed IP lookup and section plugins.

    `nodes is None` sets up a plain host, otherwise a cluster of the
    given nodes.  `cluster_mapping` stubs the host owning each
    clustered service.
    """
    scenario = Scenario()
    if nodes is None:
        scenario.add_host(hostname)
    else:
        scenario.add_cluster(hostname, nodes=nodes)
    for node in nodes or []:
        scenario.add_host(node)
    config_cache = scenario.apply(monkeypatch)

    def host_of_clustered_service(hostname, _service_description):
        return cluster_mapping[hostname]

    monkeypatch.setattr(ip_lookup, "lookup_ip_address", lambda h: "127.0.0.1")
    monkeypatch.setattr(config_cache, "host_of_clustered_service", host_of_clustered_service)
    monkeypatch.setattr(config, "get_registered_section_plugin", MOCK_SECTIONS.get)
def _set_up(monkeypatch, hostname, nodes, cluster_mapping) -> None:
    """Prepare a host or cluster scenario with stubbed IP lookup and section plugins.

    `nodes is None` sets up a plain host, otherwise a cluster of the
    given nodes.  `cluster_mapping` stubs the host owning each
    clustered service.
    """
    scenario = Scenario()
    if nodes is None:
        scenario.add_host(hostname)
    else:
        scenario.add_cluster(hostname, nodes=nodes)
    for node in nodes or []:
        scenario.add_host(node)
    config_cache = scenario.apply(monkeypatch)

    def host_of_clustered_service(hostname, _service_description):
        return cluster_mapping[hostname]

    def fake_lookup_ip_address(hostname, family=None, for_mgmt_board=False):  # pylint: disable=unused-argument
        return "127.0.0.1"

    monkeypatch.setattr(ip_lookup, "lookup_ip_address", fake_lookup_ip_address)
    monkeypatch.setattr(config_cache, "host_of_clustered_service", host_of_clustered_service)
    # Raises KeyError for unknown sections (unlike .get), matching the
    # original behavior.
    monkeypatch.setattr(agent_based_register._config, "get_section_plugin",
                        MOCK_SECTIONS.__getitem__)
def _cluster_scenario(monkeypatch):
    # Builds a two-node cluster scenario for host-label discovery tests:
    # patches IP lookup, configures df/clustered_services rulesets,
    # pre-seeds discovered host labels on node1 and the cluster, and
    # fills a ParsedSectionsBroker with "labels" and "df" agent sections
    # for both nodes.  Returns a ClusterScenario bundle.
    hostname = "test-clusterhost"
    ipaddress = "1.2.3.4"
    node1_hostname = 'test-node1'
    node2_hostname = 'test-node2'

    # All IP lookups resolve to the fixed test address.
    def fake_lookup_ip_address(*_a, **_kw):
        return ipaddress

    monkeypatch.setattr(ip_lookup, "lookup_ip_address", fake_lookup_ip_address)

    ts = Scenario()
    ts.add_host(node1_hostname)
    ts.add_host(node2_hostname)
    ts.add_cluster(hostname, nodes=[node1_hostname, node2_hostname])
    # df discovery rule scoped to hosts carrying the cmk/check_mk_server
    # label; tmpfs etc. are ignored except for OMD site tmp mountpoints.
    ts.set_ruleset("inventory_df_rules", [{
        'value': {
            'ignore_fs_types': ['tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660'],
            'never_ignore_mountpoints': ['~.*/omd/sites/[^/]+/tmp$']
        },
        'condition': {
            'host_labels': {
                'cmk/check_mk_server': 'yes'
            }
        }
    }])
    # Cluster all filesystem services of node1.
    ts.set_ruleset("clustered_services", [([], [node1_hostname], ['fs_'])])
    host_config = ts.apply(monkeypatch).get_host_config(hostname)

    # Pre-existing discovered labels on node1 ...
    DiscoveredHostLabelsStore(node1_hostname).save({
        'node1_existing_label': {
            'plugin_name': 'node1_plugin',
            'value': 'true',
        }
    })

    # ... and on the cluster host itself.
    DiscoveredHostLabelsStore(hostname).save({
        'existing_label': {
            'plugin_name': 'foo',
            'value': 'bar',
        },
        'another_label': {
            'plugin_name': 'labels',
            'value': 'true',
        }
    })

    # Raw agent sections per node: a "labels" section (JSON host labels)
    # and a "df" section with one real and one tmpfs filesystem each.
    broker = ParsedSectionsBroker({
        HostKey(hostname=node1_hostname, ipaddress=ipaddress, source_type=SourceType.HOST):
            SectionsParser(host_sections=AgentHostSections(sections={
                SectionName("labels"): [
                    [
                        '{"cmk/check_mk_server":"yes"}',
                    ],
                ],
                SectionName("df"): [
                    [
                        '/dev/sda1',
                        'vfat',
                        '523248',
                        '3668',
                        '519580',
                        '1%',
                        '/boot/test-efi',
                    ],
                    [
                        'tmpfs',
                        'tmpfs',
                        '8152916',
                        '244',
                        '8152672',
                        '1%',
                        '/opt/omd/sites/test-heute/tmp',
                    ],
                ],
            },),),
        HostKey(hostname=node2_hostname, ipaddress=ipaddress, source_type=SourceType.HOST):
            SectionsParser(host_sections=AgentHostSections(sections={
                SectionName("labels"): [
                    [
                        '{"node2_live_label":"true"}',
                    ],
                ],
                SectionName("df"): [
                    [
                        '/dev/sda1',
                        'vfat',
                        '523248',
                        '3668',
                        '519580',
                        '1%',
                        '/boot/test-efi',
                    ],
                    [
                        'tmpfs',
                        'tmpfs',
                        '8152916',
                        '244',
                        '8152672',
                        '1%',
                        '/opt/omd/sites/test-heute2/tmp',
                    ],
                ],
            },),),
    })

    return ClusterScenario(
        host_config,
        ipaddress,
        broker,
        node1_hostname,
        node2_hostname,
    )
def cluster_config(monkeypatch):
    """Minimal config cache: one standalone host plus a single-node cluster."""
    scenario = Scenario().add_host("node1").add_host("host1")
    scenario.add_cluster("cluster1", nodes=["node1"])
    return scenario.apply(monkeypatch)