def generate(self) -> None:
    """Create the initial WATO sample configuration.

    Writes the initial global settings, a catch-all contact group "all",
    the builtin tag configuration, a set of factory-default rulesets in the
    root folder and a single rule-based notification rule.
    """
    save_global_settings(self._initial_global_settings())

    # A contact group for all hosts and services
    groups: AllGroupSpecs = {
        "contact": {
            GroupName("all"): {
                "alias": "Everything"
            }
        },
    }
    save_group_information(groups)

    self._initialize_tag_config()

    # Rules that match the upper host tag definition
    ruleset_config = {
        # Make the tag 'offline' remove hosts from the monitoring
        "only_hosts": [
            {
                "id": "10843c55-11ea-4eb2-bfbc-bce65cd2ae22",
                "condition": {
                    "host_tags": {
                        "criticality": {
                            "$ne": "offline"
                        }
                    }
                },
                "value": True,
                "options": {
                    "description": 'Do not monitor hosts with the tag "offline"'
                },
            },
        ],
        # Rule for WAN hosts with adapted PING levels
        "ping_levels": [
            {
                "id": "0365b634-30bf-40a3-8516-08e86051508e",
                "condition": {
                    "host_tags": {
                        "networking": "wan",
                    }
                },
                "value": {
                    "loss": (80.0, 100.0),
                    "packets": 6,
                    "timeout": 20,
                    "rta": (1500.0, 3000.0),
                },
                "options": {
                    "description": "Allow longer round trip times when pinging WAN hosts"
                },
            },
        ],
        # All hosts should use SNMP v2c if not specially tagged
        "bulkwalk_hosts": [
            {
                "id": "b92a5406-1d57-4f1d-953d-225b111239e5",
                "condition": {
                    "host_tags": {
                        "snmp": "snmp",
                        "snmp_ds": {
                            "$ne": "snmp-v1"
                        },
                    },
                },
                "value": True,
                "options": {
                    "description": 'Hosts with the tag "snmp-v1" must not use bulkwalk'
                },
            },
        ],
        # All SNMP managment boards should use SNMP v2c if not specially tagged
        "management_bulkwalk_hosts": [
            {
                "id": "59d84cde-ee3a-4f8d-8bec-fce35a2b0d15",
                "condition": {},
                "value": True,
                "options": {
                    "description": "All management boards use SNMP v2 (incl. bulk walks) by default"
                },
            },
        ],
        # Put all hosts and the contact group 'all'
        "host_contactgroups": [
            {
                "id": "efd67dab-68f8-4d3c-a417-9f7e29ab48d5",
                "condition": {},
                "value": "all",
                "options": {
                    "description": 'Put all hosts into the contact group "all"'
                },
            },
        ],
        # Docker container specific host check commands
        "host_check_commands": [
            {
                "id": "24da4ccd-0d1b-40e3-af87-0097df8668f2",
                "condition": {
                    "host_labels": {
                        "cmk/docker_object": "container"
                    }
                },
                "value": ("service", "Docker container status"),
                "options": {
                    "description": 'Make all docker container host states base on the "Docker container status" service',
                },
            },
        ],
        # Enable HW/SW inventory + status data inventory for docker
        # containers, kubernetes objects and Check-MK servers by default to
        # simplify the setup procedure for them
        "active_checks": {
            "cmk_inv": [
                {
                    "id": "7ba2ac2a-5a49-47ce-bc3c-1630fb191c7f",
                    "condition": {
                        "host_labels": {
                            "cmk/docker_object": "node",
                        }
                    },
                    "value": {
                        "status_data_inventory": True
                    },
                },
                {
                    "id": "b4b151f9-c7cc-4127-87a6-9539931fcd73",
                    "condition": {
                        "host_labels": {
                            "cmk/check_mk_server": "yes",
                        }
                    },
                    "value": {
                        "status_data_inventory": True
                    },
                },
                {
                    "id": "2527cb37-e9da-4a15-a7d9-80825a7f6661",
                    "condition": {
                        "host_labels": {
                            "cmk/kubernetes": "yes",
                        }
                    },
                    "value": {
                        "status_data_inventory": True
                    },
                },
            ]
        },
        # Interval for HW/SW-Inventory check
        "extra_service_conf": {
            "check_interval": [
                {
                    "id": "b3847203-84b3-4f5b-ac67-0f06d4403905",
                    "condition": {
                        "service_description": [{
                            "$regex": "Check_MK HW/SW Inventory$"
                        }]
                    },
                    "value": 1440,
                    "options": {
                        "description": "Restrict HW/SW-Inventory to once a day"
                    },
                },
            ],
        },
        # Disable unreachable notifications by default
        "extra_host_conf": {
            "notification_options": [
                {
                    "id": "814bf932-6341-4f96-983d-283525b5416d",
                    "condition": {},
                    "value": "d,r,f,s",
                },
            ],
        },
        # Periodic service discovery
        "periodic_discovery": [
            {
                "id": "95a56ffc-f17e-44e7-a162-be656f19bedf",
                "condition": {},
                "value": {
                    "severity_unmonitored": 1,
                    "severity_vanished": 0,
                    "check_interval": 120.0,
                },
                "options": {
                    "description": "Perform every two hours a service discovery"
                },
            },
        ],
        # Include monitoring of checkmk's tmpfs
        "inventory_df_rules": [
            {
                "id": "b0ee8a51-703c-47e4-aec4-76430281604d",
                "condition": {
                    "host_labels": {
                        "cmk/check_mk_server": "yes",
                    },
                },
                "value": {
                    "ignore_fs_types": ["tmpfs", "nfs", "smbfs", "cifs", "iso9660"],
                    "never_ignore_mountpoints": ["~.*/omd/sites/[^/]+/tmp$"],
                },
            },
        ],
    }

    rulesets = FolderRulesets(Folder.root_folder())
    rulesets.load()
    rulesets.from_config(Folder.root_folder(), ruleset_config)
    rulesets.save()

    notification_rules = [
        {
            "allow_disable": True,
            "contact_all": False,
            "contact_all_with_email": False,
            "contact_object": True,
            "description": "Notify all contacts of a host/service via HTML email",
            "disabled": False,
            "notify_plugin": ("mail", {}),
        },
    ]
    save_notification_rules(notification_rules)
def perform_rename_hosts(renamings, job_interface=None):
    """Rename hosts mechanism

    Args:
        renamings: tuple consisting of folder, oldname, newname
        job_interface: only relevant for WATO interaction, allows to update the
            interface with the current update info

    Returns:
        A tuple ``(action_counts, auth_problems)`` where ``action_counts`` maps
        each performed action to the number of times it was performed and
        ``auth_problems`` lists ``(oldname, exception)`` pairs for hosts that
        could not be renamed due to a ``MKAuthException``.
    """

    def update_interface(message):
        # No-op unless a WATO background job interface was handed in.
        if job_interface is None:
            return
        job_interface.send_progress_update(_(message))

    actions = []
    all_hosts = Host.all()

    # 1. Fix WATO configuration itself ----------------
    auth_problems = []
    successful_renamings = []
    update_interface("Renaming WATO configuration...")
    for folder, oldname, newname in renamings:
        try:
            this_host_actions = []
            update_interface("Renaming host(s) in folders...")
            this_host_actions += _rename_host_in_folder(folder, oldname, newname)
            update_interface("Renaming host(s) in cluster nodes...")
            this_host_actions += _rename_host_as_cluster_node(all_hosts, oldname, newname)
            update_interface("Renaming host(s) in parents...")
            this_host_actions += _rename_host_in_parents(oldname, newname)
            update_interface("Renaming host(s) in rulesets...")
            this_host_actions += _rename_host_in_rulesets(folder, oldname, newname)
            update_interface("Renaming host(s) in BI aggregations...")
            this_host_actions += _rename_host_in_bi(oldname, newname)
            actions += this_host_actions
            successful_renamings.append((folder, oldname, newname))
        except MKAuthException as e:
            # Keep going with the remaining renamings; report this one to the caller.
            auth_problems.append((oldname, e))

    # 2. Checkmk stuff ------------------------------------------------
    # Only hosts whose WATO rename succeeded are renamed in the monitoring core data.
    update_interface("Renaming host(s) in base configuration, rrd, history files, etc.")
    update_interface("This might take some time and involves a core restart...")
    action_counts = _rename_hosts_in_check_mk(successful_renamings)

    # 3. Notification settings ----------------------------------------------
    # Notification rules - both global and users' ones
    update_interface("Renaming host(s) in notification rules...")
    for folder, oldname, newname in successful_renamings:
        actions += _rename_host_in_event_rules(oldname, newname)
        actions += _rename_host_in_multisite(oldname, newname)

    # Merge the per-host action lists into the counts from step 2.
    for action in actions:
        action_counts.setdefault(action, 0)
        action_counts[action] += 1

    update_interface("Calling final hooks")
    call_hook_hosts_changed(Folder.root_folder())
    return action_counts, auth_problems
def _update_host_custom_attrs():
    """Rewrite all hosts.mk files after custom host attributes changed.

    Reloads the GUI config, drops cached folder data and rewrites the host
    files so they reflect the updated attribute definitions.
    """
    config.load_config()
    Folder.invalidate_caches()
    Folder.root_folder().rewrite_hosts_files()
def generate(self):
    """Create the initial WATO sample configuration (legacy variant).

    Writes hard-coded global settings (including the list of checks that
    use the new service descriptions), a catch-all contact group, the
    builtin tag configuration, factory-default rulesets and a single
    notification rule.
    """
    save_global_settings({
        "use_new_descriptions_for": [
            "df", "df_netapp", "df_netapp32", "esx_vsphere_datastores", "hr_fs",
            "vms_diskstat.df", "zfsget", "ps", "ps.perf", "wmic_process",
            "services", "logwatch", "logwatch.groups", "cmk-inventory",
            "hyperv_vms", "ibm_svc_mdiskgrp", "ibm_svc_system",
            "ibm_svc_systemstats.diskio", "ibm_svc_systemstats.iops",
            "ibm_svc_systemstats.disk_latency", "ibm_svc_systemstats.cache",
            "casa_cpu_temp", "cmciii.temp", "cmciii.psm_current",
            "cmciii_lcp_airin", "cmciii_lcp_airout", "cmciii_lcp_water",
            "etherbox.temp", "liebert_bat_temp", "nvidia.temp", "ups_bat_temp",
            "innovaphone_temp", "enterasys_temp", "raritan_emx",
            "raritan_pdu_inlet", "mknotifyd", "mknotifyd.connection",
            "postfix_mailq", "nullmailer_mailq", "barracuda_mailqueues",
            "qmail_stats", "http", "mssql_backup", "mssql_counters.cache_hits",
            "mssql_counters.transactions", "mssql_counters.locks",
            "mssql_counters.sqlstats", "mssql_counters.pageactivity",
            "mssql_counters.locks_per_batch", "mssql_counters.file_sizes",
            "mssql_databases", "mssql_datafiles", "mssql_tablespaces",
            "mssql_transactionlogs", "mssql_versions",
        ],
        "enable_rulebased_notifications": True,
        "ui_theme": "facelift",
        "lock_on_logon_failures": 10,
    })

    # A contact group for all hosts and services
    groups = {
        "contact": {'all': {'alias': u'Everything'}},
    }
    save_group_information(groups)

    self._initialize_tag_config()

    # Rules that match the upper host tag definition
    ruleset_config = {
        # Make the tag 'offline' remove hosts from the monitoring
        'only_hosts': [
            {
                'condition': {'host_tags': {'criticality': {'$ne': 'offline'}}},
                'value': True,
                'options': {'description': u'Do not monitor hosts with the tag "offline"'},
            },
        ],
        # Rule for WAN hosts with adapted PING levels
        'ping_levels': [
            {
                'condition': {'host_tags': {'networking': 'wan',}},
                'value': {'loss': (80.0, 100.0), 'packets': 6, 'timeout': 20, 'rta': (1500.0, 3000.0)},
                'options': {'description': u'Allow longer round trip times when pinging WAN hosts'},
            },
        ],
        # All hosts should use SNMP v2c if not specially tagged
        'bulkwalk_hosts': [
            {
                'condition': {'host_tags': {'snmp': 'snmp', 'snmp_ds': {'$ne': 'snmp-v1'},},},
                'value': True,
                'options': {'description': u'Hosts with the tag "snmp-v1" must not use bulkwalk'},
            },
        ],
        # Put all hosts and the contact group 'all'
        'host_contactgroups': [
            {
                'condition': {},
                'value': 'all',
                'options': {'description': u'Put all hosts into the contact group "all"'},
            },
        ],
        # Interval for HW/SW-Inventory check
        'extra_service_conf': {
            'check_interval': [
                {
                    'condition': {'service_description': [{'$regex': 'Check_MK HW/SW Inventory$'}]},
                    'value': 1440,
                    'options': {'description': u'Restrict HW/SW-Inventory to once a day'},
                },
            ],
        },
        # Disable unreachable notifications by default
        'extra_host_conf': {
            'notification_options': [
                {'condition': {}, 'value': 'd,r,f,s'},
            ],
        },
        # Periodic service discovery
        'periodic_discovery': [
            {
                'condition': {},
                'value': {
                    'severity_unmonitored': 1,
                    'severity_vanished': 0,
                    'check_interval': 120.0,
                    'inventory_check_do_scan': True
                },
                'options': {'description': u'Perform every two hours a service discovery'},
            },
        ],
    }

    rulesets = FolderRulesets(Folder.root_folder())
    rulesets.load()
    rulesets.from_config(Folder.root_folder(), ruleset_config)
    rulesets.save()

    notification_rules = [
        {
            'allow_disable': True,
            'contact_all': False,
            'contact_all_with_email': False,
            'contact_object': True,
            'description': 'Notify all contacts of a host/service via HTML email',
            'disabled': False,
            'notify_plugin': ('mail', {}),
        },
    ]
    save_notification_rules(notification_rules)
def compute_foldertree():
    """Compute the WATO folder tree with per-folder host counts.

    Queries livestatus for the number of hosts per WATO folder, aggregates
    the counts up the folder hierarchy, nests child folders below their
    parents and finally collapses empty single-child top levels.

    Returns:
        A dict mapping folder paths to tree entries of the form
        ``{"title", ".path", ".num_hosts", ".folders"}`` where ``".folders"``
        recursively holds the child entries.
    """
    sites.live().set_prepend_site(True)
    query = "GET hosts\nStats: state >= 0\nColumns: filename"
    hosts = sites.live().query(query)
    sites.live().set_prepend_site(False)

    def get_folder(path, num=0):
        # Build a single tree entry for the folder at `path`.
        folder = Folder.folder(path)
        return {
            "title": folder.title() or path.split("/")[-1],
            ".path": path,
            ".num_hosts": num,
            ".folders": {},
        }

    # After the query we have a list of lists where each
    # row is a folder with the number of hosts on this level.
    #
    # Now get number of hosts by folder
    # Count all childs for each folder
    user_folders = {}
    for _site, filename, num in sorted(hosts):
        # Remove leading /wato/
        wato_folder_path = filename[6:]

        # Loop through all levels of this folder to add the
        # host count to all parent levels
        path_parts = wato_folder_path.split("/")
        for num_parts in range(len(path_parts)):
            this_folder_path = "/".join(path_parts[:num_parts])

            if Folder.folder_exists(this_folder_path):
                if this_folder_path not in user_folders:
                    user_folders[this_folder_path] = get_folder(this_folder_path, num)
                else:
                    user_folders[this_folder_path][".num_hosts"] += num

    #
    # Now build the folder tree
    #
    for folder_path, folder in sorted(user_folders.items(), reverse=True):
        if not folder_path:
            continue  # the root folder stays at top level

        folder_parts = folder_path.split("/")
        parent_folder = "/".join(folder_parts[:-1])

        user_folders[parent_folder][".folders"][folder_path] = folder
        del user_folders[folder_path]

    #
    # Now reduce the tree by e.g. removing top-level parts which the user is not
    # permitted to see directly. Example:
    #   Locations
    #    -> Hamburg: Permitted to see all hosts
    #    -> Munich: Permitted to see no host
    # In this case, where only a single child with hosts is available, remove the
    # top level
    #
    def reduce_tree(folders):
        # BUGFIX: iterate over a snapshot. The loop body inserts the child
        # entry and deletes the parent entry from `folders` (and recurses),
        # which raises "dictionary changed size during iteration" on
        # Python 3 when iterating folders.items() directly.
        for folder_path, folder in list(folders.items()):
            if folder_path not in folders:
                continue  # entry was already removed by a recursive call
            if len(folder[".folders"]) == 1 and folder[".num_hosts"] == 0:
                child_path, child_folder = list(folder[".folders"].items())[0]
                folders[child_path] = child_folder
                del folders[folder_path]

                reduce_tree(folders)

    reduce_tree(user_folders)
    return user_folders
def generate(self):
    """Create the initial WATO sample configuration (legacy fs_cap variant).

    Additionally writes ``etc/check_mk/conf.d/fs_cap.mk`` to enable the
    ``df_use_fs_used_as_metric_name`` setting for the site.
    """
    save_global_settings(self._initial_global_settings())

    # Enable the "fs_used" metric name via a dedicated config file
    content = "# Written by WATO Basic config (%s)\n\n" % time.strftime("%Y-%m-%d %H:%M:%S")
    content += 'df_use_fs_used_as_metric_name = True\n'
    store.save_file(
        os.path.join(cmk.utils.paths.omd_root, 'etc/check_mk/conf.d/fs_cap.mk'), content)

    # A contact group for all hosts and services
    groups = {
        "contact": {'all': {'alias': u'Everything'}},
    }
    save_group_information(groups)

    self._initialize_tag_config()

    # Rules that match the upper host tag definition
    ruleset_config = {
        # Make the tag 'offline' remove hosts from the monitoring
        'only_hosts': [
            {
                'condition': {'host_tags': {'criticality': {'$ne': 'offline'}}},
                'value': True,
                'options': {'description': u'Do not monitor hosts with the tag "offline"'},
            },
        ],
        # Rule for WAN hosts with adapted PING levels
        'ping_levels': [
            {
                'condition': {'host_tags': {'networking': 'wan',}},
                'value': {'loss': (80.0, 100.0), 'packets': 6, 'timeout': 20, 'rta': (1500.0, 3000.0)},
                'options': {'description': u'Allow longer round trip times when pinging WAN hosts'},
            },
        ],
        # All hosts should use SNMP v2c if not specially tagged
        'bulkwalk_hosts': [
            {
                'condition': {'host_tags': {'snmp': 'snmp', 'snmp_ds': {'$ne': 'snmp-v1'},},},
                'value': True,
                'options': {'description': u'Hosts with the tag "snmp-v1" must not use bulkwalk'},
            },
        ],
        # Put all hosts and the contact group 'all'
        'host_contactgroups': [
            {
                'condition': {},
                'value': 'all',
                'options': {'description': u'Put all hosts into the contact group "all"'},
            },
        ],
        # Docker container specific host check commands
        'host_check_commands': [
            {
                'condition': {'host_labels': {u'cmk/docker_object': u'container'}},
                'value': ('service', u'Docker container status'),
                'options': {
                    'description': u'Make all docker container host states base on the "Docker container status" service',
                },
            },
        ],
        # Enable HW/SW inventory + status data inventory for docker containers by default to
        # simplify the setup procedure of docker monitoring
        'active_checks': {
            'cmk_inv': [
                {
                    'condition': {'host_labels': {u'cmk/docker_object': u'node'}},
                    'value': {'status_data_inventory': True},
                },
            ]
        },
        # Interval for HW/SW-Inventory check
        'extra_service_conf': {
            'check_interval': [
                {
                    'condition': {'service_description': [{'$regex': 'Check_MK HW/SW Inventory$'}]},
                    'value': 1440,
                    'options': {'description': u'Restrict HW/SW-Inventory to once a day'},
                },
            ],
        },
        # Disable unreachable notifications by default
        'extra_host_conf': {
            'notification_options': [
                {'condition': {}, 'value': 'd,r,f,s'},
            ],
        },
        # Periodic service discovery
        'periodic_discovery': [
            {
                'condition': {},
                'value': {
                    'severity_unmonitored': 1,
                    'severity_vanished': 0,
                    'check_interval': 120.0,
                    'inventory_check_do_scan': True
                },
                'options': {'description': u'Perform every two hours a service discovery'},
            },
        ],
    }

    rulesets = FolderRulesets(Folder.root_folder())
    rulesets.load()
    rulesets.from_config(Folder.root_folder(), ruleset_config)
    rulesets.save()

    notification_rules = [
        {
            'allow_disable': True,
            'contact_all': False,
            'contact_all_with_email': False,
            'contact_object': True,
            'description': 'Notify all contacts of a host/service via HTML email',
            'disabled': False,
            'notify_plugin': ('mail', {}),
        },
    ]
    save_notification_rules(notification_rules)
def generate(self):
    """Create the initial WATO sample configuration (variant with rule ids).

    Writes the initial global settings, a catch-all contact group, the
    builtin tag configuration, factory-default rulesets (each rule carrying
    a fixed UUID) and a single notification rule.
    """
    save_global_settings(self._initial_global_settings())

    # A contact group for all hosts and services
    groups = {
        "contact": {'all': {'alias': u'Everything'}},
    }
    save_group_information(groups)

    self._initialize_tag_config()

    # Rules that match the upper host tag definition
    ruleset_config = {
        # Make the tag 'offline' remove hosts from the monitoring
        'only_hosts': [
            {
                'id': '10843c55-11ea-4eb2-bfbc-bce65cd2ae22',
                'condition': {'host_tags': {'criticality': {'$ne': 'offline'}}},
                'value': True,
                'options': {'description': u'Do not monitor hosts with the tag "offline"'},
            },
        ],
        # Rule for WAN hosts with adapted PING levels
        'ping_levels': [
            {
                'id': '0365b634-30bf-40a3-8516-08e86051508e',
                'condition': {'host_tags': {'networking': 'wan',}},
                'value': {'loss': (80.0, 100.0), 'packets': 6, 'timeout': 20, 'rta': (1500.0, 3000.0)},
                'options': {'description': u'Allow longer round trip times when pinging WAN hosts'},
            },
        ],
        # All hosts should use SNMP v2c if not specially tagged
        'bulkwalk_hosts': [
            {
                'id': 'b92a5406-1d57-4f1d-953d-225b111239e5',
                'condition': {'host_tags': {'snmp': 'snmp', 'snmp_ds': {'$ne': 'snmp-v1'},},},
                'value': True,
                'options': {'description': u'Hosts with the tag "snmp-v1" must not use bulkwalk'},
            },
        ],
        # Put all hosts and the contact group 'all'
        'host_contactgroups': [
            {
                'id': 'efd67dab-68f8-4d3c-a417-9f7e29ab48d5',
                'condition': {},
                'value': 'all',
                'options': {'description': u'Put all hosts into the contact group "all"'},
            },
        ],
        # Docker container specific host check commands
        'host_check_commands': [
            {
                'id': '24da4ccd-0d1b-40e3-af87-0097df8668f2',
                'condition': {'host_labels': {u'cmk/docker_object': u'container'}},
                'value': ('service', u'Docker container status'),
                'options': {
                    'description': u'Make all docker container host states base on the "Docker container status" service',
                },
            },
        ],
        # Enable HW/SW inventory + status data inventory for docker containers and Check-MK servers by default to
        # simplify the setup procedure for them
        'active_checks': {
            'cmk_inv': [
                {
                    'id': '7ba2ac2a-5a49-47ce-bc3c-1630fb191c7f',
                    'condition': {'host_labels': {u'cmk/docker_object': u'node',}},
                    'value': {'status_data_inventory': True},
                },
                {
                    'id': 'b4b151f9-c7cc-4127-87a6-9539931fcd73',
                    'condition': {'host_labels': {u'cmk/check_mk_server': u'yes',}},
                    'value': {'status_data_inventory': True},
                },
            ]
        },
        # Interval for HW/SW-Inventory check
        'extra_service_conf': {
            'check_interval': [
                {
                    'id': 'b3847203-84b3-4f5b-ac67-0f06d4403905',
                    'condition': {'service_description': [{'$regex': 'Check_MK HW/SW Inventory$'}]},
                    'value': 1440,
                    'options': {'description': u'Restrict HW/SW-Inventory to once a day'},
                },
            ],
        },
        # Disable unreachable notifications by default
        'extra_host_conf': {
            'notification_options': [
                {
                    'id': '814bf932-6341-4f96-983d-283525b5416d',
                    'condition': {},
                    'value': 'd,r,f,s'
                },
            ],
        },
        # Periodic service discovery
        'periodic_discovery': [
            {
                'id': '95a56ffc-f17e-44e7-a162-be656f19bedf',
                'condition': {},
                'value': {
                    'severity_unmonitored': 1,
                    'severity_vanished': 0,
                    'check_interval': 120.0,
                    'inventory_check_do_scan': True
                },
                'options': {'description': u'Perform every two hours a service discovery'},
            },
        ],
        # Include monitoring of checkmk's tmpfs
        'inventory_df_rules': [
            {
                'id': 'b0ee8a51-703c-47e4-aec4-76430281604d',
                'condition': {'host_labels': {u'cmk/check_mk_server': u'yes',},},
                'value': {
                    'ignore_fs_types': ['tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660'],
                    'never_ignore_mountpoints': [u'~.*/omd/sites/[^/]+/tmp$']
                }
            },
        ],
    }

    rulesets = FolderRulesets(Folder.root_folder())
    rulesets.load()
    rulesets.from_config(Folder.root_folder(), ruleset_config)
    rulesets.save()

    notification_rules = [
        {
            'allow_disable': True,
            'contact_all': False,
            'contact_all_with_email': False,
            'contact_object': True,
            'description': 'Notify all contacts of a host/service via HTML email',
            'disabled': False,
            'notify_plugin': ('mail', {}),
        },
    ]
    save_notification_rules(notification_rules)
def save(self):
    """Save all rulesets of all folders recursively.

    Starts at the WATO root folder and writes the rulesets of every folder
    in the hierarchy back to disk.
    """
    self._save_rulesets_recursively(Folder.root_folder())
def _update_tag_dependencies():
    """Rewrite all hosts.mk files after the tag configuration changed.

    Reloads the config, drops cached folder data and rewrites the host
    files so they reflect the updated tag definitions.
    """
    load_config()
    Folder.invalidate_caches()
    Folder.root_folder().rewrite_hosts_files()
def load_wato_data(self):
    """Load the WATO folder tree and the resulting folder selection.

    Caches the root folder, resets the path lookup table and records the
    load timestamp (presumably used for cache invalidation — confirm with
    callers of ``last_wato_data_update``).
    """
    self.tree = Folder.root_folder()
    self.path_to_tree: Dict[str, str] = {}  # will be filled by self.folder_selection
    self.selection = list(self.folder_selection(self.tree))
    self.last_wato_data_update = time.time()
def analyse_ruleset(self, hostname, svc_desc_or_item, svc_desc):
    """Compute the effective value of this ruleset for one host/service.

    Evaluates all enabled rules that match the given host and service and
    combines their values according to the ruleset's match type:

    * ``"all"``  - collect every matching rule's value into a list
    * ``"list"`` - concatenate the matching rules' list values
    * ``"dict"`` - merge matching dicts, earlier rules taking precedence
    * other      - first match wins, evaluation stops immediately

    Returns a tuple ``(effective_value, effective_rules)`` where
    ``effective_rules`` is a list of ``(folder, rule_index, rule)`` triples;
    ``(None, [])`` if no rule matched.
    """
    resultlist = []
    resultdict: Dict[str, Any] = {}
    effectiverules = []
    for folder, rule_index, rule in self.get_rules():
        if rule.is_disabled():
            continue

        if not rule.matches_host_and_item(
                Folder.current(), hostname, svc_desc_or_item, svc_desc):
            continue

        if self.match_type() == "all":
            resultlist.append(rule.value)
            effectiverules.append((folder, rule_index, rule))

        elif self.match_type() == "list":
            assert isinstance(rule.value, list)
            resultlist += rule.value
            effectiverules.append((folder, rule_index, rule))

        elif self.match_type() == "dict":
            # It may happen that a ruleset started with non-dict values. For example
            # a ruleset that only has a WARN and CRIT threshold in a two element
            # tuple.
            # When we then have to extend the ruleset to hold dict values and change
            # the match type to dict, we normally do this by adding a top-level
            # Transform() valuespec which encapsulates the Dictionary() valuespec.
            # The logic for migrating the parameters is implemented in the forth()
            # method of the transform.
            # Users which already have saved rules using the previous valuespec now
            # have tuples in their ruleset and reach this code with other data
            # structures than dictionaries.
            # We currently have no 100% safe way of automatically fixing this on the
            # fly. The best we can do is print a meaningful error message to the user.
            # Would be better to do these transforms once during site update. The
            # cmk-update-config command would be a good place to do this.
            if not isinstance(rule.value, dict):
                raise MKGeneralException(
                    _('Failed to process rule #%d of ruleset "%s" in folder "%s". '
                      "The value of a rule is incompatible to the current rule "
                      "specification. You can try fix this by opening the rule "
                      "for editing and save the rule again without modification.") %
                    (rule_index, self.title(), folder.title()))

            # Earlier (higher priority) rules win: merge the current rule's
            # dict below the already accumulated result.
            new_result = rule.value.copy()
            new_result.update(resultdict)
            resultdict = new_result
            effectiverules.append((folder, rule_index, rule))

        else:
            # First-match semantics: stop at the first matching rule.
            return rule.value, [(folder, rule_index, rule)]

    if self.match_type() in ("list", "all"):
        return resultlist, effectiverules

    if self.match_type() == "dict":
        return resultdict, effectiverules

    return None, []  # No match
def __init__(self) -> None:
    """Initialize the mode, remembering the currently selected WATO folder."""
    super().__init__()
    self._folder = Folder.current()
def _import(self, csv_reader: CSVReader) -> ActionResult:
    """Import hosts from an uploaded CSV file into the current folder.

    Creates one host per non-empty CSV row, collects per-row failures,
    triggers agent baking for the created hosts and removes the uploaded
    file. Redirects either to the bulk discovery page (when service
    detection was requested and at least one host was created) or back to
    the folder view.
    """
    if self._has_title_line:
        try:
            next(csv_reader)  # skip header
        except StopIteration:
            pass

    num_succeeded, num_failed = 0, 0
    fail_messages = []
    selected = []
    imported_hosts = []

    for row_num, row in enumerate(csv_reader):
        if not row:
            continue  # skip empty lines

        host_name, attributes = self._get_host_info_from_row(row, row_num)
        try:
            # Baking is deferred until all hosts have been created (below).
            Folder.current().create_hosts(
                [(host_name, attributes, None)],
                bake_hosts=False,
            )
            imported_hosts.append(host_name)
            selected.append("_c_%s" % host_name)
            num_succeeded += 1
        except Exception as e:
            fail_messages.append(
                _("Failed to create a host from line %d: %s") % (csv_reader.line_num, e)
            )
            num_failed += 1

    bakery.try_bake_agents_for_hosts(imported_hosts)

    self._delete_csv_file()

    msg = _("Imported %d hosts into the current folder.") % num_succeeded
    if num_failed:
        # BUGFIX: message said "errors occured" (misspelled)
        msg += "<br><br>" + (_("%d errors occurred:") % num_failed)
        msg += "<ul>"
        for fail_msg in fail_messages:
            msg += "<li>%s</li>" % fail_msg
        msg += "</ul>"

    folder_path = Folder.current().path()

    if num_succeeded > 0 and request.var("do_service_detection") == "1":
        # Create a new selection for performing the bulk discovery
        user.set_rowselection(
            weblib.selection_id(),
            "wato-folder-/" + folder_path,
            selected,
            "set",
        )
        return redirect(
            mode_url(
                "bulkinventory",
                _bulk_inventory="1",
                show_checkboxes="1",
                folder=folder_path,
                selection=weblib.selection_id(),
            )
        )
    flash(msg)
    return redirect(mode_url("folder", folder=folder_path))
def _back_url(self):
    """Return the URL of the current WATO folder to link back to."""
    return Folder.current().url()
def buttons(self):
    """Render the context button linking back to the current folder."""
    html.context_button(_("Folder"), Folder.current().url(), "back")
def load(self):
    """Load the rulesets of all folders, starting at the WATO root folder."""
    self._load_rulesets_recursively(Folder.root_folder())
def fixture_created_host_url(with_admin_login) -> str:
    """Create a host named "host" in the root folder and return its edit URL.

    The ``with_admin_login`` fixture is required so the host creation runs
    with sufficient permissions.
    """
    folder = Folder.root_folder()
    folder.create_hosts([("host", {}, [])])
    return "wato.py?folder=&host=host&mode=edit_host"
def load(self):
    """Load only the single ruleset named ``self._name``, recursively."""
    self._initialize_rulesets(only_varname=self._name)
    self._load_rulesets_recursively(Folder.root_folder(), only_varname=self._name)
def execute(self, request):
    """Run a network scan on the folder addressed by ``request.folder_path``."""
    folder = Folder.folder(request.folder_path)
    return do_network_scan(folder)
def wato_folder_choices_autocompleter(value: str, params: Dict) -> Choices:
    """Return the WATO folder dropdown choices for the autocompleter.

    select2 omits empty strings ("") as option, therefore the path of the
    Main folder ("") is replaced by the placeholder "@main".
    """
    choices: Choices = []
    for folder_path, folder_title in Folder.folder_choices_fulltitle():
        if folder_path == "":
            choices.append(("@main", folder_title))
        else:
            choices.append((folder_path, folder_title))
    return choices
def page(self):
    """Render the host edit/create form.

    Shows validation errors for existing hosts, a lock notice when the
    folder's hosts are locked, and the attribute form itself.
    """
    # Show outcome of host validation. Do not validate new hosts
    errors = None
    if self._mode == "edit":
        errors = (validate_all_hosts([self._host.name()]).get(
            self._host.name(), []) + self._host.validation_errors())

    if errors:
        html.open_div(class_="info")
        # BUGFIX: keyword was misspelled "boder", which emitted a bogus
        # "boder" HTML attribute instead of border="0".
        html.open_table(class_="validationerror",
                        border="0",
                        cellspacing="0",
                        cellpadding="0")
        html.open_tr()

        html.open_td(class_="img")
        html.icon("validation_error")
        html.close_td()

        html.open_td()
        html.open_p()
        html.h3(_("Warning: This host has an invalid configuration!"))
        html.open_ul()
        for error in errors:
            html.li(error)
        html.close_ul()
        html.close_p()

        if html.form_submitted():
            html.br()
            html.b(_("Your changes have been saved nevertheless."))
        html.close_td()

        html.close_tr()
        html.close_table()
        html.close_div()

    lock_message = ""
    locked_hosts = Folder.current().locked_hosts()
    if locked_hosts:
        if locked_hosts is True:
            lock_message = _("Host attributes locked (You cannot edit this host)")
        elif isinstance(locked_hosts, str):
            lock_message = locked_hosts
    if lock_message:
        html.div(lock_message, class_="info")

    html.begin_form("edit_host", method="POST")
    html.prevent_password_auto_completion()

    basic_attributes = [
        # attribute name, valuespec, default value
        ("host", self._vs_host_name(), self._host.name()),
    ]

    if self._is_cluster():
        basic_attributes += [
            # attribute name, valuespec, default value
            (
                "nodes",
                self._vs_cluster_nodes(),
                self._host.cluster_nodes() if self._host else [],
            ),
        ]

    configure_attributes(
        new=self._mode != "edit",
        hosts={self._host.name(): self._host} if self._mode != "new" else {},
        for_what="host" if not self._is_cluster() else "cluster",
        parent=Folder.current(),
        basic_attributes=basic_attributes,
    )

    if self._mode != "edit":
        html.set_focus("host")

    forms.end()
    html.hidden_fields()
    html.end_form()
def generate(self):
    """Create the initial WATO sample configuration (variant without rule ids).

    Writes the initial global settings, a catch-all contact group, the
    builtin tag configuration, factory-default rulesets and a single
    notification rule.
    """
    save_global_settings(self._initial_global_settings())

    # A contact group for all hosts and services
    groups = {
        "contact": {'all': {'alias': u'Everything'}},
    }
    save_group_information(groups)

    self._initialize_tag_config()

    # Rules that match the upper host tag definition
    ruleset_config = {
        # Make the tag 'offline' remove hosts from the monitoring
        'only_hosts': [{
            'condition': {'host_tags': {'criticality': {'$ne': 'offline'}}},
            'value': True,
            'options': {'description': u'Do not monitor hosts with the tag "offline"'},
        },],
        # Rule for WAN hosts with adapted PING levels
        'ping_levels': [{
            'condition': {'host_tags': {'networking': 'wan',}},
            'value': {'loss': (80.0, 100.0), 'packets': 6, 'timeout': 20, 'rta': (1500.0, 3000.0)},
            'options': {'description': u'Allow longer round trip times when pinging WAN hosts'},
        },],
        # All hosts should use SNMP v2c if not specially tagged
        'bulkwalk_hosts': [{
            'condition': {'host_tags': {'snmp': 'snmp', 'snmp_ds': {'$ne': 'snmp-v1'},},},
            'value': True,
            'options': {'description': u'Hosts with the tag "snmp-v1" must not use bulkwalk'},
        },],
        # Put all hosts and the contact group 'all'
        'host_contactgroups': [{
            'condition': {},
            'value': 'all',
            'options': {'description': u'Put all hosts into the contact group "all"'},
        },],
        # Docker container specific host check commands
        'host_check_commands': [{
            'condition': {'host_labels': {u'cmk/docker_object': u'container'}},
            'value': ('service', u'Docker container status'),
            'options': {
                'description': u'Make all docker container host states base on the "Docker container status" service',
            },
        },],
        # Enable HW/SW inventory + status data inventory for docker containers and Check-MK servers by default to
        # simplify the setup procedure for them
        'active_checks': {
            'cmk_inv': [
                {
                    'condition': {'host_labels': {u'cmk/docker_object': u'node',}},
                    'value': {'status_data_inventory': True},
                },
                {
                    'condition': {'host_labels': {u'cmk/check_mk_server': u'yes',}},
                    'value': {'status_data_inventory': True},
                },
            ]
        },
        # Interval for HW/SW-Inventory check
        'extra_service_conf': {
            'check_interval': [{
                'condition': {'service_description': [{'$regex': 'Check_MK HW/SW Inventory$'}]},
                'value': 1440,
                'options': {'description': u'Restrict HW/SW-Inventory to once a day'},
            },],
        },
        # Disable unreachable notifications by default
        'extra_host_conf': {
            'notification_options': [{
                'condition': {},
                'value': 'd,r,f,s'
            },],
        },
        # Periodic service discovery
        'periodic_discovery': [{
            'condition': {},
            'value': {
                'severity_unmonitored': 1,
                'severity_vanished': 0,
                'check_interval': 120.0,
                'inventory_check_do_scan': True
            },
            'options': {'description': u'Perform every two hours a service discovery'},
        },],
        # Include monitoring of checkmk's tmpfs
        'inventory_df_rules': [{
            'condition': {'host_labels': {u'cmk/check_mk_server': u'yes',},},
            'value': {
                'ignore_fs_types': ['tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660'],
                'never_ignore_mountpoints': [u'~.*/omd/sites/[^/]+/tmp$']
            }
        },],
    }

    rulesets = FolderRulesets(Folder.root_folder())
    rulesets.load()
    rulesets.from_config(Folder.root_folder(), ruleset_config)
    rulesets.save()

    notification_rules = [
        {
            'allow_disable': True,
            'contact_all': False,
            'contact_all_with_email': False,
            'contact_object': True,
            'description': 'Notify all contacts of a host/service via HTML email',
            'disabled': False,
            'notify_plugin': ('mail', {}),
        },
    ]
    save_notification_rules(notification_rules)
def execute(self, api_request):
    """Run a network scan on the folder addressed by ``api_request.folder_path``."""
    folder = Folder.folder(api_request.folder_path)
    return _do_network_scan(folder)
def load(self):
    """Load all rules of all folders, starting at the WATO root folder."""
    self._initialize_rulesets()
    self._load_rulesets_recursively(Folder.root_folder())
def _show_patterns(self):
    """Render the logfile pattern editor table.

    Lists every rule of the "logwatch_rules" ruleset, marks which pattern
    would match self._match_txt first (only the first match across all rules
    determines the resulting state) and offers an edit link per rule.
    """
    import cmk.gui.logwatch as logwatch
    collection = SingleRulesetRecursively("logwatch_rules")
    collection.load()
    ruleset = collection.get("logwatch_rules")

    html.h3(_("Logfile patterns"))
    if ruleset.is_empty():
        html.open_div(class_="info")
        html.write_text(
            "There are no logfile patterns defined. You may create "
            'logfile patterns using the <a href="%s">Rule Editor</a>.' % folder_preserving_link([
                ("mode", "edit_ruleset"),
                ("varname", "logwatch_rules"),
            ]))
        html.close_div()

    # Loop all rules for this ruleset
    already_matched = False
    abs_rulenr = 0
    for folder, rulenr, rule in ruleset.get_rules():
        # Check if this rule applies to the given host/service
        if self._hostname:
            service_desc = self._get_service_description(self._hostname, "logwatch", self._item)

            # If hostname (and maybe filename) try match it
            rule_matches = rule.matches_host_and_item(
                Folder.current(), self._hostname, self._item, service_desc)
        else:
            # If no host/file given match all rules
            rule_matches = True

        with foldable_container(
                treename="rule",
                id_=str(abs_rulenr),
                isopen=True,
                title=HTML("<b>Rule #%d</b>" % (abs_rulenr + 1)),
                indent=False,
        ), table_element("pattern_editor_rule_%d" % abs_rulenr, sortable=False,
                         css="logwatch") as table:
            abs_rulenr += 1

            # TODO: What's this?
            pattern_list = rule.value
            if isinstance(pattern_list, dict):
                pattern_list = pattern_list["reclassify_patterns"]

            # Each rule can hold no, one or several patterns. Loop them all here
            for state, pattern, comment in pattern_list:
                match_class = ""
                disp_match_txt = HTML("")
                match_img = ""
                if rule_matches:
                    # Applies to the given host/service
                    matched = re.search(pattern, self._match_txt)
                    if matched:
                        # Prepare highlighted search txt
                        match_start = matched.start()
                        match_end = matched.end()
                        disp_match_txt = (escape_to_html(self._match_txt[:match_start]) +
                                          HTMLWriter.render_span(
                                              self._match_txt[match_start:match_end],
                                              class_="match") +
                                          escape_to_html(self._match_txt[match_end:]))

                        if not already_matched:
                            # First match
                            match_class = "match first"
                            match_img = "match"
                            match_title = _(
                                "This logfile pattern matches first and will be used for "
                                "defining the state of the given line.")
                            already_matched = True
                        else:
                            # subsequent match
                            match_class = "match"
                            match_img = "imatch"
                            match_title = _(
                                "This logfile pattern matches but another matched first.")
                    else:
                        match_img = "nmatch"
                        match_title = _("This logfile pattern does not match the given string.")
                else:
                    # rule does not match
                    match_img = "nmatch"
                    match_title = _("The rule conditions do not match.")

                table.row()
                table.cell(_("Match"))
                html.icon("rule%s" % match_img, match_title)

                cls = (["state%d" % logwatch.level_state(state), "fillbackground"]
                       if match_class == "match first" else [])
                table.cell(_("State"), HTMLWriter.render_span(logwatch.level_name(state)), css=cls)
                table.cell(_("Pattern"), HTMLWriter.render_tt(pattern))
                table.cell(_("Comment"), comment)
                table.cell(_("Matched line"), disp_match_txt)

            # Trailing row with the edit link for this rule
            table.row(fixed=True)
            table.cell(colspan=5)
            edit_url = folder_preserving_link([
                ("mode", "edit_rule"),
                ("varname", "logwatch_rules"),
                ("rulenr", rulenr),
                ("item", mk_repr(self._item).decode()),
                ("rule_folder", folder.path()),
                ("rule_id", rule.id),
            ])
            html.icon_button(edit_url, _("Edit this rule"), "edit")