def action(self):
    """Handle submission of the "create host" form (legacy WATO mode API).

    Returns the name of the mode to switch to, either plain ("folder") or
    as a (mode_name, message) tuple whose message is shown after the switch.
    May instead raise HTTPRedirect to jump directly to service discovery.
    """
    # Invalid or replayed transaction: go back to the folder view untouched.
    if not html.transaction_valid():
        return "folder"

    # Collect attribute values and (for cluster hosts) the node list from the form.
    attributes = watolib.collect_attributes(self._host_type_name(), new=True)
    cluster_nodes = self._get_cluster_nodes()

    hostname = html.request.get_ascii_input_mandatory("host")
    Hostname().validate_value(hostname, "host")

    # Only mutate the configuration when the transaction ID is fresh.
    if html.check_transaction():
        watolib.Folder.current().create_hosts([(hostname, attributes, cluster_nodes)])

    self._host = watolib.Folder.current().host(hostname)

    inventory_url = watolib.folder_preserving_link([
        ("mode", "inventory"),
        ("host", self._host.name()),
        ("_scan", "1"),
    ])

    # Ping-only hosts have no services, so don't advertise service discovery.
    create_msg = None if self._host.is_ping_host() else (
        _('Successfully created the host. Now you should do a '
          '<a href="%s">service discovery</a> in order to auto-configure '
          'all services to be checked on this host.') % inventory_url)

    # Presumably the "save & go to services" button: jump straight into
    # service discovery instead of returning to a mode.
    if html.request.var("services"):
        raise HTTPRedirect(inventory_url)

    # "Save & test" button: switch to the diagnose page and trigger an
    # immediate first diagnosis attempt via the "_try" request variable.
    if html.request.var("diag_host"):
        html.request.set_var("_try", "1")
        return "diag_host", create_msg

    return "folder", create_msg
def _check_new_host_name(self, varname, host_name):
    """Validate a host name entered for a host that is about to be created.

    Raises MKUserError (attached to *varname*) when the name is empty,
    already taken in the current folder, not unique across the setup, or
    rejected by the Hostname valuespec.
    """
    if not host_name:
        raise MKUserError(varname, _("Please specify a host name."))

    current_folder = watolib.Folder.current()
    if current_folder.has_host(host_name):
        raise MKUserError(varname, _("A host with this name already exists in this folder."))

    validate_host_uniqueness(varname, host_name)
    Hostname().validate_value(host_name, varname)
def _get_host_info_from_row(self, row):
    """Extract the host name and a host attribute dict from one CSV row.

    Which attribute each column maps to is taken from the per-column
    "attribute_<N>" request variables of the import form. Columns mapped
    to "-" (or nothing) are ignored.

    Raises MKUserError when a column's attribute is assigned twice, when a
    non-alias attribute contains non-ASCII data, or when no column was
    mapped to the host name.
    """
    host_name = None
    attributes = {}
    for col_num, value in enumerate(row):
        # Column-to-attribute mapping chosen by the user in the import form.
        attribute = html.request.var("attribute_%d" % col_num)
        if attribute == "host_name":
            Hostname().validate_value(value, "host")
            host_name = value
        elif attribute and attribute != "-":
            # Each attribute may be fed from at most one column.
            if attribute in attributes:
                raise MKUserError(
                    None,
                    _("The attribute \"%s\" is assigned to multiple columns. "
                      "You can not populate one attribute from multiple columns. "
                      "The column to attribute associations need to be unique.") % attribute)

            # FIXME: Couldn't we decode all attributes?
            if attribute == "alias":
                # The alias is the only attribute allowed to carry non-ASCII
                # text; it is decoded from UTF-8 byte strings here.
                attributes[attribute] = value.decode("utf-8")
            else:
                # NOTE(review): this check only raises for byte strings - on
                # Python 3, where csv yields str, six.text_type(value) never
                # raises UnicodeDecodeError, so the non-ASCII guard is a
                # no-op. Presumably Python-2-era code; confirm before porting.
                try:
                    six.text_type(value)
                except UnicodeDecodeError:
                    raise MKUserError(
                        None,
                        _("Non-ASCII characters are not allowed in the "
                          "attribute \"%s\".") % attribute)
                attributes[attribute] = value

    if host_name is None:
        raise MKUserError(None, _("The host name attribute needs to be assigned to a column."))

    return host_name, attributes
def _get_host_info_from_row(self, row, row_num):
    """Extract the host name and a host attribute dict from one CSV row.

    The column-to-attribute mapping is read from the "attribute_<N>"
    request variables of the import form; columns mapped to "-" (or to
    nothing) and empty cells are skipped.

    Raises MKUserError on duplicate attribute assignments, non-ASCII
    values, values rejected by the attribute's valuespec, or when no
    column was mapped to the host name.
    """
    host_name = None
    attributes: Dict[str, str] = {}

    for column_index, cell in enumerate(row):
        if not cell:
            continue  # empty cells carry no information

        attribute_name = request.var("attribute_%d" % column_index)

        if attribute_name == "host_name":
            Hostname().validate_value(cell, "host")
            host_name = cell
            continue

        if not attribute_name or attribute_name == "-":
            continue  # column is not mapped to any attribute

        # Each attribute may be fed from at most one column.
        if attribute_name in attributes:
            raise MKUserError(
                None,
                _('The attribute "%s" is assigned to multiple columns. '
                  "You can not populate one attribute from multiple columns. "
                  "The column to attribute associations need to be unique."
                  ) % attribute_name,
            )

        attr = host_attribute_registry[attribute_name]()

        # TODO: The value handling here is incorrect. The correct way would be to use the
        # host attributes from_html_vars and validate_input, just like collect_attributes()
        # from cmk/gui/watolib/host_attributes.py is doing it.
        # The problem here is that we get the value in a different way (from row instead of
        # HTTP request vars) which from_html_vars can not work with.
        if attribute_name != "alias":
            # Only the alias may carry non-ASCII text.
            if not cell.isascii():
                raise MKUserError(
                    None,
                    _("Non-ASCII characters are not allowed in the "
                      'attribute "%s".') % attribute_name,
                )
            try:
                attr.validate_input(cell, "")
            except MKUserError as e:
                # Re-raise with the exact row/column position for the user.
                raise MKUserError(
                    None,
                    _("Invalid value in column %d (%s) of row %d: %s")
                    % (column_index, attribute_name, row_num, e),
                )

        attributes[attribute_name] = cell

    if host_name is None:
        raise MKUserError(
            None, _("The host name attribute needs to be assigned to a column."))

    return host_name, attributes
def action(self) -> ActionResult:
    """Handle submission of the "create host" form (current page-mode API).

    Creates the host in the current folder inside a valid transaction and
    redirects to the folder view, the diagnose page, or service discovery,
    flashing a success message where appropriate.
    """
    # Invalid or replayed transaction: go back to the folder view untouched.
    if not transactions.transaction_valid():
        return redirect(mode_url("folder"))

    # Collect attribute values and (for cluster hosts) the node list from the form.
    attributes = watolib.collect_attributes(self._host_type_name(), new=True)
    cluster_nodes = self._get_cluster_nodes()

    hostname = request.get_ascii_input_mandatory("host")
    Hostname().validate_value(hostname, "host")

    folder = watolib.Folder.current()

    # Only mutate the configuration when the transaction ID is fresh.
    if transactions.check_transaction():
        folder.create_hosts([(hostname, attributes, cluster_nodes)])

    self._host = folder.load_host(hostname)

    inventory_url = watolib.folder_preserving_link(
        [
            ("mode", "inventory"),
            ("host", self._host.name()),
            ("_scan", "1"),
        ]
    )

    # Ping-only hosts have no services, so don't advertise service discovery.
    create_msg = (
        None
        if self._host.is_ping_host()
        else (
            _(
                "Successfully created the host. Now you should do a "
                '<a href="%s">service discovery</a> in order to auto-configure '
                "all services to be checked on this host."
            )
            % inventory_url
        )
    )

    # NOTE(review): the plain save button ("_save") redirects straight into
    # service discovery, skipping the flash message below - confirm this is
    # intended rather than a dedicated "save & go to services" variable.
    if request.var("_save"):
        return redirect(inventory_url)

    if create_msg:
        flash(create_msg)

    # "Save & test" button: switch to the diagnose page with an immediate
    # first diagnosis attempt ("_try").
    if request.var("diag_host"):
        return redirect(
            mode_url("diag_host", folder=folder.path(), host=self._host.name(), _try="1")
        )

    return redirect(mode_url("folder", folder=folder.path()))
def check_hostname(hostname, should_exist=True):
    """Validate *hostname* and check its (non-)existence in the configuration.

    With should_exist=True, raises MKUserError when no such host is
    configured. With should_exist=False, raises MKUserError (naming the
    folder of the clashing host) when the host already exists. Always
    validates the syntax via the Hostname valuespec first.
    """
    # Validate hostname with valuespec
    Hostname().validate_value(hostname, "hostname")

    if should_exist:
        host = watolib.Host.host(hostname)
        if not host:
            raise MKUserError(None, _("No such host"))
    else:
        # Look the host up once instead of calling host_exists() and then
        # host() again for the error message: the original did two config
        # lookups for the same name, with a window for the two answers to
        # disagree. Host.host() returning None is taken as "does not exist"
        # (mirroring the existence check above).
        host = watolib.Host.host(hostname)
        if host is not None:
            raise MKUserError(
                None,
                _("Host %s already exists in the folder %s") %
                (hostname, host.folder().path()))
def check_hostname(hostname: HostName, should_exist=True) -> None:
    """Validate *hostname* and check its (non-)existence in the configuration.

    With should_exist=True, raises MKUserError when no such host is
    configured. With should_exist=False, raises MKUserError (naming the
    folder of the clashing host) when the host already exists.
    """
    # Validate hostname with valuespec
    Hostname().validate_value(hostname, "hostname")

    # Both branches need the same lookup, so do it once up front.
    host = Host.host(hostname)

    if should_exist:
        if not host:
            raise MKUserError(None, _("No such host"))
        return

    if host is not None:
        raise MKUserError(
            None,
            _("Host %s already exists in the folder %s")
            % (hostname, host.folder().path()),
        )
def _valuespec_special_agents_kube():
    """Return the Dictionary valuespec for the Kubernetes special agent rule.

    Purely declarative form definition: connection data (API server and
    optional Cluster Collector), the object types to monitor, namespace
    filters, cluster resource aggregation and annotation import options.
    """
    return Dictionary(
        elements=[
            (
                "cluster-name",
                Hostname(
                    title=_("Cluster name"),
                    allow_empty=False,
                    help=_(
                        "You must specify a name for your Kubernetes cluster. The provided name"
                        " will be used to make the objects from your cluster unique in a "
                        "multi-cluster setup."
                    ),
                ),
            ),
            (
                "token",
                IndividualOrStoredPassword(
                    title=_("Token"),
                    allow_empty=False,
                ),
            ),
            # Connection to the Kubernetes API server itself.
            (
                "kubernetes-api-server",
                Dictionary(
                    elements=[
                        (
                            "endpoint",
                            HTTPUrl(
                                title=_("Endpoint"),
                                allow_empty=False,
                                default_value="https://<control plane ip>:443",
                                help=_(
                                    "The full URL to the Kubernetes API server including the "
                                    "protocol (http or https) and the port."
                                ),
                                size=80,
                            ),
                        ),
                        ssl_verification(),
                        (
                            "proxy",
                            # Kubernetes client does not support socks proxies.
                            HTTPProxyReference({"http", "https"}),
                        ),
                        _tcp_timeouts(),
                    ],
                    required_keys=["endpoint", "verify-cert"],
                    title=_("API server connection"),
                ),
            ),
            # Optional connection to the Checkmk Cluster Collector service.
            (
                "cluster-collector",  # TODO: adjust help texts depending on ingress inclusion
                Dictionary(
                    elements=[
                        (
                            "endpoint",
                            HTTPUrl(
                                title=_("Collector NodePort / Ingress endpoint"),
                                allow_empty=False,
                                default_value="https://<service url>:30035",
                                help=_(
                                    "The full URL to the Cluster Collector service including "
                                    "the protocol (http or https) and the port. Depending on "
                                    "the deployed configuration of the service this can "
                                    "either be the NodePort or the Ingress endpoint."
                                ),
                                size=80,
                            ),
                        ),
                        ssl_verification(),
                        (
                            "proxy",
                            HTTPProxyReference(),
                        ),
                        _tcp_timeouts(),
                    ],
                    required_keys=["endpoint", "verify-cert"],
                    title=_("Enrich with usage data from Checkmk Cluster Collector"),
                ),
            ),
            (
                "monitored-objects",
                ListChoice(
                    choices=[
                        ("deployments", _("Deployments")),
                        ("daemonsets", _("DaemonSets")),
                        ("statefulsets", _("StatefulSets")),
                        ("namespaces", _("Namespaces")),
                        ("nodes", _("Nodes")),
                        ("pods", _("Pods")),
                        ("cronjobs_pods", _("Pods of CronJobs")),
                    ],
                    # Everything except CronJob pods is monitored by default.
                    default_value=[
                        "deployments",
                        "daemonsets",
                        "statefulsets",
                        "namespaces",
                        "nodes",
                        "pods",
                    ],
                    allow_empty=False,
                    title=_("Collect information about..."),
                    help=_(
                        "Select the Kubernetes objects you would like to monitor. Pods "
                        "controlled by CronJobs are treated separately as they are usually "
                        "quite short lived. Those pods will be monitored in the same "
                        "manner as regular pods. Your Dynamic host management rule should "
                        "be configured accordingly to avoid that the piggyback hosts for "
                        "terminated CronJob pods are kept for too long. This 'Pods of CronJobs' "
                        "option has no effect if Pods are not monitored"
                    ),
                ),
            ),
            # Namespace include/exclude filtering (mutually exclusive choices).
            (
                "namespaces",
                CascadingDropdown(
                    choices=[
                        (
                            "namespace-include-patterns",
                            _("Monitor namespaces matching"),
                            ListOf(
                                valuespec=RegExp(
                                    mode=RegExp.infix,
                                    title=_("Pattern"),
                                    allow_empty=False,
                                ),
                                add_label=_("Add new pattern"),
                                allow_empty=False,
                                help=_(
                                    "You can specify a list of regex patterns to monitor specific "
                                    "namespaces. Only those that do match the predefined patterns "
                                    "will be monitored."
                                ),
                            ),
                        ),
                        (
                            "namespace-exclude-patterns",
                            _("Exclude namespaces matching"),
                            ListOf(
                                valuespec=RegExp(
                                    mode=RegExp.infix,
                                    title=_("Pattern"),
                                    allow_empty=False,
                                ),
                                add_label=_("Add new pattern"),
                                allow_empty=False,
                                help=_(
                                    "You can specify a list of regex patterns to exclude "
                                    "namespaces. Only those that do not match the predefined "
                                    "patterns are monitored."
                                ),
                            ),
                        ),
                    ],
                    orientation="horizontal",
                    title=_("Monitor namespaces"),
                    help=_(
                        "If your cluster has multiple namespaces, you can filter specific ones "
                        "to be monitored. Note that this concerns everything which is part of the "
                        "selected namespaces such as pods for example."
                    ),
                ),
            ),
            (
                "cluster-resource-aggregation",
                CascadingDropdown(
                    # NOTE(review): title is not wrapped in _() and therefore
                    # never translated - presumably an oversight; confirm and wrap.
                    title=("Cluster resource aggregation"),
                    choices=[
                        (
                            "cluster-aggregation-exclude-node-roles",
                            _("Exclude Nodes based on their role"),
                            ListOf(
                                valuespec=RegExp(
                                    mode=RegExp.infix,
                                    allow_empty=False,
                                    size=50,
                                ),
                                add_label=_("Add new role"),
                                allow_empty=True,
                                movable=False,
                                default_value=["control-plane", "infra"],
                            ),
                        ),
                        ("cluster-aggregation-include-all-nodes", _("Include all Nodes")),
                    ],
                    orientation="horizontal",
                    help=_(
                        "You may find that some Nodes don't add resources to the overall "
                        "workload your Cluster can handle. This option allows you to remove "
                        "Nodes from aggregations on the Cluster host based on their role. A "
                        "node will be omitted, if any of the listed {role}s matches a label "
                        "with name 'node-role.kubernetes.io/{role}'. This affects the "
                        "following services: Memory resources, CPU resources, Pod resources. "
                        "Only Services on the Cluster host are affected. By default, Nodes "
                        "with role control-plane and infra are omitted.",
                    ),
                ),
            ),
            (
                "import-annotations",
                CascadingDropdown(
                    # NOTE(review): title is not wrapped in _() here either.
                    title=("Import annotations as host labels"),
                    choices=[
                        (
                            "include-matching-annotations-as-host-labels",
                            _("Filter valid annotations by key pattern"),
                            RegExp(
                                mode=RegExp.infix,
                                allow_empty=False,
                                default_value="checkmk-monitoring$",
                                size=50,
                            ),
                        ),
                        (
                            "include-annotations-as-host-labels",
                            _("Import all valid annotations"),
                            None,
                        ),
                    ],
                    orientation="horizontal",
                    help=_(
                        "By default, Checkmk does not import annotations. If "
                        "this option is enabled, Checkmk will import any "
                        "annotation that is a valid Kubernetes label. These "
                        "imported annotations are added as host labels to their "
                        "respective piggyback host using the syntax "
                        "'cmk/kubernetes/annotation/{key}:{value}'. You can "
                        "further restrict the imported annotations by specifying "
                        "a pattern which Checkmk searches for in the key of the "
                        "annotation."
                    ),
                ),
            ),
        ],
        optional_keys=[
            "namespaces",
            "cluster-collector",
            "cluster-resource-aggregation",
            "import-annotations",
        ],
        default_keys=["cluster-collector"],
        title=_("Kubernetes"),
    )
def _valuespec_active_checks_icmp():
    """Return the Dictionary valuespec for the active ICMP/PING check rule."""
    return Dictionary(
        title=_("Check hosts with PING (ICMP Echo Request)"),
        # NOTE(review): "neccessary" is a typo in this user-visible (and
        # translatable) help text - flagging only, not changing the string.
        help=_(
            "This ruleset allows you to configure explicit PING monitoring of hosts. "
            "Usually a PING is being used as a host check, so this is not neccessary. "
            "There are some situations, however, where this can be useful. One of them "
            "is when using the Check_MK Micro Core with SMART Ping and you want to "
            "track performance data of the PING to some hosts, nevertheless."
        ),
        elements=[
            (
                "description",
                TextInput(
                    title=_("Service Description"),
                    allow_empty=False,
                    default_value="PING",
                ),
            ),
            # Which of the host's addresses the check should ping.
            (
                "address",
                CascadingDropdown(
                    title=_("Alternative address to ping"),
                    help=_(
                        "If you omit this setting then the configured IP address of that host "
                        "will be pinged. In the host configuration you can provide additional "
                        "addresses besides the main IP address (additional IP addresses section). "
                        "In this option you can select which set of addresses you want to include "
                        'for this check. "Ping additional IP addresses" will omit the host '
                        'configured main address while the "Ping all addresses" option will '
                        "include both the main and additional addresses."
                    ),
                    orientation="horizontal",
                    choices=[
                        ("address", _("Ping the normal IP address")),
                        ("alias", _("Use the alias as DNS name / IP address")),
                        (
                            "explicit",
                            _("Ping the following explicit address / DNS name"),
                            Hostname(),
                        ),
                        ("all_ipv4addresses", _("Ping all IPv4 addresses")),
                        ("all_ipv6addresses", _("Ping all IPv6 addresses")),
                        ("additional_ipv4addresses", _("Ping additional IPv4 addresses")),
                        ("additional_ipv6addresses", _("Ping additional IPv6 addresses")),
                        (
                            "indexed_ipv4address",
                            _("Ping IPv4 address identified by its index"),
                            Integer(default_value=1),
                        ),
                        (
                            "indexed_ipv6address",
                            _("Ping IPv6 address identified by its index"),
                            Integer(default_value=1),
                        ),
                    ],
                ),
            ),
            (
                "min_pings",
                Integer(
                    title=_("Number of positive responses required for OK state"),
                    help=_(
                        "When pinging multiple addresses, failure to ping one of the "
                        "provided addresses will lead to a Crit status of the service. "
                        "This option allows to specify the minimum number of successful "
                        "pings which will still classify the service as OK. The smallest "
                        "number is 1 and the maximum number should be (number of addresses - 1). "
                        "A number larger than the suggested number will always lead to a "
                        "Crit Status. One must also select a suitable option from the "
                        '"Alternative address to ping" above.'
                    ),
                    minvalue=1,
                ),
            ),
            # Shared RTA / packet-loss threshold elements are appended here.
        ] + check_icmp_params(),
    )
def _valuespec_generic_metrics_prometheus():
    """Return the (Transform-wrapped) valuespec for the Prometheus special agent rule.

    Covers the connection to the Prometheus API, the configured scrape
    targets/exporters (node exporter, kube-state-metrics, cAdvisor) and
    custom PromQL-based service definitions.
    """
    # Shared element, reused by kube-state-metrics and cAdvisor below.
    namespace_element = (
        "prepend_namespaces",
        DropdownChoice(
            title=_("Prepend namespace prefix for hosts"),
            help=_(
                "If a cluster uses multiple namespaces you need to activate this option. "
                "Hosts for namespaced Kubernetes objects will then be prefixed with the "
                "name of their namespace. This makes Kubernetes resources in different "
                "namespaces that have the same name distinguishable, but results in "
                "longer hostnames."
            ),
            choices=[
                ("use_namespace", _("Use a namespace prefix")),
                ("omit_namespace", _("Don't use a namespace prefix")),
            ],
        ),
    )
    return Transform(
        valuespec=Dictionary(
            elements=[
                # How to reach the Prometheus API.
                (
                    "connection",
                    CascadingDropdown(
                        choices=[
                            (
                                "ip_address",
                                _("IP Address"),
                                Dictionary(
                                    elements=api_request_connection_elements(
                                        help_text=_(
                                            "Specifies a URL path prefix, which is prepended to API calls "
                                            "to the Prometheus API. If this option is not relevant for "
                                            "your installation, please leave it unchecked."
                                        ),
                                        default_port=6443,
                                    ),
                                ),
                            ),
                            (
                                "host_name",
                                _("Host name"),
                                Dictionary(
                                    elements=api_request_connection_elements(
                                        help_text=_(
                                            "Specifies a URL path prefix, which is prepended to API calls "
                                            "to the Prometheus API. If this option is not relevant for "
                                            "your installation, please leave it unchecked."
                                        ),
                                        default_port=6443,
                                    ),
                                ),
                            ),
                            (
                                "url_custom",
                                _("Custom URL"),
                                Dictionary(
                                    elements=[
                                        (
                                            "url_address",
                                            TextInput(
                                                title=_("Custom URL server address"),
                                                help=_(
                                                    "Specify a custom URL to connect to "
                                                    "your server. Do not include the "
                                                    "protocol. This option overwrites "
                                                    "all available options such as port and "
                                                    "other URL prefixes."
                                                ),
                                                allow_empty=False,
                                            ),
                                        )
                                    ],
                                    optional_keys=[],
                                ),
                            ),
                        ],
                        title=_("Prometheus connection option"),
                    ),
                ),
                ssl_verification(),
                api_request_authentication(),
                (
                    "protocol",
                    DropdownChoice(
                        title=_("Protocol"),
                        choices=[
                            ("http", "HTTP"),
                            ("https", "HTTPS"),
                        ],
                    ),
                ),
                # The scrape targets (exporters) to generate services from.
                (
                    "exporter",
                    ListOf(
                        valuespec=CascadingDropdown(
                            choices=[
                                (
                                    "node_exporter",
                                    _("Node Exporter"),
                                    Dictionary(
                                        elements=[
                                            (
                                                "host_mapping",
                                                Hostname(
                                                    title=_("Explicitly map Node Exporter host"),
                                                    allow_empty=True,
                                                    help=_(
                                                        "Per default, Checkmk tries to map the underlying Checkmk host "
                                                        "to the Node Exporter host which contains either the Checkmk "
                                                        'hostname, host address or "localhost" in its endpoint address. '
                                                        "The created services of the mapped Node Exporter will "
                                                        "be assigned to the Checkmk host. A piggyback host for each "
                                                        "Node Exporter host will be created if none of the options are "
                                                        "valid. "
                                                        "This option allows you to explicitly map one of your Node "
                                                        "Exporter hosts to the underlying Checkmk host. This can be "
                                                        "used if the default options do not apply to your setup."
                                                    ),
                                                ),
                                            ),
                                            (
                                                "entities",
                                                ListChoice(
                                                    choices=[
                                                        ("df", _("Filesystems")),
                                                        ("diskstat", _("Disk IO")),
                                                        ("mem", _("Memory")),
                                                        (
                                                            "kernel",
                                                            _("CPU utilization & Kernel performance"),
                                                        ),
                                                    ],
                                                    default_value=[
                                                        "df",
                                                        "diskstat",
                                                        "mem",
                                                        "kernel",
                                                    ],
                                                    allow_empty=False,
                                                    title=_("Retrieve information about..."),
                                                    help=_(
                                                        "For your respective kernel select the hardware or OS entity "
                                                        "you would like to retrieve information about."
                                                    ),
                                                ),
                                            ),
                                        ],
                                        title=_("Node Exporter metrics"),
                                        optional_keys=["host_mapping"],
                                    ),
                                ),
                                (
                                    "kube_state",
                                    _("Kube-state-metrics"),
                                    Dictionary(
                                        elements=[
                                            (
                                                "cluster_name",
                                                Hostname(
                                                    title=_("Cluster name"),
                                                    allow_empty=False,
                                                    help=_(
                                                        "You must specify a name for your Kubernetes cluster. The provided name"
                                                        " will be used to create a piggyback host for the cluster related services."
                                                    ),
                                                ),
                                            ),
                                            namespace_element,
                                            filter_kubernetes_namespace_element(),
                                            (
                                                "entities",
                                                ListChoice(
                                                    choices=[
                                                        ("cluster", _("Cluster")),
                                                        ("nodes", _("Nodes")),
                                                        ("services", _("Services")),
                                                        ("pods", _("Pods")),
                                                        ("daemon_sets", _("Daemon sets")),
                                                    ],
                                                    default_value=[
                                                        "cluster",
                                                        "nodes",
                                                        "services",
                                                        "pods",
                                                        "daemon_sets",
                                                    ],
                                                    allow_empty=False,
                                                    title=_("Retrieve information about..."),
                                                    help=_(
                                                        "For your Kubernetes cluster select for which entity levels "
                                                        "you would like to retrieve information about. Piggyback hosts "
                                                        "for the respective entities will be created."
                                                    ),
                                                ),
                                            ),
                                        ],
                                        title=_("Kube state metrics"),
                                        optional_keys=["namespace_include_patterns"],
                                    ),
                                ),
                                (
                                    "cadvisor",
                                    _("cAdvisor"),
                                    Dictionary(
                                        elements=[
                                            (
                                                "entity_level",
                                                CascadingDropdown(
                                                    title=_(
                                                        "Entity level used to create Checkmk piggyback hosts"
                                                    ),
                                                    help=_(
                                                        "The retrieved information from the cAdvisor will be aggregated according"
                                                        " to the selected entity level. Resulting services will be allocated to the created"
                                                        " Checkmk piggyback hosts."
                                                    ),
                                                    choices=[
                                                        (
                                                            "container",
                                                            _("Container - Display the information on container level"),
                                                            Dictionary(
                                                                elements=[
                                                                    (
                                                                        "container_id",
                                                                        DropdownChoice(
                                                                            title=_("Host name used for containers"),
                                                                            help=_(
                                                                                "For Containers - Choose which identifier is used for the monitored containers."
                                                                                " This will affect the name used for the piggyback host"
                                                                                " corresponding to the container, as well as items for"
                                                                                " services created on the node for each container."
                                                                            ),
                                                                            choices=[
                                                                                (
                                                                                    "short",
                                                                                    _("Short - Use the first 12 characters of the docker container ID"),
                                                                                ),
                                                                                (
                                                                                    "long",
                                                                                    _("Long - Use the full docker container ID"),
                                                                                ),
                                                                                (
                                                                                    "name",
                                                                                    _("Name - Use the name of the container"),
                                                                                ),
                                                                            ],
                                                                        ),
                                                                    )
                                                                ],
                                                                optional_keys=[],
                                                            ),
                                                        ),
                                                        (
                                                            "pod",
                                                            _("Pod - Display the information for pod level"),
                                                            Dictionary(
                                                                elements=[namespace_element],
                                                                optional_keys=[],
                                                            ),
                                                        ),
                                                        (
                                                            "both",
                                                            _("Both - Display the information for both, pod and container, levels"),
                                                            Dictionary(
                                                                elements=[
                                                                    (
                                                                        "container_id",
                                                                        DropdownChoice(
                                                                            title=_("Host name used for containers"),
                                                                            help=_(
                                                                                "For Containers - Choose which identifier is used for the monitored containers."
                                                                                " This will affect the name used for the piggyback host"
                                                                                " corresponding to the container, as well as items for"
                                                                                " services created on the node for each container."
                                                                            ),
                                                                            choices=[
                                                                                (
                                                                                    "short",
                                                                                    _("Short - Use the first 12 characters of the docker container ID"),
                                                                                ),
                                                                                (
                                                                                    "long",
                                                                                    _("Long - Use the full docker container ID"),
                                                                                ),
                                                                                (
                                                                                    "name",
                                                                                    _("Name - Use the name of the container"),
                                                                                ),
                                                                            ],
                                                                        ),
                                                                    ),
                                                                    namespace_element,
                                                                ],
                                                                optional_keys=[],
                                                            ),
                                                        ),
                                                    ],
                                                ),
                                            ),
                                            filter_kubernetes_namespace_element(),
                                            (
                                                "entities",
                                                ListChoice(
                                                    choices=[
                                                        ("diskio", _("Disk IO")),
                                                        ("cpu", _("CPU utilization")),
                                                        ("df", _("Filesystem")),
                                                        ("if", _("Network")),
                                                        ("memory", _("Memory")),
                                                    ],
                                                    default_value=[
                                                        "diskio",
                                                        "cpu",
                                                        "df",
                                                        "if",
                                                        "memory",
                                                    ],
                                                    allow_empty=False,
                                                    title=_("Retrieve information about..."),
                                                    help=_(
                                                        "For your respective kernel select the hardware or OS entity "
                                                        "you would like to retrieve information about."
                                                    ),
                                                ),
                                            ),
                                        ],
                                        title=_("CAdvisor"),
                                        validate=_check_not_empty_exporter_dict,
                                        # NOTE(review): these look like entity names rather
                                        # than keys of this Dictionary's elements - confirm
                                        # the optional_keys list is intentional.
                                        optional_keys=[
                                            "diskio",
                                            "cpu",
                                            "df",
                                            "if",
                                            "memory",
                                            "namespace_include_patterns",
                                        ],
                                    ),
                                ),
                            ]
                        ),
                        add_label=_("Add new Scrape Target"),
                        title=_(
                            "Prometheus Scrape Targets (include Prometheus Exporters) to fetch information from"
                        ),
                        help=_(
                            "You can specify which Scrape Targets including Exporters "
                            "are connected to your Prometheus instance. The Prometheus "
                            "Special Agent will automatically generate services for the "
                            "selected monitoring information. You can create your own "
                            "defined services with the custom PromQL query option below "
                            "if one of the Scrape Target types are not listed here."
                        ),
                    ),
                ),
                # Custom services fed by user-defined PromQL queries.
                (
                    "promql_checks",
                    ListOf(
                        valuespec=Dictionary(
                            elements=[
                                (
                                    "service_description",
                                    TextInput(
                                        title=_("Service name"),
                                        allow_empty=False,
                                    ),
                                ),
                                (
                                    "host_name",
                                    Hostname(
                                        title=_("Assign service to following host"),
                                        allow_empty=False,
                                        help=_(
                                            "Specify the host to which the resulting "
                                            "service will be assigned to. The host "
                                            "should be configured to allow Piggyback "
                                            "data."
                                        ),
                                    ),
                                ),
                                (
                                    "metric_components",
                                    ListOf(
                                        valuespec=Dictionary(
                                            title=_("PromQL query"),
                                            elements=[
                                                (
                                                    "metric_label",
                                                    TextInput(
                                                        title=_("Metric label"),
                                                        allow_empty=False,
                                                        help=_(
                                                            "The metric label is displayed alongside the "
                                                            "queried value in the status detail the resulting service. "
                                                            "The metric name will be taken as label if "
                                                            "nothing was specified."
                                                        ),
                                                    ),
                                                ),
                                                ("metric_name", MetricName()),
                                                (
                                                    "promql_query",
                                                    TextInput(
                                                        title=_(
                                                            "PromQL query (only single return value permitted)"
                                                        ),
                                                        allow_empty=False,
                                                        size=80,
                                                        help=_(
                                                            'Example PromQL query: up{job="node_exporter"}'
                                                        ),
                                                    ),
                                                ),
                                                (
                                                    "levels",
                                                    Dictionary(
                                                        elements=[
                                                            (
                                                                "lower_levels",
                                                                Tuple(
                                                                    title=_("Lower levels"),
                                                                    elements=[
                                                                        Float(title=_("Warning below")),
                                                                        Float(title=_("Critical below")),
                                                                    ],
                                                                ),
                                                            ),
                                                            (
                                                                "upper_levels",
                                                                Tuple(
                                                                    title=_("Upper levels"),
                                                                    elements=[
                                                                        Float(title=_("Warning at")),
                                                                        Float(title=_("Critical at")),
                                                                    ],
                                                                ),
                                                            ),
                                                        ],
                                                        # NOTE(review): title is not wrapped in _()
                                                        # and will not be translated.
                                                        title="Metric levels",
                                                        validate=_verify_prometheus_empty,
                                                        help=_(
                                                            "Specify upper and/or lower levels for the queried PromQL value. This option "
                                                            "should be used for simple cases where levels are only required once. You "
                                                            "should use the Prometheus custom services monitoring rule if you want to "
                                                            "specify a rule which applies to multiple Prometheus custom services at once. "
                                                            "The custom rule always has priority over the rule specified here "
                                                            "if the two overlap."
                                                        ),
                                                    ),
                                                ),
                                            ],
                                            optional_keys=["metric_name", "levels"],
                                        ),
                                        title=_("PromQL queries for Service"),
                                        add_label=_("Add new PromQL query"),
                                        allow_empty=False,
                                        magic="@;@",
                                        validate=_validate_prometheus_service_metrics,
                                    ),
                                ),
                            ],
                            optional_keys=["host_name"],
                        ),
                        title=_("Service creation using PromQL queries"),
                        add_label=_("Add new Service"),
                    ),
                ),
            ],
            title=_("Prometheus"),
            optional_keys=["auth_basic"],
        ),
        # Migrates rule values from older formats before rendering/consuming.
        forth=_transform_agent_prometheus,
    )
def _show_host_name(self):
    """Render the "Hostname" form section with an empty, focused input field."""
    forms.section(_("Hostname"))
    hostname_valuespec = Hostname()
    hostname_valuespec.render_input("host", "")
    html.set_focus("host")