"""Parses `string_table` into a PodContainers isinstance >>> section_kube_pod_containers_v1 = '{"containers": {"busybox": {"container_id": null, "image_id": "", "name": "busybox", "image": "busybox", "ready": false, "state": {"type": "waiting", "reason": "PodInitializing", "detail": null}, "restart_count": 0}}}' >>> parse([[section_kube_pod_containers_v1]]) PodContainers(containers={'busybox': ContainerStatus(container_id=None, image_id='', name='busybox', image='busybox', ready=False, state=ContainerWaitingState(type='waiting', reason='PodInitializing', detail=None), restart_count=0)}) >>> section_kube_pod_init_containers_v1 = '{"containers": {"busybox-init": {"container_id": "docker://some-id", "image_id": "docker-pullable://busybox@sha256:some-id", "name": "busybox-init", "image": "busybox:latest", "ready": false, "state": {"type": "waiting", "reason": "CrashLoopBackOff", "detail": "back-off 5m0s restarting failed container=busybox-init pod=failing-initcontainer-64ff5bdcd-vhl59_pod-status(8c812676-6e30-45ae-8271-16a279c95168)"}, "restart_count": 144}}}' >>> parse([[section_kube_pod_init_containers_v1]]) PodContainers(containers={'busybox-init': ContainerStatus(container_id='docker://some-id', image_id='docker-pullable://busybox@sha256:some-id', name='busybox-init', image='busybox:latest', ready=False, state=ContainerWaitingState(type='waiting', reason='CrashLoopBackOff', detail='back-off 5m0s restarting failed container=busybox-init pod=failing-initcontainer-64ff5bdcd-vhl59_pod-status(8c812676-6e30-45ae-8271-16a279c95168)'), restart_count=144)}) """ if not string_table: return None return PodContainers(**json.loads(string_table[0][0])) register.agent_section( name="kube_pod_containers_v1", parsed_section_name="kube_pod_containers", parse_function=parse, ) register.agent_section( name="kube_pod_init_containers_v1", parsed_section_name="kube_pod_init_containers", parse_function=parse, ) def discovery(section: PodContainers) -> DiscoveryResult: for container in 
section.containers.values(): yield Service(item=container.name)
# aparrently this was not clear when designing the sections output section: Section = {} for datacenter_name, key, cluster_name, *values in string_table: section.setdefault(cluster_name, {"datacenter": datacenter_name})[key] = ", ".join(values) return section def inventory_esx_vsphere_clusters(section: Section) -> InventoryResult: for cluster_name, cluster_data in section.items(): yield TableRow( path=["software", "applications", "vmwareesx"], key_columns={ "cluster": cluster_name, "datacenter": cluster_data["datacenter"], "hostsystems": cluster_data["hostsystems"], "vms": cluster_data["vms"], }, ) register.agent_section( name="esx_vsphere_clusters", parse_function=parse_esx_vsphere_clusters, ) register.inventory_plugin( name="inventory_esx_vsphere_clusters", sections=["esx_vsphere_clusters"], inventory_function=inventory_esx_vsphere_clusters, )
def check(params: KubeContainersLevelsUpperLower, section: ContainerCount) -> CheckResult:
    """Computes `total` and uses `check_levels` for each section element,
    setting levels from `params` individually.
    """
    counts = section.dict()
    counts["total"] = sum(counts.values())
    for name, value in counts.items():
        # Per-field levels are looked up under "<name>_upper" / "<name>_lower";
        # a missing key means no levels for that direction.
        yield from check_levels(
            value,
            levels_upper=params.get(f"{name}_upper"),
            levels_lower=params.get(f"{name}_lower"),
            metric_name=f"kube_node_container_count_{name}",
            label=f"{name.title()}",
        )


register.agent_section(
    name="kube_node_container_count_v1",
    parsed_section_name="kube_node_container_count",
    parse_function=parse,
)

register.check_plugin(
    name="kube_node_container_count",
    service_name="Containers",
    discovery_function=discovery,
    check_function=check,
    check_ruleset_name="kube_node_container_count",
    check_default_parameters={},
)
# Copyright (C) 2022 tribe29 GmbH - License: GNU General Public License v2 # This file is part of Checkmk (https://checkmk.com). It is subject to the terms and # conditions defined in the file COPYING, which is part of this source code package. from typing import Any, Mapping, Optional from cmk.base.plugins.agent_based.agent_based_api.v1 import register, render, Service, ServiceLabel from cmk.base.plugins.agent_based.utils import gcp from .agent_based_api.v1.type_defs import CheckResult, DiscoveryResult, StringTable def parse(string_table: StringTable) -> gcp.Section: return gcp.parse_gcp(string_table, "instance_name") register.agent_section(name="gcp_service_filestore", parse_function=parse) def discover( section_gcp_service_filestore: Optional[gcp.Section], section_gcp_assets: Optional[gcp.AssetSection], ) -> DiscoveryResult: if section_gcp_assets is None: return asset_type = "file.googleapis.com/Instance" shares = [a for a in section_gcp_assets if a.asset.asset_type == asset_type] for share in shares: data = share.asset.resource.data item = data["name"].split("/")[-1] labels = [ ServiceLabel("gcp/location", share.asset.resource.location),
... '"namespace": "have-mercy",' ... '"labels": {},' ... '"annotations": {},' ... '"selector": {"match_labels": {}, "match_expressions": [{"key": "app", "operator": "In", "values": ["sleep"]}]},' ... '"creation_timestamp": 1638798546.0,' ... '"containers": {"images": ["i/name:0.5"], "names": ["name"]},' ... '"cluster": "sweet-jesus"}' ... ]]) StatefulSetInfo(name='oh-lord', namespace='have-mercy', labels={}, annotations={}, selector=Selector(match_labels={}, match_expressions=[{'key': 'app', 'operator': 'In', 'values': ['sleep']}]), creation_timestamp=1638798546.0, containers=ThinContainers(images=frozenset({'i/name:0.5'}), names=['name']), cluster='sweet-jesus') """ return StatefulSetInfo(**json.loads(string_table[0][0])) register.agent_section( name="kube_statefulset_info_v1", parsed_section_name="kube_statefulset_info", parse_function=parse, host_label_function=host_labels("statefulset"), ) def discovery(section: StatefulSetInfo) -> DiscoveryResult: yield Service() def check_kube_statefulset_info(section: StatefulSetInfo) -> CheckResult: yield from check_info({ "name": section.name, "namespace": section.namespace, "creation_timestamp": section.creation_timestamp, })
return yield HostLabel("cmk/kubernetes/object", "deployment") yield HostLabel("cmk/kubernetes/namespace", section.namespace) yield HostLabel("cmk/kubernetes/deployment", section.name) for container in section.containers: yield HostLabel("cmk/container_name", container) for image in section.images: yield HostLabel("cmk/container_image", image) register.agent_section( name="kube_deployment_info_v1", parsed_section_name="kube_deployment_info", parse_function=parse, host_label_function=host_labels, ) def discovery(section: DeploymentInfo) -> DiscoveryResult: yield Service() def check_kube_deployment_info(section: DeploymentInfo) -> CheckResult: yield from check_info({ "name": section.name, "namespace": section.namespace, "creation_timestamp": section.creation_timestamp, })
data[item]['OpenSlots'] = value.count('.') data[item][label] = value # Count number of total slots after all needed infos are present if 'OpenSlots' in data[item] and 'IdleWorkers' in data[item] \ and 'BusyWorkers' in data[item]: data[item]['TotalSlots'] = data[item]['OpenSlots'] \ + data[item]['IdleWorkers'] \ + data[item]['BusyWorkers'] return data register.agent_section( name="apache_status", parse_function=apache_status_parse, ) def discover_apache_status(section: Section) -> DiscoveryResult: for item in section: yield Service(item=item) def check_apache_status(item: str, params: Parameters, section: Section) -> CheckResult: if item.endswith(":None"): # fix item name discovered before werk 2763 item = item[:-5] data = section.get(item) if data is None:
StringTable, ) from cmk.base.plugins.agent_based.utils.kube import PodLifeCycle def parse_kube_pod_lifecycle(string_table: StringTable) -> PodLifeCycle: """ >>> parse_kube_pod_lifecycle([['{"phase": "running"}']]) PodLifeCycle(phase=<Phase.RUNNING: 'running'>) """ return PodLifeCycle(**json.loads(string_table[0][0])) register.agent_section( name="kube_pod_lifecycle_v1", parse_function=parse_kube_pod_lifecycle, parsed_section_name="kube_pod_lifecycle", ) def discovery_kube_pod_phase(section: PodLifeCycle) -> DiscoveryResult: yield Service() def check_kube_pod_phase(section: PodLifeCycle) -> CheckResult: yield Result(state=State.OK, summary=section.phase.title()) register.check_plugin( name="kube_pod_phase", service_name="Phase",
CollectorDaemons, CollectorHandlerLog, CollectorProcessingLogs, CollectorState, NodeComponent, ) # TODO: change section from info to components def parse_collector_processing_logs(string_table: StringTable) -> CollectorProcessingLogs: return CollectorProcessingLogs(**json.loads(string_table[0][0])) register.agent_section( name="kube_collector_processing_logs_v1", parsed_section_name="kube_collector_processing_logs", parse_function=parse_collector_processing_logs, ) def parse_collector_metadata(string_table: StringTable) -> CollectorComponentsMetadata: return CollectorComponentsMetadata(**json.loads(string_table[0][0])) register.agent_section( name="kube_collector_metadata_v1", parsed_section_name="kube_collector_metadata", parse_function=parse_collector_metadata, )
return # timestamps and timezones... age = max(time.time() - min(section["snaptimes"]), 0) yield from check_levels( age, levels_upper=params["oldest_levels"], metric_name="age", render_func=render.timespan, label="Age", boundaries=params["oldest_levels"], ) register.agent_section( name="proxmox_ve_vm_snapshot_age", parse_function=parse_proxmox_ve_snapshot_age, ) register.check_plugin( name="proxmox_ve_vm_snapshot_age", service_name="Proxmox VE VM Snapshot age", discovery_function=discover_single, check_function=check_proxmox_ve_snapshot_age, check_ruleset_name="proxmox_ve_vm_snapshot_age", check_default_parameters={ "oldest_levels": ( 60 * 60 * 24 * 1, 60 * 60 * 24 * 2, ) }, )
cond.reason, cond.detail), ) else: yield Result( state=State. CRIT, # TODO: change valuespec in a way to support user-defined type-to-state mappings summary=condition_detailed_description(cond.type_, cond.status, cond.reason, cond.detail), ) register.agent_section( name="kube_node_conditions_v1", parsed_section_name="kube_node_conditions", parse_function=parse_node_conditions, ) register.agent_section( name="kube_node_custom_conditions_v1", parsed_section_name="kube_node_custom_conditions", parse_function=parse_node_custom_conditions, ) register.check_plugin( name="kube_node_conditions", service_name="Condition", sections=["kube_node_conditions", "kube_node_custom_conditions"], discovery_function=discovery, check_function=check,
# The ReleaseDate property indicates the release date of the # Win32 BIOS in the Coordinated Universal Time (UTC) format # of YYYYMMDDHHMMSS.MMMMMM(+-)OOO. date = value.replace("*", "0").split(".", maxsplit=1)[0] section["date"] = int( time.mktime(time.strptime(date, "%Y%m%d%H%M%S"))) elif varname == "Manufacturer": section["vendor"] = value elif varname == "Name": section["model"] = value return section register.agent_section( name="win_bios", parse_function=parse_win_bios, ) def inventory_win_bios(section: Mapping[str, Union[str, int]]): attr = { k: section[k] for k in ("date", "model", "vendor", "version") if k in section } with suppress(KeyError): attr[ "version"] = f"{section['smbios_version']} {section['major_version']}.{section['minor_version']}" yield Attributes( path=["software", "bios"],
def parse_kube_statefulset_strategy( string_table: StringTable) -> StatefulSetStrategy: """ >>> parse_kube_statefulset_strategy([['{"strategy": {"type_": "OnDelete"}}']]) StatefulSetStrategy(strategy=OnDelete(type_='OnDelete')) >>> parse_kube_statefulset_strategy([['{"strategy": {"type_": "RollingUpdate", "partition": 0}}']]) StatefulSetStrategy(strategy=StatefulSetRollingUpdate(type_='RollingUpdate', partition=0)) """ return StatefulSetStrategy(**json.loads(string_table[0][0])) register.agent_section( name="kube_statefulset_strategy_v1", parsed_section_name="kube_statefulset_strategy", parse_function=parse_kube_statefulset_strategy, ) def inventory_kube_statefulset( section_kube_statefulset_info: Optional[StatefulSetInfo], section_kube_statefulset_strategy: Optional[StatefulSetStrategy], ) -> InventoryResult: if section_kube_statefulset_info is None or section_kube_statefulset_strategy is None: return selector = section_kube_statefulset_info.selector yield Attributes( path=["software", "applications", "kube", "statefulset"], inventory_attributes={ "name":
>>> parse([[ ... '{"logs": [' ... '{"status": "ok", ' ... '"component": "Container Metrics", ' ... '"message": "message", ' ... '"detail": "detail"}]}' ... ]]) CollectorLogs(logs=[CollectorLog(component='Container Metrics', status=<CollectorState.OK: 'ok'>, message='message', detail='detail')]) """ return CollectorLogs(**json.loads(string_table[0][0])) register.agent_section( name="kube_collector_connection_v1", parsed_section_name="kube_collector_connection", parse_function=parse, ) def discover(section: CollectorLogs) -> DiscoveryResult: yield Service() def check(section: CollectorLogs) -> CheckResult: for entry in section.logs: if entry.status == CollectorState.OK: yield Result(state=State.OK, summary=f"{entry.component}: OK") continue component_message = f"{entry.component}: {entry.message}"
# Expected status per deployment condition: "replicafailure" is healthy when
# False, the others when True.
CONDITIONS_OK_MAPPINGS = {
    "available": ConditionStatus.TRUE,
    "progressing": ConditionStatus.TRUE,
    "replicafailure": ConditionStatus.FALSE,
}


def parse(string_table: StringTable) -> DeploymentConditions:
    """Parses `string_table` into a DeploymentConditions instance"""
    return DeploymentConditions(**json.loads(string_table[0][0]))


register.agent_section(
    name="kube_deployment_conditions_v1",
    parsed_section_name="kube_deployment_conditions",
    parse_function=parse,
)


def discovery(section: DeploymentConditions) -> DiscoveryResult:
    """Always a single service."""
    yield Service()


def condition_levels(params: Mapping[str, VSResultAge], condition: str) -> Optional[Tuple[int, int]]:
    """Return the (warn, crit) tuple configured for `condition`, or None."""
    levels = params.get(condition, "no_levels")
    if levels == "no_levels":
        return None
    # configured value is a tagged tuple ("levels", (warn, crit))
    return levels[1]
section_kube_allocatable_cpu_resource: Optional[AllocatableResource], ) -> CheckResult: assert section_kube_cpu_resources is not None yield from check_resource( params, section_kube_performance_cpu, section_kube_cpu_resources, section_kube_allocatable_cpu_resource, "cpu", lambda x: f"{x:0.3f}", ) register.agent_section( name="kube_performance_cpu_v1", parsed_section_name="kube_performance_cpu", parse_function=parse_performance_usage, ) register.agent_section( name="kube_cpu_resources_v1", parsed_section_name="kube_cpu_resources", parse_function=parse_resources, ) register.agent_section( name="kube_allocatable_cpu_resource_v1", parsed_section_name="kube_allocatable_cpu_resource", parse_function=parse_allocatable_resource, )
render, Result, Service, ServiceLabel, State, ) from cmk.base.plugins.agent_based.utils import gcp from .agent_based_api.v1.type_defs import CheckResult, DiscoveryResult, StringTable def parse(string_table: StringTable) -> gcp.Section: return gcp.parse_gcp(string_table, "database_id", extract=lambda x: x.split(":")[-1]) register.agent_section(name="gcp_service_cloud_sql", parse_function=parse) def discover( section_gcp_service_cloud_sql: Optional[gcp.Section], section_gcp_assets: Optional[gcp.AssetSection], ) -> DiscoveryResult: if section_gcp_assets is None: return asset_type = "sqladmin.googleapis.com/Instance" services = [a for a in section_gcp_assets if a.asset.asset_type == asset_type] for service in services: data = service.asset.resource.data item = data["name"] labels = [ ServiceLabel(f"gcp/labels/{k}", v) for k, v in data["settings"]["userLabels"].items()
yield Result( state=State.OK if not req_subs_status or subs_status == req_subs_status else State.WARN, summary=(f"Subscription: {subs_status}" f"{req_subs_status and f' (required: {req_subs_status})'}"), ) yield Result(state=State.OK, summary=f"Version: {proxmox_ve_version}") yield Result( state=State.OK, summary=(f"Hosted VMs: {len(section.get('lxc', []))}x LXC," f" {len(section.get('qemu', []))}x Qemu"), ) register.agent_section( name="proxmox_ve_node_info", parse_function=parse_proxmox_ve_node_info, ) register.check_plugin( name="proxmox_ve_node_info", service_name="Proxmox VE Node Info", discovery_function=discover_single, check_function=check_proxmox_ve_node_info, check_ruleset_name="proxmox_ve_node_info", check_default_parameters={ "required_node_status": None, "required_subscription_status": None, }, )
def parse_sansymphony_pool(string_table: StringTable) -> Section:
    """Build a mapping from pool name to its SansymphonyPool record."""
    return {
        name: SansymphonyPool(
            name=name,
            percent_allocated=float(percent_allocated),
            status=status,
            cache_mode=cache_mode,
            pool_type=type_,
        )
        for name, percent_allocated, status, cache_mode, type_ in string_table
    }


register.agent_section(
    name="sansymphony_pool",
    parse_function=parse_sansymphony_pool,
)


def discover_sansymphony_pool(section: Section) -> DiscoveryResult:
    """One service per pool."""
    for item in section:
        yield Service(item=item)


def check_sansymphony_pool(
    item: str,
    params: Mapping[str, Any],
    section: Section,
) -> CheckResult:
    if (pool := section.get(item)) is None:
        return
    # NOTE(review): the check function is truncated here; its remainder lies
    # outside this chunk.
# Copyright (C) 2022 tribe29 GmbH - License: GNU General Public License v2 # This file is part of Checkmk (https://checkmk.com). It is subject to the terms and # conditions defined in the file COPYING, which is part of this source code package. from typing import Any, Mapping, Optional from cmk.base.plugins.agent_based.agent_based_api.v1 import register, render, Service, ServiceLabel from cmk.base.plugins.agent_based.utils import gcp from .agent_based_api.v1.type_defs import CheckResult, DiscoveryResult, StringTable def parse_gcp_function(string_table: StringTable) -> gcp.Section: return gcp.parse_gcp(string_table, "function_name") register.agent_section(name="gcp_service_cloud_functions", parse_function=parse_gcp_function) service_namer = gcp.service_name_factory("Function") def discover( section_gcp_service_cloud_functions: Optional[gcp.Section], section_gcp_assets: Optional[gcp.AssetSection], ) -> DiscoveryResult: if section_gcp_assets is None: return asset_type = "cloudfunctions.googleapis.com/CloudFunction" functions = [ a for a in section_gcp_assets if a.asset.asset_type == asset_type ] for function in functions:
parsed = [] for msg_list in string_table: try: name, severity, server, timeCreated_iso = msg_list[0:4] message = " ".join(msg_list[5:]) parsed.append( SplunkMessage(name, severity, server, timeCreated_iso, message)) except (IndexError, ValueError): pass return parsed register.agent_section(name="splunk_system_msg", parse_function=parse) def discovery(section: Section) -> DiscoveryResult: yield Service() def check(section: Section) -> CheckResult: if not section: yield Result(state=State.OK, summary="No open messages") return data = section for msg in data: state = _handle_severity(msg.severity)
... for l in s: ... print(l) == (('/dev', 777.00390625, 777.00390625, 0.0), ('/persist', 12371.9765625, 11396.71484375, 623.51953125), ('/dev', 777.00390625, 777.00390625, 0.0)) {'/dev': {'volume_name': 'devtmpfs', 'fs_type': None}, '/persist': {'volume_name': '/dev/sda5', 'fs_type': None}} == ('/dev', 198913, 198548) ('/', 65536, 40003) ('/persist', 799680, 799562) """ blocks_subsection: StringTable = [] inodes_subsection: StringTable = [] current_list = blocks_subsection for line in string_table: if line[-1] == '[df_inodes_start]': current_list = inodes_subsection continue if line[-1] == '[df_inodes_end]': current_list = blocks_subsection continue current_list.append(line) return parse_blocks_subsection(blocks_subsection), parse_inodes_subsection(inodes_subsection) register.agent_section( name="df", parse_function=parse_df, supersedes=['hr_fs'], )
# server2 ... mydb_backup ... SYNCHRONISED ... MIRROR ... mydb ... # This leads to duplicate services and alerts in the event that services are not clustered. # To avoid this, mirrors are actively skipped. Also, the monitoring user that queries the # databases' mirroring status needs extended permissions to view any mirroring role other # than "PRINCIPAL". These include destructive permissions, which should not be given to the # user. Mirros are still skipped to be safe. continue mirroring_config = MirroringConfig(*_convert_datatypes(raw_configline)) section[mirroring_config.database_name] = mirroring_config return section register.agent_section( name='mssql_mirroring', parse_function=parse_mssql_mirroring, ) def discover_mssql_mirroring(section: MirroringSection) -> DiscoveryResult: yield from (Service(item=database_name) for database_name in section) def check_mssql_mirroring( item: str, params: Mapping[str, int], # the int is actually a Checkmk state section: MirroringSection, ) -> CheckResult: mirroring_config = section.get(item) if not mirroring_config:
if cond["status"] is True: yield Result(state=State.OK, summary=cond_service_text.passed) continue summary_prefix = f"{cond_service_text.not_passed} ({cond['reason']}: {cond['detail']})" else: summary_prefix = cond_service_text.not_passed for result in check_levels(time_diff, levels_upper=get_levels_for(params, name), render_func=render.timespan): yield Result(state=result.state, summary=f"{summary_prefix} for {result.summary}") register.agent_section( name="k8s_pod_conditions_v1", parsed_section_name="k8s_pod_conditions", parse_function=parse, ) register.check_plugin( name="k8s_pod_conditions", service_name="Condition", discovery_function=discovery, check_function=check, check_default_parameters=dict( scheduled="no_levels", initialized="no_levels", containersready="no_levels", ready="no_levels", ), check_ruleset_name="k8s_pod_conditions",
metric_name="age", render_func=render.timespan, label="Age", ) yield Result(state=State.OK, summary=f"Time: {last_backup.get('started_time')}") yield Result(state=State.OK, summary=f"Size: {render.bytes(last_backup['archive_size'])}") transfer_size = last_backup.get("transfer_size", last_backup.get("archive_size", 0)) yield Result( state=State.OK, summary=f"Bandwidth: {render.iobandwidth(transfer_size / last_backup['transfer_time'])}", ) register.agent_section( name="proxmox_ve_vm_backup_status", parse_function=parse_proxmox_ve_vm_backup_status, ) def check_proxmox_ve_vm_backup_status_unpure(params: Mapping[str, Any], section: Section) -> CheckResult: """Because of datetime.now() this function is not testable. Test check_proxmox_ve_vm_backup_status() instead.""" yield from check_proxmox_ve_vm_backup_status(datetime.now(), params, section) register.check_plugin( name="proxmox_ve_vm_backup_status", service_name="Proxmox VE VM Backup Status", discovery_function=discover_single, check_function=check_proxmox_ve_vm_backup_status_unpure,
if section.node is not None: yield HostLabel("cmk/kubernetes/node", section.node) if section.namespace: yield HostLabel("cmk/kubernetes/namespace", section.namespace) for controller in section.controllers: yield HostLabel(f"cmk/kubernetes/{controller.type_.value}", controller.name) yield from kube_labels_to_cmk_labels(section.labels) register.agent_section( name="kube_pod_info_v1", parsed_section_name="kube_pod_info", parse_function=parse_kube_pod_info, host_label_function=host_labels, ) def discovery_kube_pod_info(section: PodInfo) -> DiscoveryResult: yield Service() def check_kube_pod_info(section: PodInfo) -> CheckResult: # To get an understanding of API objects this check deals with, one can take a look at # PodInfo and the definition of its fields if section.namespace is None: raise KubernetesError("Pod has no namespace")
""" >>> parse_kube_pod_resources([[ ... '{"running": ["checkmk-cluster-agent", "storage-provisioner"],' ... ' "pending": ["success2"], "succeeded":' ... ' ["hello-27303194--1-9vtft"],' ... ' "failed": [], ' ... '"unknown": []}' ... ]]) PodResources(running=['checkmk-cluster-agent', 'storage-provisioner'], pending=['success2'], succeeded=['hello-27303194--1-9vtft'], failed=[], unknown=[]) """ return PodResources(**json.loads(string_table[0][0])) register.agent_section( name="kube_pod_resources_v1", parse_function=parse_kube_pod_resources, parsed_section_name="kube_pod_resources", ) def parse_kube_allocatable_pods(string_table: StringTable): """ >>> parse_kube_allocatable_pods([[ ... '{"kubernetes_object": "cluster",' ... '"capacity": 110,' ... '"allocatable": 110}' ... ]]) AllocatablePods(kubernetes_object='cluster', capacity=110, allocatable=110) """ return AllocatablePods(**json.loads(string_table[0][0]))
total_bytes, boundaries=(0, None), ) yield Metric( "fs_used_percent", 100.0 * used_bytes / total_bytes, levels=(warn, crit), boundaries=(0.0, 100.0), ) yield Result(state=(State.CRIT if used_bytes >= crit_bytes else State.WARN if used_bytes >= warn_bytes else State.OK), summary="%s used (%s of %s)" % (render.percent(100.0 * used_bytes / total_bytes), render.disksize(used_bytes), render.disksize(total_bytes))) register.agent_section( name="proxmox_ve_disk_usage", parse_function=parse_proxmox_ve_disk_usage, ) register.check_plugin( name="proxmox_ve_disk_usage", service_name="Proxmox VE Disk Usage", discovery_function=discover_single, check_function=check_proxmox_ve_disk_usage, check_ruleset_name="proxmox_ve_disk_percentage_used", check_default_parameters={"levels": (80., 90.)}, )
def parse(string_table: StringTable) -> Resources:
    """Parses limit and requests values into Resources"""
    return Resources(**json.loads(string_table[0][0]))


def discovery(section: Resources) -> DiscoveryResult:
    """Always a single service."""
    yield Service()


def check(params: Dict[str, Tuple[int, int]], section: Resources) -> CheckResult:
    """Report the configured CPU limit and requests; purely informational."""
    yield Result(state=State.OK, summary=f"Limit: {section.limit}")
    yield Result(state=State.OK, summary=f"Requests: {section.requests}")


# TODO: suggest a new name for section
register.agent_section(
    name="kube_cpu_resources_v1",
    parsed_section_name="kube_cpu_resources",
    parse_function=parse,
)

register.check_plugin(
    name="kube_cpu_resources",
    service_name="CPU Load",
    discovery_function=discovery,
    check_function=check,
    check_default_parameters={},
)