def check_storeonce_space(item, params, values):
    """Check capacity of a StoreOnce store, with optional cloud/local split.

    Yields a df-style filesystem result plus informational lines for the
    cloud/local capacity breakdown and the deduplication ratio.
    """
    total_bytes, cloud_bytes, local_bytes = _get_storeonce_space_values(values, "Capacity")
    free_bytes, free_cloud_bytes, free_local_bytes = _get_storeonce_space_values(
        values, "Free Space")

    # df_check_filesystem_list works on megabytes
    factor = 1024 * 1024
    yield df_check_filesystem_list(
        item, params,
        [(item, total_bytes / factor, free_bytes / factor, 0)])  # fixed: true-division

    if cloud_bytes:
        yield 0, "Total cloud: %s" % get_bytes_human_readable(cloud_bytes)
    if local_bytes:
        yield 0, "Total local: %s" % get_bytes_human_readable(local_bytes)
    if free_cloud_bytes:
        yield 0, "Free cloud: %s" % get_bytes_human_readable(free_cloud_bytes)
    if free_local_bytes:
        yield 0, "Free local: %s" % get_bytes_human_readable(free_local_bytes)

    # Newer firmware reports "dedupeRatio", older ones "Deduplication Ratio".
    dedupl_ratio_str = values.get('Deduplication Ratio') or values.get("dedupeRatio")
    if dedupl_ratio_str is not None:
        ratio = float(dedupl_ratio_str)
        yield 0, "Dedup ratio: %.2f" % ratio, [("dedup_rate", ratio)]
def check_memory_element(
    label,
    used,
    total,
    levels,
    label_total="",
    show_free=False,
    metric_name=None,
    create_percent_metric=False,
):
    """Return a check result for one memory element"""
    # Either report the free amount or the used amount, depending on show_free.
    display_value, display_suffix = (total - used, " free") if show_free else (used, "")

    infotext = "%s: %s%s - %s of %s%s" % (
        label,
        get_percent_human_readable(100.0 * display_value / total),
        display_suffix,
        get_bytes_human_readable(display_value, base=1024),
        get_bytes_human_readable(total, base=1024),
        (" %s" % label_total).rstrip(),
    )

    try:
        mode, (warn, crit) = levels
    except (ValueError, TypeError):  # handle None, "ignore"
        mode, (warn, crit) = "ignore", (None, None)

    warn, crit, levels_text = normalize_mem_levels(mode, warn, crit, total)
    state = _compute_state(used, warn, crit)
    if state and levels_text:
        infotext = "%s (%s)" % (infotext, levels_text)

    perf = []
    if metric_name:
        perf.append((metric_name, used, warn, crit, 0, total))
    if create_percent_metric:
        scale_to_perc = 100.0 / total
        perf.append((
            "mem_used_percent",
            used * scale_to_perc,
            None if warn is None else warn * scale_to_perc,
            None if crit is None else crit * scale_to_perc,
            0,
            None,  # no upper bound: usage can exceed 100%!
        ))
    return state, infotext, perf
def check_memory_simple(used, total, params):
    """Check a single memory value against percent-used or absolute-free levels.

    ``params`` may be an old-style ``(warn_perc, crit_perc)`` tuple, a dict
    with a ``"levels"`` entry, or falsy (no levels at all).
    """
    # Convert old-style tuple params to dict
    if params:
        if isinstance(params, tuple):
            params = {"levels": ("perc_used", params)}
    else:
        params = {"levels": ("ignore")}

    perc_used = (float(used) / total) * 100
    infotext = "Usage: %s (Used: %s, Total: %s)" % (
        get_percent_human_readable(perc_used),
        get_bytes_human_readable(used),
        get_bytes_human_readable(total),
    )

    levels_mode = params["levels"][0]
    if levels_mode == "perc_used":
        warn_perc, crit_perc = params["levels"][1]
        warn_abs = (warn_perc / 100.0) * total
        crit_abs = (crit_perc / 100.0) * total
        levelstext = " (warn/crit at %s/%s used)" % (
            get_percent_human_readable(warn_perc),
            get_percent_human_readable(crit_perc),
        )
    elif levels_mode == "abs_free":
        warn_abs_free, crit_abs_free = params["levels"][1]
        warn_abs = total - warn_abs_free
        crit_abs = total - crit_abs_free
        levelstext = " (warn/crit below %s/%s free)" % (
            get_bytes_human_readable(warn_abs_free),
            get_bytes_human_readable(crit_abs_free),
        )
    else:
        # No levels imposed, ie. params = {'levels': 'ignore'}
        warn_abs = crit_abs = None
        levelstext = ""

    if crit_abs is not None and used >= crit_abs:
        status = 2
    elif warn_abs is not None and used >= warn_abs:
        status = 1
    else:
        status = 0

    if status:
        infotext += levelstext

    return status, infotext, [("memory_used", used, warn_abs, crit_abs, 0, total)]
def check_hp_proliant_mem(item, params, info):
    """Yield board/num/type/size plus status and condition for one memory module."""
    for line in info:
        if line[1] != item:
            continue
        # Note: mgmt_hp_proliant_mem provides exact 6 values;
        # hp_proliant provides 10 values because related inventory plugin
        # needs the last 4.
        board_index, module_index, module_size, module_type, \
            module_status, module_condition = line[:6]

        yield 0, f"Board: {board_index}"
        yield 0, f"Num: {module_index}"

        type_ = MAP_TYPES_MEMORY.get(module_type, f"unknown({module_type})")
        yield 0, f"Type: {type_}"

        module_size = get_bytes_human_readable(int(module_size) * 1024)
        yield 0, f"Size: {module_size}"

        # Unknown SNMP codes fall back to 'n/a'
        snmp_status = hp_proliant_mem_status_map.get(int(module_status), 'n/a')
        status_output = f"Status: {snmp_status}"
        yield hp_proliant_mem_status2nagios_map[snmp_status], status_output

        condition = hp_proliant_mem_condition_map.get(saveint(module_condition), 'n/a')
        condition_output = f"Condition: {condition}"
        yield hp_proliant_mem_condition_status2nagios_map[condition], condition_output
def check_memory_multiitem(params, data, base=1024):
    """Check one memory item from a dict with mem_total and mem_used/mem_avail.

    Integer levels are interpreted as absolute bytes, everything else as a
    percentage of mem_total.
    """
    if "mem_total" not in data:
        return 3, "Invalid data: missing mem_total"
    mem_total = data["mem_total"]

    if "mem_used" in data:
        mem_used = data["mem_used"]
        mem_avail = mem_total - mem_used
    elif "mem_avail" in data:
        mem_avail = data["mem_avail"]
        mem_used = mem_total - mem_avail
    else:
        return 3, "Invalid data: missing mem_used or mem_avail sizes"

    infotext = "%s used (%s of %s)" % (
        get_percent_human_readable(float(mem_used) / float(mem_total) * 100),
        get_bytes_human_readable(mem_used, base=base),
        get_bytes_human_readable(mem_total, base=base),
    )

    state = 0
    if "levels" not in params:
        return state, infotext, [("memused", mem_used, None, None, 0, mem_total)]

    warn, crit = params["levels"]
    # int -> absolute bytes, float -> percentage of total
    warn_absolute = warn if isinstance(warn, int) else int(mem_total * warn / 100)
    crit_absolute = crit if isinstance(crit, int) else int(mem_total * crit / 100)

    if mem_used > crit_absolute:
        state = 2
    elif mem_used > warn_absolute:
        state = 1

    if state:
        infotext += " (warn/crit at %s/%s)" % (
            get_bytes_human_readable(warn_absolute),
            get_bytes_human_readable(crit_absolute),
        )

    return state, infotext, [("memused", mem_used, warn_absolute, crit_absolute, 0, mem_total)]
def check_juniper_mem_generic(_no_item, params, info):
    """Check Juniper device memory; the SNMP values are reported in kB."""
    usage_kb, mem_size_kb = map(int, info[0])  # Kilobyte
    mem_size = mem_size_kb * 1024
    usage = usage_kb * 1024
    usage_perc = (float(usage_kb) / mem_size_kb) * 100

    # params are percentage thresholds
    warn, crit = params
    warn_kb = (mem_size_kb / 100.0) * warn
    crit_kb = (mem_size_kb / 100.0) * crit
    perf = [("mem_used", usage, warn_kb * 1024, crit_kb * 1024, 0, mem_size)]

    message = "Used: %s/%s (%.0f%%)" % (
        get_bytes_human_readable(usage),
        get_bytes_human_readable(mem_size),
        usage_perc,
    )

    if usage_perc >= warn:
        levels = " (warn/crit at %.0f%%/%0.f%%)" % (warn, crit)
        return (2 if usage_perc >= crit else 1), message + levels, perf
    return 0, message, perf
def check_dell_powervault_me4_controller_statistics(item: str, params: Mapping[str, Any],
                                                    section) -> CheckResult:
    """Report IOPS and throughput statistics for one ME4 controller.

    Args:
        item: Controller name, used as key into the parsed section.
        params: Rule parameters (currently unused, kept for API compatibility).
        section: Mapping of controller name to its statistics dict.
    """
    data = section.get(item)
    if data is None:
        # Item vanished between discovery and check: yield nothing so the
        # service goes stale instead of crashing with AttributeError on None.
        return

    iops = data.get("iops")
    bytespersecond = data.get("bytes-per-second-numeric")
    data_read = data.get("data-read")
    data_write = data.get("data-written")

    message = "Written data %s and read data %s, IOPS %s/s, Bytes %s/s" % (
        data_write, data_read, iops, get_bytes_human_readable(bytespersecond))
    yield Metric("iops", iops)
    yield Metric("bytes", bytespersecond)
    yield Result(state=State(0), summary=message)
def check_dell_powervault_me4_volume_statistics(item: str, params, section) -> CheckResult:
    """Report tier usage, IOPS and throughput statistics for one ME4 volume.

    Args:
        item: Volume name, used as key into the parsed section.
        params: Rule parameters (currently unused, kept for API compatibility).
        section: Mapping of volume name to its statistics dict.
    """
    data = section.get(item)
    if data is None:
        # Item vanished between discovery and check: yield nothing so the
        # service goes stale instead of crashing with AttributeError on None.
        return

    sas_percent = data.get("percent-tier-sas")
    sata_percent = data.get("percent-tier-sata")
    ssd_percent = data.get("percent-tier-ssd")
    iops = data.get("iops")
    bytespersecond = data.get("bytes-per-second-numeric")

    message = "Usage SSD: %s%%, SAS %s%%, SATA %s%%, IOPS %s/s, Bytes %s/s" % (
        ssd_percent, sas_percent, sata_percent, iops, get_bytes_human_readable(bytespersecond))
    yield Metric("ssd_usage", ssd_percent)
    yield Metric("sas_usage", sas_percent)
    yield Metric("sata_usage", sata_percent)
    yield Metric("iops", iops)
    yield Metric("bytes", bytespersecond)
    yield Result(state=State(0), summary=message)
def aws_get_bytes_rate_human_readable(rate):
    """Render a byte rate as human readable text with a per-second suffix."""
    rendered = get_bytes_human_readable(rate)
    return rendered + "/s"
import functools  # fixed: functools.wraps is used below but was never imported
import time
from typing import Any, List, NamedTuple

from cmk.base.check_api import (
    check_levels,
    get_bytes_human_readable,
    get_percent_human_readable,
    get_rate,
    MKCounterWrapped,
)

# Renderers for Azure metric values, keyed by the metric's unit name.
_AZURE_METRIC_FMT = {
    "count": lambda n: "%d" % n,
    "percent": get_percent_human_readable,
    "bytes": get_bytes_human_readable,
    "bytes_per_second": lambda b: "%s/s" % get_bytes_human_readable(b),
    "seconds": lambda s: "%.2f s" % s,
    "milli_seconds": lambda ms: "%d ms" % (ms * 1000),
}


def get_data_or_go_stale(check_function):
    """Variant of get_parsed_item_data that raises MKCounterWrapped
    if data is not found.
    """

    @functools.wraps(check_function)
    def wrapped_check_function(item, params, parsed):
        if not isinstance(parsed, dict):
            return 3, "Wrong usage of decorator: parsed is not a dict"
        if item not in parsed or not parsed[item]:
            raise MKCounterWrapped("Data not present at the moment")
        # fixed: delegate to the wrapped check with the item's payload -- the
        # original body ended after the raise, so the wrapper silently
        # returned None for valid items and the decorator returned None.
        return check_function(item, params, parsed[item])

    return wrapped_check_function
def size_trend(
    check,
    item,
    resource,
    levels,
    used_mb,
    size_mb: float,
    timestamp=None,
):  # pylint: disable=function-redefined
    """Trend computation for size related checks of disks, ram, etc.

    Trends are computed in two steps. In the first step the delta to
    the last check is computed, using a normal check_mk counter.
    In the second step an average over that counter is computed to
    make a long-term prediction.

    Note:
      This function is experimental and may change in future releases.
      Use at your own risk!

    Args:
      check (str): The name of the check, e.g. "df".
      item (str): The name of the item, e.g. the mountpoint "/" for df.
      resource (str): The resource in question, e.g. "disk", "ram", "swap".
      levels (dict): Level parameters for the trend computation. Items:
          "trend_range"          : 24,       # interval for the trend in hours
          "trend_perfdata"       : True      # generate performance data for trends
          "trend_bytes"          : (10, 20), # change during trend_range
          "trend_shrinking_bytes": (16, 32), # Bytes of shrinking during trend_range
          "trend_perc"           : (1, 2),   # percent change during trend_range
          "trend_shrinking_perc" : (1, 2),   # percent decreasing change during trend_range
          "trend_timeleft"       : (72, 48)  # time left in hours until full
          "trend_showtimeleft    : True      # display time left in infotext
        The item "trend_range" is required. All other items are optional.
      timestamp (float, optional): Time in secs used to calculate the rate
        and average. Defaults to "None".
      used_mb (float): Used space in MB.
      size_mb (float): Max. available space in MB.

    Returns:
      A tuple of (state, infotext, perfdata) for the trend computation.
      If a MKCounterWrapped occurs (i.e. there is not enough data
      present for the trend computation) the tuple (0, '', []) is
      returned.
    """
    # Declared shape of the perfdata entries: 2-tuples or full 6-tuples.
    perfdata: List[
        Union[  #
            Tuple[str, float],  #
            Tuple[str, float, Optional[float], Optional[float], Optional[float], Optional[float]],
        ]
    ]
    state, infotext, perfdata, problems = 0, "", [], []

    MB = 1024.0 * 1024.0
    H24 = 60 * 60 * 24

    range_hours = levels["trend_range"]
    range_sec = range_hours * 3600.0
    if not timestamp:
        timestamp = time.time()

    # compute current rate in MB/s by computing delta since last check
    try:
        rate = get_rate(
            "%s.%s.delta" % (check, item), timestamp, used_mb, allow_negative=True, onwrap=RAISE
        )
    except MKCounterWrapped:
        # need more data for computing a trend
        return 0, "", []

    if levels.get("trend_perfdata"):
        perfdata.append(("growth", rate * H24))

    # average trend in MB/s, initialized with zero (by default)
    rate_avg = get_average("%s.%s.trend" % (check, item), timestamp, rate, range_sec / 60.0)

    # projected change (in MB) over one full trend_range interval
    trend = rate_avg * range_sec
    sign = "+" if trend > 0 else ""
    infotext += ", trend: %s%s / %g hours" % (
        sign,
        get_bytes_human_readable(trend * MB),
        range_hours,
    )

    # levels for performance data
    warn_perf: Optional[float] = None
    crit_perf: Optional[float] = None

    # apply levels for absolute growth / interval
    trend_bytes = levels.get("trend_bytes")
    if trend_bytes:
        wa, cr = trend_bytes
        warn_perf, crit_perf = wa / MB, cr / MB
        if trend * MB >= wa:
            # the "(!" marker is completed below with "!" (crit) and ")"
            problems.append(
                "growing too fast (warn/crit at %s/%s per %.1f h)(!"
                % (
                    get_bytes_human_readable(wa),
                    get_bytes_human_readable(cr),
                    range_hours,
                )
            )
            state = max(1, state)
            if trend * MB >= cr:
                state = 2
                problems[-1] += "!"
            problems[-1] += ")"

    # check for shrinking in absolute bytes per interval
    tmp_state, tmp_problem = _check_shrinking(
        trend * MB,
        levels.get("trend_shrinking_bytes"),
        range_hours,
        get_bytes_human_readable,
    )
    if tmp_state > 0:
        state = max(state, tmp_state)
        problems.append(tmp_problem)

    # apply levels for growth relative to filesystem size
    trend_perc: Optional[Tuple[float, float]] = levels.get("trend_perc")
    if trend_perc:
        wa_perc, cr_perc = trend_perc
        wa = wa_perc / 100.0 * size_mb
        cr = cr_perc / 100.0 * size_mb
        if warn_perf is not None:
            # if absolute levels were also set, the stricter (smaller) wins
            assert crit_perf is not None
            warn_perf = min(warn_perf, wa)
            crit_perf = min(crit_perf, cr)
        else:
            warn_perf, crit_perf = wa, cr
        if trend >= wa:
            problems.append(
                "growing too fast (warn/crit at %s/%s per %.1f h)(!"
                % (
                    get_percent_human_readable(wa_perc),
                    get_percent_human_readable(cr_perc),
                    range_hours,
                )
            )
            state = max(1, state)
            if trend >= cr:
                state = 2
                problems[-1] += "!"
            problems[-1] += ")"

    # check for shrinking relative to filesystem size
    tmp_state, tmp_problem = _check_shrinking(
        100 * trend / size_mb,
        levels.get("trend_shrinking_perc"),
        range_hours,
        get_percent_human_readable,
    )
    if tmp_state > 0:
        state = max(state, tmp_state)
        problems.append(tmp_problem)

    # compute time until filesystem is full (only for positive trend, of course)

    # The start value of hours_left is negative. The pnp graph and the perfometer
    # will interpret this as infinite -> not growing
    hours_left = -1
    if trend > 0:

        def format_hours(hours):
            # human readable rendering with coarser units for longer spans
            if hours > 365 * 24:
                return "more than a year"
            elif hours > 90 * 24:
                return "%0d months" % (hours / (30 * 24))  # fixed: true-division
            elif hours > 4 * 7 * 24:  # 4 weeks
                return "%0d weeks" % (hours / (7 * 24))  # fixed: true-division
            elif hours > 7 * 24:  # 1 week
                return "%0.1f weeks" % (hours / (7 * 24))  # fixed: true-division
            elif hours > 2 * 24:  # 2 days
                return "%0.1f days" % (hours / 24)  # fixed: true-division
            return "%d hours" % hours

        hours_left = (size_mb - used_mb) / trend * range_hours
        hours_txt = format_hours(hours_left)

        timeleft = levels.get("trend_timeleft")
        if timeleft:
            wa, cr = timeleft
            if hours_left <= cr:
                state = 2
                problems.append("only %s until %s full(!!)" % (hours_txt, resource))
            elif hours_left <= wa:
                state = max(state, 1)
                problems.append("only %s until %s full(!)" % (hours_txt, resource))
            elif hours_left <= wa * 2 or levels.get("trend_showtimeleft"):
                problems.append("time left until %s full: %s" % (resource, hours_txt))
        elif levels.get("trend_showtimeleft"):
            problems.append("time left until %s full: %s" % (resource, hours_txt))

    if levels.get("trend_perfdata"):
        perfdata.append(
            (
                "trend",
                rate_avg * H24,
                # scale the per-interval levels to a per-day rate
                (warn_perf / range_sec * H24) if warn_perf is not None else None,
                (crit_perf / range_sec * H24) if crit_perf is not None else None,
                0,
                1.0 * size_mb / range_hours,
            )
        )

    if levels.get("trend_showtimeleft"):
        perfdata.append(("trend_hoursleft", hours_left))

    if problems:
        infotext += " - %s" % ", ".join(problems)

    return state, infotext, perfdata
def df_check_filesystem_single_coroutine(
    mountpoint,
    size_mb,
    avail_mb,
    reserved_mb,
    inodes_total,
    inodes_avail,
    params,
    this_time=None,
):
    """Yield df-style results for one filesystem: usage, trend, and inodes.

    All sizes are in MB. ``params`` may be a legacy tuple or a dict; only
    the dict form carries the show_levels/reserved options.
    """
    if size_mb == 0:
        yield 1, "Size of filesystem is 0 MB", []
        return

    show_levels, subtract_reserved, show_reserved = (
        (params.get("show_levels", False),
         params.get("subtract_reserved", False) and reserved_mb > 0,
         params.get("show_reserved") and reserved_mb > 0)
        # params might still be a tuple
        if isinstance(params, dict) else (False, False, False))

    used_mb = size_mb - avail_mb
    used_max = size_mb
    if subtract_reserved:
        # root-reserved space counts neither as used nor as available
        used_mb -= reserved_mb
        used_max -= reserved_mb

    # Get warning and critical levels already with 'magic factor' applied
    levels = get_filesystem_levels(mountpoint, size_mb / 1024., params)
    warn_mb, crit_mb = levels["levels_mb"]

    used_hr = get_bytes_human_readable(used_mb * 1024**2)
    used_max_hr = get_bytes_human_readable(used_max * 1024**2)
    used_perc_hr = get_percent_human_readable(100.0 * used_mb / used_max)

    # If both numbers end with the same unit, then drop the first one
    if used_hr[-2:] == used_max_hr[-2:]:
        used_hr = used_hr[:-3]

    infotext = ["%s used (%s of %s)" % (used_perc_hr, used_hr, used_max_hr)]

    if warn_mb < 0.0:
        # Negative levels, so user configured thresholds based on space left. Calculate the
        # upper thresholds based on the size of the filesystem
        crit_mb = used_max + crit_mb
        warn_mb = used_max + warn_mb

    status = 2 if used_mb >= crit_mb else 1 if used_mb >= warn_mb else 0
    perfdata = [("fs_used", used_mb, warn_mb, crit_mb, 0, size_mb), ('fs_size', size_mb),
                ("fs_used_percent", 100.0 * used_mb / size_mb)]

    if (show_levels == "always" or  #
            (show_levels == "onproblem" and status > 0) or  #
            (show_levels == "onmagic" and (status > 0 or levels.get("magic", 1.0) != 1.0))):
        infotext.append(levels["levels_text"])

    if show_reserved:
        reserved_perc_hr = get_percent_human_readable(100.0 * reserved_mb / size_mb)
        reserved_hr = get_bytes_human_readable(reserved_mb * 1024**2)
        infotext.append("additionally reserved for root: %s" % reserved_hr  #
                        if subtract_reserved else  #
                        "therein reserved for root: %s (%s)" % (reserved_perc_hr, reserved_hr))

    if subtract_reserved:
        perfdata.append(("fs_free", avail_mb, None, None, 0, size_mb))

    if subtract_reserved or show_reserved:
        perfdata.append(("reserved", reserved_mb))

    yield status, ", ".join(infotext), perfdata

    if levels.get("trend_range"):
        trend_state, trend_text, trend_perf = size_trend(
            'df',
            mountpoint,
            "disk",
            levels,
            used_mb,
            size_mb,
            this_time,
        )
        # Todo(frans): Return value from size_trend() can be empty but we must yield a valid result
        # - as soon as we can 'yield from' size_trend we do not have to check any more
        if trend_state or trend_text or trend_perf:
            yield trend_state, trend_text.strip(" ,"), trend_perf or []

    yield from _check_inodes(levels, inodes_total, inodes_avail)
def get_filesystem_levels(mountpoint, size_gb, params):
    """Compute effective warn/crit filesystem levels for one mountpoint.

    Merges factory defaults, the global ``filesystem_default_levels`` and the
    rule ``params`` (legacy tuple or dict), resolves size-dependent level
    lists, applies the 'magic factor' scaling and renders the levels text.

    Returns the merged levels dict with "levels_mb", "levels_text" and
    "inodes_levels" filled in.
    """
    mega = 1024 * 1024
    giga = mega * 1024
    # Start with factory settings
    levels = _FILESYSTEM_DEFAULT_LEVELS.copy()

    def convert_legacy_levels(value):
        # plain (warn, crit) pairs are coerced to floats; flex levels pass through
        if isinstance(params, tuple) or not params.get("flex_levels"):
            return tuple(map(float, value))
        return value

    # convert default levels to dictionary. This is in order support
    # old style levels like (80, 90)
    if isinstance(filesystem_default_levels, dict):
        fs_default_levels = filesystem_default_levels.copy()
        fs_levels = fs_default_levels.get("levels")
        if fs_levels:
            fs_default_levels["levels"] = convert_legacy_levels(fs_levels)
        levels.update(filesystem_default_levels)
    else:
        # legacy tuple defaults: (warn, crit[, magic])
        levels = _FILESYSTEM_DEFAULT_LEVELS.copy()
        levels["levels"] = convert_legacy_levels(filesystem_default_levels[:2])
        if len(filesystem_default_levels) == 2:
            levels["magic"] = None
        else:
            levels["magic"] = filesystem_default_levels[2]

    # If params is a dictionary, make that override the default values
    if isinstance(params, dict):
        levels.update(params)
    else:  # simple format - explicitely override levels and magic
        levels["levels"] = convert_legacy_levels(params[:2])
        if len(params) >= 3:
            levels["magic"] = params[2]

    # Determine real warn, crit levels
    if isinstance(levels["levels"], tuple):
        warn, crit = levels["levels"]
    else:
        # A list of levels. Choose the correct one depending on the
        # size of the current filesystem. We do not make the first
        # rule match, but that with the largest size_gb. That way
        # the order of the entries is not important.
        found = False
        found_size = 0
        for to_size, this_levels in levels["levels"]:
            if size_gb * giga > to_size and to_size >= found_size:
                warn, crit = this_levels
                found_size = to_size
                found = True
        if not found:
            warn, crit = 100.0, 100.0  # entry not found in list

    # Take into account magic scaling factor (third optional argument
    # in check params). A factor of 1.0 changes nothing. Factor should
    # be > 0 and <= 1. A smaller factor raises levels for big file systems
    # bigger than 100 GB and lowers it for file systems smaller than 100 GB.
    # Please run df_magic_factor.py to understand how it works.
    magic = levels.get("magic")
    # We need a way to disable the magic factor so check
    # if magic not 1.0
    if magic and magic != 1.0:
        # convert warn/crit to percentage
        # (non-float levels are absolute MB values here)
        if not isinstance(warn, float):
            warn = savefloat(warn * mega / float(size_gb * giga)) * 100
        if not isinstance(crit, float):
            crit = savefloat(crit * mega / float(size_gb * giga)) * 100

        normsize = levels["magic_normsize"]
        hgb_size = size_gb / float(normsize)
        felt_size = hgb_size**magic
        scale = felt_size / hgb_size
        warn_scaled = 100 - ((100 - warn) * scale)
        crit_scaled = 100 - ((100 - crit) * scale)

        # Make sure, levels do never get too low due to magic factor
        lowest_warning_level, lowest_critical_level = levels["levels_low"]
        if warn_scaled < lowest_warning_level:
            warn_scaled = lowest_warning_level
        if crit_scaled < lowest_critical_level:
            crit_scaled = lowest_critical_level
    else:
        # no magic factor: only normalize absolute levels to percentages
        if not isinstance(warn, float):
            warn_scaled = savefloat(warn * mega / float(size_gb * giga)) * 100
        else:
            warn_scaled = warn
        if not isinstance(crit, float):
            crit_scaled = savefloat(crit * mega / float(size_gb * giga)) * 100
        else:
            crit_scaled = crit

    size_mb = size_gb * 1024
    warn_mb = savefloat(size_mb * warn_scaled / 100)
    crit_mb = savefloat(size_mb * crit_scaled / 100)
    levels["levels_mb"] = (warn_mb, crit_mb)
    if isinstance(warn, float):
        # percentage levels; negative values mean "free space below"
        if warn_scaled < 0 and crit_scaled < 0:
            label = 'warn/crit at free space below'
            warn_scaled *= -1
            crit_scaled *= -1
        else:
            label = 'warn/crit at'
        levels["levels_text"] = "(%s %s/%s)" % (label,
                                                get_percent_human_readable(warn_scaled),
                                                get_percent_human_readable(crit_scaled))
    else:
        # absolute MB levels; negative values mean "free space below"
        if warn * mega < 0 and crit * mega < 0:
            label = 'warn/crit at free space below'
            warn *= -1
            crit *= -1
        else:
            label = 'warn/crit at'
        warn_hr = get_bytes_human_readable(warn * mega)
        crit_hr = get_bytes_human_readable(crit * mega)
        levels["levels_text"] = "(%s %s/%s)" % (label, warn_hr, crit_hr)

    # NOTE(review): params.get(...) would raise AttributeError for legacy
    # tuple params -- presumably tuple params never carry inode levels;
    # confirm against callers.
    inodes_levels = params.get("inodes_levels")
    if inodes_levels:
        if isinstance(levels["inodes_levels"], tuple):
            warn, crit = levels["inodes_levels"]
        else:
            # A list of inode levels. Choose the correct one depending on the
            # size of the current filesystem. We do not make the first
            # rule match, but that with the largest size_gb. That way
            # the order of the entries is not important.
            found = False
            found_size = 0
            for to_size, this_levels in levels["inodes_levels"]:
                if size_gb * giga > to_size and to_size >= found_size:
                    warn, crit = this_levels
                    found_size = to_size
                    found = True
            if not found:
                warn, crit = 100.0, 100.0  # entry not found in list
        levels["inodes_levels"] = warn, crit
    else:
        levels["inodes_levels"] = (None, None)

    return levels
def check_filer_disks(disks, params):
    """Check filer (e.g. NetApp) disk inventory: capacity, spares, failures.

    Args:
        disks: List of dicts with at least 'state', 'type', 'identifier'
            and optionally 'capacity' (bytes).
        params: Rule dict with 'number_of_spare_disks' and
            '<state>_spare_ratio' level tuples.
    """
    # Bucket the disks by their (non-ok) state.
    state = {}
    state['prefailed'] = []
    state['failed'] = []
    state['offline'] = []
    state['spare'] = []
    total_capacity = 0

    for disk in disks:
        total_capacity += disk.get("capacity", 0)
        for what in state:
            if disk['state'] == what:
                state[what].append(disk)

    yield 0, "Total raw capacity: %s" % get_bytes_human_readable(total_capacity), [
        ("total_disk_capacity", total_capacity)
    ]

    # TODO: Is a prefailed disk unavailable?
    unavail_disks = len(state['prefailed']) + len(state['failed']) + len(state['offline'])
    yield 0, "Total disks: %d" % (len(disks) - unavail_disks), [("total_disks", len(disks))]

    spare_disks = len(state['spare'])
    spare_state, spare_infotext = 0, "Spare disks: %d" % spare_disks
    spare_disk_levels = params.get('number_of_spare_disks')
    if spare_disk_levels:
        warn, crit = spare_disk_levels
        if spare_disks < crit:
            spare_state = 2
        elif spare_disks < warn:
            spare_state = 1
        if spare_state:
            spare_infotext += " (warn/crit below %s/%s)" % (warn, crit)
    yield spare_state, spare_infotext, [("spare_disks", spare_disks)]

    parity_disks = [disk for disk in disks if disk['type'] == 'parity']
    prefailed_parity = [disk for disk in parity_disks if disk['state'] == 'prefailed']
    if len(parity_disks) > 0:
        yield 0, "Parity disks: %d (%d prefailed)" % (len(parity_disks), len(prefailed_parity))

    yield 0, "Failed disks: %d" % unavail_disks, [("failed_disks", unavail_disks)]

    for name, disk_type in [("Data", "data"), ("Parity", "parity")]:
        total_disks = [disk for disk in disks if disk['type'] == disk_type]
        prefailed_disks = [disk for disk in total_disks if disk['state'] == 'prefailed']
        if len(total_disks) > 0:
            info_text = "%s disks" % len(total_disks)
            if len(prefailed_disks) > 0:
                # fixed: was "% (prefailed_disks)" which formatted the *list*
                # with %d and raised TypeError
                info_text += " (%d prefailed)" % len(prefailed_disks)
            yield 0, info_text

        info_texts = []
        for disk in prefailed_disks:
            info_texts.append(disk['identifier'])
        if len(info_texts) > 0:
            yield 0, "%s Disk Details: %s" % (name, " / ".join(info_texts))

    for disk_state in ["failed", "offline"]:
        info_texts = []
        for disk in state[disk_state]:
            info_texts.append(disk['identifier'])
        if len(info_texts) > 0:
            yield 0, "%s Disk Details: %s" % (disk_state, " / ".join(info_texts))

        warn, crit = params["%s_spare_ratio" % disk_state]
        # fixed: guard against ZeroDivisionError when there are neither
        # disks in this state nor spares; ratio is 0 in that case
        denominator = len(state[disk_state]) + len(state['spare'])
        ratio = float(len(state[disk_state])) / denominator * 100 if denominator else 0.0
        return_state = False
        if ratio >= crit:
            return_state = 2
        elif ratio >= warn:
            return_state = 1
        if return_state:
            yield return_state, "Too many %s disks (warn/crit at %.1f%%/%.1f%%)" % (
                disk_state, warn, crit)
def render(self):
    """Render the stored byte count as human readable text (base 1024)."""
    byte_count = self.bytes
    return get_bytes_human_readable(byte_count, base=1024)