Code example #1
0
def check_memory_simple(used, total, params):
    """Check memory usage against percentage-based or absolute-free levels.

    Args:
      used: Used memory in bytes.
      total: Total memory in bytes (must be > 0).
      params: Either a legacy ``(warn_perc, crit_perc)`` tuple, a dict with a
        ``"levels"`` entry of the form ``("perc_used", (w, c))`` or
        ``("abs_free", (w, c))``, or a falsy value meaning "no levels".

    Returns:
      A ``(state, infotext, perfdata)`` tuple in the legacy check API format.
    """
    # Convert old-style tuple params to dict
    if params:
        if isinstance(params, tuple):
            params = {"levels": ("perc_used", params)}
    else:
        # No thresholds configured.  Note: this was previously written as
        # ("ignore"), which is just the plain string with misleading parens.
        params = {"levels": "ignore"}

    perc_used = (float(used) / total) * 100
    infotext = "Usage: %s (Used: %s, Total: %s)" % (
        get_percent_human_readable(perc_used),
        get_bytes_human_readable(used),
        get_bytes_human_readable(total),
    )

    status = 0
    if params["levels"][0] == "perc_used":
        # Levels are percentages of total; convert to absolute bytes.
        warn_perc, crit_perc = params["levels"][1]
        warn_abs = (warn_perc / 100.0) * total
        crit_abs = (crit_perc / 100.0) * total
        levelstext = " (warn/crit at %s/%s used)" % (
            get_percent_human_readable(warn_perc),
            get_percent_human_readable(crit_perc),
        )

    elif params["levels"][0] == "abs_free":
        # Levels are absolute amounts of *free* bytes; convert to used bytes.
        warn_abs_free, crit_abs_free = params["levels"][1]
        warn_abs = total - warn_abs_free
        crit_abs = total - crit_abs_free
        levelstext = " (warn/crit below %s/%s free)" % (
            get_bytes_human_readable(warn_abs_free),
            get_bytes_human_readable(crit_abs_free),
        )

    else:
        # No levels imposed, i.e. params = {'levels': 'ignore'}
        crit_abs = None
        warn_abs = None
        levelstext = ""

    if crit_abs is not None and used >= crit_abs:
        status = 2
    elif warn_abs is not None and used >= warn_abs:
        status = 1
    # Only append the level description when a threshold actually fired.
    if status:
        infotext += levelstext

    perfdata = [("memory_used", used, warn_abs, crit_abs, 0, total)]
    return status, infotext, perfdata
Code example #2
0
def _check_inodes(levels, inodes_total, inodes_avail):
    """Yield one check result for inode usage, if inode data is present."""
    if not inodes_total:
        return

    warn_variant, crit_variant = levels["inodes_levels"]
    if isinstance(warn_variant, int):
        # Levels given as absolute numbers of free inodes
        inodes_warn_abs = inodes_total - warn_variant
        inodes_crit_abs = inodes_total - crit_variant
        human_readable_func = get_number_with_precision
    elif isinstance(warn_variant, float):
        # Levels given as percentage of free inodes
        inodes_warn_abs = (100 - warn_variant) / 100.0 * inodes_total
        inodes_crit_abs = (100 - crit_variant) / 100.0 * inodes_total
        human_readable_func = lambda x: get_percent_human_readable(100.0 * x / inodes_total)
    else:
        # No levels configured
        inodes_warn_abs = None
        inodes_crit_abs = None
        human_readable_func = get_number_with_precision

    inode_status, inode_text, inode_perf = check_levels(
        inodes_total - inodes_avail,
        'inodes_used',
        (inodes_warn_abs, inodes_crit_abs),
        boundaries=(0, inodes_total),
        human_readable_func=human_readable_func,
        infoname="Inodes Used",
    )

    # Decide whether the inode details should appear in the output text:
    # always, only below 50% available / on problem, or only on problem.
    show_inodes = levels["show_inodes"]
    inodes_avail_perc = 100.0 * inodes_avail / inodes_total
    show_it = (
        show_inodes == "always"
        or (show_inodes == "onlow" and (inode_status or inodes_avail_perc < 50))
        or (show_inodes == "onproblem" and inode_status)
    )
    if show_it:
        infotext = "%s, inodes available: %s/%s" % (
            inode_text,
            get_number_with_precision(inodes_avail),
            get_percent_human_readable(inodes_avail_perc),
        )
    else:
        infotext = ""

    yield inode_status, infotext, inode_perf
Code example #3
0
File: mem.py  Project: tribe29/checkmk
def check_memory_element(
    label,
    used,
    total,
    levels,
    label_total="",
    show_free=False,
    metric_name=None,
    create_percent_metric=False,
):
    """Return a check result for one memory element"""
    # Either report the free or the used amount in the info text.
    if show_free:
        display_value = total - used
        display_suffix = " free"
    else:
        display_value = used
        display_suffix = ""

    infotext = "%s: %s%s - %s of %s%s" % (
        label,
        get_percent_human_readable(100.0 * display_value / total),
        display_suffix,
        get_bytes_human_readable(display_value, base=1024),
        get_bytes_human_readable(total, base=1024),
        (" %s" % label_total).rstrip(),
    )

    try:
        mode, (warn, crit) = levels
    except (ValueError, TypeError):  # handle None, "ignore"
        mode, (warn, crit) = "ignore", (None, None)

    warn, crit, levels_text = normalize_mem_levels(mode, warn, crit, total)
    state = _compute_state(used, warn, crit)
    if state and levels_text:
        infotext = "%s (%s)" % (infotext, levels_text)

    perf = []
    if metric_name:
        perf.append((metric_name, used, warn, crit, 0, total))
    if create_percent_metric:
        to_percent = 100.0 / total
        perf.append((
            "mem_used_percent",
            used * to_percent,
            None if warn is None else warn * to_percent,
            None if crit is None else crit * to_percent,
            0,
            None,  # upper boundary omitted: value may exceed 100%
        ))

    return state, infotext, perf
Code example #4
0
def check_memory_multiitem(params, data, base=1024):
    """Check memory usage given a data dict with mem_total and mem_used/mem_avail."""
    if "mem_total" not in data:
        return 3, "Invalid data: missing mem_total"
    mem_total = data["mem_total"]

    # Derive whichever of used/available is missing from the other.
    if "mem_used" in data:
        mem_used = data["mem_used"]
        mem_avail = mem_total - mem_used
    elif "mem_avail" in data:
        mem_avail = data["mem_avail"]
        mem_used = mem_total - mem_avail
    else:
        return 3, "Invalid data: missing mem_used or mem_avail sizes"

    infotext = "%s used (%s of %s)" % (
        get_percent_human_readable(float(mem_used) / float(mem_total) * 100),
        get_bytes_human_readable(mem_used, base=base),
        get_bytes_human_readable(mem_total, base=base),
    )

    state = 0
    if "levels" in params:

        def _to_absolute(level):
            # Integers are absolute byte values, anything else is a percentage.
            if isinstance(level, int):
                return level
            return int(mem_total * level / 100)

        warn, crit = params["levels"]
        warn_absolute = _to_absolute(warn)
        crit_absolute = _to_absolute(crit)

        if mem_used > crit_absolute:
            state = 2
        elif mem_used > warn_absolute:
            state = 1
        if state:
            infotext += " (warn/crit at %s/%s)" % (
                get_bytes_human_readable(warn_absolute),
                get_bytes_human_readable(crit_absolute),
            )
    else:
        warn_absolute = None
        crit_absolute = None

    return state, infotext, [("memused", mem_used, warn_absolute, crit_absolute, 0, mem_total)]
Code example #5
0
def size_trend(
    check,
    item,
    resource,
    levels,
    used_mb,
    size_mb: float,
    timestamp=None,
):  # pylint: disable=function-redefined
    """Trend computation for size related checks of disks, ram, etc.
    Trends are computed in two steps. In the first step the delta to
    the last check is computed, using a normal check_mk counter.
    In the second step an average over that counter is computed to
    make a long-term prediction.

    Note:
      This function is experimental and may change in future releases.
      Use at your own risk!

    Args:
      check (str): The name of the check, e.g. "df".
      item (str): The name of the item, e.g. the mountpoint "/" for df.
      resource (str): The resource in question, e.g. "disk", "ram", "swap".
      levels (dict): Level parameters for the trend computation. Items:
          "trend_range"          : 24,       # interval for the trend in hours
          "trend_perfdata"       : True      # generate performance data for trends
          "trend_bytes"          : (10, 20), # change during trend_range
          "trend_shrinking_bytes": (16, 32), # Bytes of shrinking during trend_range
          "trend_perc"           : (1, 2),   # percent change during trend_range
          "trend_shrinking_perc" : (1, 2),   # percent decreasing change during trend_range
          "trend_timeleft"       : (72, 48)  # time left in hours until full
          "trend_showtimeleft"   : True      # display time left in infotext
        The item "trend_range" is required. All other items are optional.
      timestamp (float, optional): Time in secs used to calculate the rate
        and average. Defaults to "None".
      used_mb (float): Used space in MB.
      size_mb (float): Max. available space in MB.

    Returns:
      A tuple of (state, infotext, perfdata) for the trend computation.
      If a MKCounterWrapped occurs (i.e. there is not enough data
      present for the trend computation) the tuple (0, '', []) is
      returned.
    """

    # Declared up-front: perfdata mixes 2-tuples ("growth"/"trend_hoursleft")
    # with full 6-tuples ("trend").
    perfdata: List[
        Union[  #
            Tuple[str, float],  #
            Tuple[str, float, Optional[float], Optional[float], Optional[float], Optional[float]],
        ]
    ]
    state, infotext, perfdata, problems = 0, "", [], []

    MB = 1024.0 * 1024.0  # bytes per MB
    H24 = 60 * 60 * 24  # seconds per day

    range_hours = levels["trend_range"]
    range_sec = range_hours * 3600.0
    if not timestamp:
        timestamp = time.time()

    # compute current rate in MB/s by computing delta since last check
    try:
        rate = get_rate(
            "%s.%s.delta" % (check, item), timestamp, used_mb, allow_negative=True, onwrap=RAISE
        )
    except MKCounterWrapped:
        # need more data for computing a trend
        return 0, "", []

    if levels.get("trend_perfdata"):
        # growth metric is expressed per day
        perfdata.append(("growth", rate * H24))

    # average trend in MB/s, initialized with zero (by default)
    rate_avg = get_average("%s.%s.trend" % (check, item), timestamp, rate, range_sec / 60.0)

    # projected change (in MB) over the configured trend range
    trend = rate_avg * range_sec
    # a negative trend prints its own '-'; only prefix '+' for growth
    sign = "+" if trend > 0 else ""
    infotext += ", trend: %s%s / %g hours" % (
        sign,
        get_bytes_human_readable(trend * MB),
        range_hours,
    )

    # levels for performance data
    warn_perf: Optional[float] = None
    crit_perf: Optional[float] = None

    # apply levels for absolute growth / interval
    trend_bytes = levels.get("trend_bytes")
    if trend_bytes:
        wa, cr = trend_bytes
        warn_perf, crit_perf = wa / MB, cr / MB
        if trend * MB >= wa:
            # "(!" is the opening of the Checkmk state marker; it is closed
            # below after possibly upgrading to "(!!" for the crit level.
            problems.append(
                "growing too fast (warn/crit at %s/%s per %.1f h)(!"
                % (
                    get_bytes_human_readable(wa),
                    get_bytes_human_readable(cr),
                    range_hours,
                )
            )
            state = max(1, state)
            if trend * MB >= cr:
                state = 2
                problems[-1] += "!"
            problems[-1] += ")"

    # shrinking levels in absolute bytes
    tmp_state, tmp_problem = _check_shrinking(
        trend * MB,
        levels.get("trend_shrinking_bytes"),
        range_hours,
        get_bytes_human_readable,
    )
    if tmp_state > 0:
        state = max(state, tmp_state)
        problems.append(tmp_problem)

    # apply levels for growth relative to filesystem size
    trend_perc: Optional[Tuple[float, float]] = levels.get("trend_perc")
    if trend_perc:
        wa_perc, cr_perc = trend_perc
        wa = wa_perc / 100.0 * size_mb
        cr = cr_perc / 100.0 * size_mb
        if warn_perf is not None:
            # trend_bytes set both warn_perf and crit_perf together, so the
            # assert documents that invariant; keep the stricter of the two.
            assert crit_perf is not None
            warn_perf = min(warn_perf, wa)
            crit_perf = min(crit_perf, cr)
        else:
            warn_perf, crit_perf = wa, cr
        if trend >= wa:
            problems.append(
                "growing too fast (warn/crit at %s/%s per %.1f h)(!"
                % (
                    get_percent_human_readable(wa_perc),
                    get_percent_human_readable(cr_perc),
                    range_hours,
                )
            )
            state = max(1, state)
            if trend >= cr:
                state = 2
                problems[-1] += "!"
            problems[-1] += ")"

    # shrinking levels relative to filesystem size
    tmp_state, tmp_problem = _check_shrinking(
        100 * trend / size_mb,
        levels.get("trend_shrinking_perc"),
        range_hours,
        get_percent_human_readable,
    )
    if tmp_state > 0:
        state = max(state, tmp_state)
        problems.append(tmp_problem)

    # compute time until filesystem is full (only for positive trend, of course)

    # The start value of hours_left is negative. The pnp graph and the perfometer
    # will interpret this as infinite -> not growing
    hours_left = -1
    if trend > 0:

        def format_hours(hours):
            # Pick a unit roughly matching the magnitude of the remaining time.
            if hours > 365 * 24:
                return "more than a year"
            elif hours > 90 * 24:
                return "%0d months" % (hours / (30 * 24))  # fixed: true-division
            elif hours > 4 * 7 * 24:  # 4 weeks
                return "%0d weeks" % (hours / (7 * 24))  # fixed: true-division
            elif hours > 7 * 24:  # 1 week
                return "%0.1f weeks" % (hours / (7 * 24))  # fixed: true-division
            elif hours > 2 * 24:  # 2 days
                return "%0.1f days" % (hours / 24)  # fixed: true-division
            return "%d hours" % hours

        hours_left = (size_mb - used_mb) / trend * range_hours
        hours_txt = format_hours(hours_left)

        timeleft = levels.get("trend_timeleft")
        if timeleft:
            wa, cr = timeleft
            if hours_left <= cr:
                state = 2
                problems.append("only %s until %s full(!!)" % (hours_txt, resource))
            elif hours_left <= wa:
                state = max(state, 1)
                problems.append("only %s until %s full(!)" % (hours_txt, resource))
            elif hours_left <= wa * 2 or levels.get("trend_showtimeleft"):
                problems.append("time left until %s full: %s" % (resource, hours_txt))
        elif levels.get("trend_showtimeleft"):
            problems.append("time left until %s full: %s" % (resource, hours_txt))

    if levels.get("trend_perfdata"):
        # trend metric and its levels are scaled from MB/range to MB/day
        perfdata.append(
            (
                "trend",
                rate_avg * H24,
                (warn_perf / range_sec * H24) if warn_perf is not None else None,
                (crit_perf / range_sec * H24) if crit_perf is not None else None,
                0,
                1.0 * size_mb / range_hours,
            )
        )

    if levels.get("trend_showtimeleft"):
        perfdata.append(("trend_hoursleft", hours_left))

    if problems:
        infotext += " - %s" % ", ".join(problems)

    return state, infotext, perfdata
Code example #6
0
def df_check_filesystem_single_coroutine(
    mountpoint,
    size_mb,
    avail_mb,
    reserved_mb,
    inodes_total,
    inodes_avail,
    params,
    this_time=None,
):
    """Yield check results for one filesystem: usage, size trend and inodes."""
    if size_mb == 0:
        yield 1, "Size of filesystem is 0 MB", []
        return

    if isinstance(params, dict):
        show_levels = params.get("show_levels", False)
        subtract_reserved = params.get("subtract_reserved", False) and reserved_mb > 0
        show_reserved = params.get("show_reserved") and reserved_mb > 0
    else:
        # params might still be a tuple
        show_levels = False
        subtract_reserved = False
        show_reserved = False

    used_mb = size_mb - avail_mb
    used_max = size_mb
    if subtract_reserved:
        used_mb -= reserved_mb
        used_max -= reserved_mb

    # Get warning and critical levels already with 'magic factor' applied
    levels = get_filesystem_levels(mountpoint, size_mb / 1024., params)
    warn_mb, crit_mb = levels["levels_mb"]

    used_hr = get_bytes_human_readable(used_mb * 1024**2)
    used_max_hr = get_bytes_human_readable(used_max * 1024**2)
    used_perc_hr = get_percent_human_readable(100.0 * used_mb / used_max)

    # If both numbers end with the same unit, then drop the first one
    if used_hr[-2:] == used_max_hr[-2:]:
        used_hr = used_hr[:-3]

    infotext = ["%s used (%s of %s)" % (used_perc_hr, used_hr, used_max_hr)]

    if warn_mb < 0.0:
        # Negative levels, so user configured thresholds based on space left.
        # Calculate the upper thresholds based on the size of the filesystem.
        crit_mb = used_max + crit_mb
        warn_mb = used_max + warn_mb

    if used_mb >= crit_mb:
        status = 2
    elif used_mb >= warn_mb:
        status = 1
    else:
        status = 0

    perfdata = [
        ("fs_used", used_mb, warn_mb, crit_mb, 0, size_mb),
        ('fs_size', size_mb),
        ("fs_used_percent", 100.0 * used_mb / size_mb),
    ]

    # Append the levels description depending on the show_levels mode.
    if (show_levels == "always"
            or (show_levels == "onproblem" and status > 0)
            or (show_levels == "onmagic"
                and (status > 0 or levels.get("magic", 1.0) != 1.0))):
        infotext.append(levels["levels_text"])

    if show_reserved:
        reserved_perc_hr = get_percent_human_readable(100.0 * reserved_mb / size_mb)
        reserved_hr = get_bytes_human_readable(reserved_mb * 1024**2)
        if subtract_reserved:
            infotext.append("additionally reserved for root: %s" % reserved_hr)
        else:
            infotext.append("therein reserved for root: %s (%s)" %
                            (reserved_perc_hr, reserved_hr))

    if subtract_reserved:
        perfdata.append(("fs_free", avail_mb, None, None, 0, size_mb))

    if subtract_reserved or show_reserved:
        perfdata.append(("reserved", reserved_mb))

    yield status, ", ".join(infotext), perfdata

    if levels.get("trend_range"):
        trend_state, trend_text, trend_perf = size_trend(
            'df',
            mountpoint,
            "disk",
            levels,
            used_mb,
            size_mb,
            this_time,
        )
        # Todo(frans): Return value from size_trend() can be empty but we must yield a valid result
        # - as soon as we can 'yield from' size_trend we do not have to check any more
        if trend_state or trend_text or trend_perf:
            yield trend_state, trend_text.strip(" ,"), trend_perf or []

    yield from _check_inodes(levels, inodes_total, inodes_avail)
Code example #7
0
def get_filesystem_levels(mountpoint, size_gb, params):
    """Compute the effective warn/crit levels dict for a filesystem check.

    Merges the factory defaults, the global ``filesystem_default_levels``
    and the user-supplied ``params`` (dict or legacy tuple), then applies
    the "magic factor" scaling and resolves size-dependent level lists.

    Args:
      mountpoint: Name of the filesystem (currently unused in this body).
      size_gb: Size of the filesystem in GB.
      params: Check parameters; a dict, or a legacy tuple
        (warn, crit[, magic]).

    Returns:
      The merged levels dict, extended with "levels_mb" (warn/crit in MB),
      "levels_text" (human-readable description) and "inodes_levels".
    """
    mega = 1024 * 1024
    giga = mega * 1024
    # Start with factory settings
    levels = _FILESYSTEM_DEFAULT_LEVELS.copy()

    def convert_legacy_levels(value):
        # Legacy configs may contain ints; normalize simple levels to floats
        # unless flexible (size-dependent) levels are in use.
        if isinstance(params, tuple) or not params.get("flex_levels"):
            return tuple(map(float, value))
        return value

    # convert default levels to dictionary. This is in order to support
    # old style levels like (80, 90)
    if isinstance(filesystem_default_levels, dict):
        fs_default_levels = filesystem_default_levels.copy()
        fs_levels = fs_default_levels.get("levels")
        if fs_levels:
            fs_default_levels["levels"] = convert_legacy_levels(fs_levels)
        levels.update(filesystem_default_levels)
    else:
        # filesystem_default_levels is a legacy tuple (warn, crit[, magic])
        levels = _FILESYSTEM_DEFAULT_LEVELS.copy()
        levels["levels"] = convert_legacy_levels(filesystem_default_levels[:2])
        if len(filesystem_default_levels) == 2:
            levels["magic"] = None
        else:
            levels["magic"] = filesystem_default_levels[2]

    # If params is a dictionary, make that override the default values
    if isinstance(params, dict):
        levels.update(params)

    else:  # simple format - explicitly override levels and magic
        levels["levels"] = convert_legacy_levels(params[:2])
        if len(params) >= 3:
            levels["magic"] = params[2]

    # Determine real warn, crit levels
    if isinstance(levels["levels"], tuple):
        warn, crit = levels["levels"]
    else:
        # A list of levels. Choose the correct one depending on the
        # size of the current filesystem. We do not make the first
        # rule match, but that with the largest size_gb. That way
        # the order of the entries is not important.
        found = False
        found_size = 0
        for to_size, this_levels in levels["levels"]:
            if size_gb * giga > to_size and to_size >= found_size:
                warn, crit = this_levels
                found_size = to_size
                found = True
        if not found:
            warn, crit = 100.0, 100.0  # entry not found in list

    # Take into account magic scaling factor (third optional argument
    # in check params). A factor of 1.0 changes nothing. Factor should
    # be > 0 and <= 1. A smaller factor raises levels for big file systems
    # bigger than 100 GB and lowers it for file systems smaller than 100 GB.
    # Please run df_magic_factor.py to understand how it works.

    magic = levels.get("magic")
    # We need a way to disable the magic factor so check
    # if magic not 1.0
    if magic and magic != 1.0:
        # convert warn/crit to percentage (ints are absolute MB values)
        if not isinstance(warn, float):
            warn = savefloat(warn * mega / float(size_gb * giga)) * 100
        if not isinstance(crit, float):
            crit = savefloat(crit * mega / float(size_gb * giga)) * 100

        normsize = levels["magic_normsize"]
        hgb_size = size_gb / float(normsize)
        felt_size = hgb_size**magic
        scale = felt_size / hgb_size
        warn_scaled = 100 - ((100 - warn) * scale)
        crit_scaled = 100 - ((100 - crit) * scale)

        # Make sure, levels do never get too low due to magic factor
        lowest_warning_level, lowest_critical_level = levels["levels_low"]
        if warn_scaled < lowest_warning_level:
            warn_scaled = lowest_warning_level
        if crit_scaled < lowest_critical_level:
            crit_scaled = lowest_critical_level
    else:
        # No magic factor: just convert absolute (int) levels to percent
        if not isinstance(warn, float):
            warn_scaled = savefloat(warn * mega / float(size_gb * giga)) * 100
        else:
            warn_scaled = warn

        if not isinstance(crit, float):
            crit_scaled = savefloat(crit * mega / float(size_gb * giga)) * 100
        else:
            crit_scaled = crit

    size_mb = size_gb * 1024
    warn_mb = savefloat(size_mb * warn_scaled / 100)
    crit_mb = savefloat(size_mb * crit_scaled / 100)
    levels["levels_mb"] = (warn_mb, crit_mb)
    if isinstance(warn, float):
        # Percent levels; negative values mean "free space below"
        if warn_scaled < 0 and crit_scaled < 0:
            label = 'warn/crit at free space below'
            warn_scaled *= -1
            crit_scaled *= -1
        else:
            label = 'warn/crit at'
        levels["levels_text"] = "(%s %s/%s)" % (
            label, get_percent_human_readable(warn_scaled),
            get_percent_human_readable(crit_scaled))
    else:
        # Absolute levels in MB; negative values mean "free space below"
        if warn * mega < 0 and crit * mega < 0:
            label = 'warn/crit at free space below'
            warn *= -1
            crit *= -1
        else:
            label = 'warn/crit at'
        warn_hr = get_bytes_human_readable(warn * mega)
        crit_hr = get_bytes_human_readable(crit * mega)
        levels["levels_text"] = "(%s %s/%s)" % (label, warn_hr, crit_hr)

    inodes_levels = params.get("inodes_levels")
    if inodes_levels:
        if isinstance(levels["inodes_levels"], tuple):
            warn, crit = levels["inodes_levels"]
        else:
            # A list of inode levels. Choose the correct one depending on the
            # size of the current filesystem. We do not make the first
            # rule match, but that with the largest size_gb. That way
            # the order of the entries is not important.
            found = False
            found_size = 0
            for to_size, this_levels in levels["inodes_levels"]:
                if size_gb * giga > to_size and to_size >= found_size:
                    warn, crit = this_levels
                    found_size = to_size
                    found = True
            if not found:
                warn, crit = 100.0, 100.0  # entry not found in list
        levels["inodes_levels"] = warn, crit
    else:
        levels["inodes_levels"] = (None, None)

    return levels
Code example #8
0
def check_diskstat_dict(item, params, disks):
    """Yield check results for one diskstat item from a dict of disk metrics."""
    # Take care of previously discovered services
    if item in ("read", "write"):
        yield 3, "Sorry, the new version of this check does not " \
                  "support one service for read and one for write anymore."
        return

    this_time = time.time()
    disk = diskstat_select_disk(disks, item)
    if not disk:
        return

    # Averaging
    # Note: this check uses a simple method of averaging: As soon as averaging
    # is turned on the actual metrics are *replaced* by the averaged ones. No
    # duplication of performance data or check output here. This is because we
    # have so many metrics...
    prefix = ""
    averaging = params.get("average")  # in seconds here!
    if averaging:
        averaged_disk = {}  # Do not modify our arguments!!
        for key, value in disk.items():
            if isinstance(value, (int, float)):
                averaged_disk[key] = get_average("diskstat.%s.%s.avg" % (item, key), this_time,
                                                 value, averaging / 60.0)
            else:
                averaged_disk[key] = value
        disk = averaged_disk
        prefix = "%s average: " % get_age_human_readable(averaging)

    # Utilization
    if "utilization" in disk:
        yield check_levels(disk.pop("utilization"),
                           "disk_utilization",
                           params.get("utilization"),
                           human_readable_func=lambda x: get_percent_human_readable(x * 100.0),
                           scale=0.01,
                           statemarkers=False,
                           infoname=prefix + "Utilization")

    # Throughput
    for direction in ("read", "write"):
        key = direction + "_throughput"
        if key in disk:
            yield check_levels(disk.pop(key),
                               "disk_" + key,
                               params.get(direction),
                               unit="/s",
                               scale=1048576,
                               statemarkers=False,
                               human_readable_func=get_bytes_human_readable,
                               infoname=direction.title())

    # Average wait from end to end
    for wait_kind in ("wait", "read_wait", "write_wait"):
        key = "average_" + wait_kind
        if key in disk:
            yield check_levels(disk.pop(key),
                               "disk_" + key,
                               params.get(wait_kind),
                               unit="ms",
                               scale=0.001,
                               statemarkers=False,
                               infoname="Average %s" % wait_kind.title().replace("_", " "))

    # Average disk latency
    if "latency" in disk:
        yield check_levels(disk.pop("latency"),
                           "disk_latency",
                           params.get("latency"),
                           unit="ms",
                           scale=0.001,
                           statemarkers=False,
                           infoname='Latency')

    # Read/write disk latency
    for direction in ("read", "write"):
        latency_key = "%s_latency" % direction
        if latency_key not in disk:
            continue
        latency = disk.pop(latency_key)
        if latency is not None:
            yield check_levels(latency,
                               "disk_%s" % latency_key,
                               params.get(latency_key),
                               unit="ms",
                               scale=0.001,
                               statemarkers=False,
                               infoname='%s latency' % direction.title())

    # Queue lengths
    for key, title in (
        ("queue_length", "Queue Length"),
        ("read_ql", "Read Queue Length"),
        ("write_ql", "Write Queue Length"),
    ):
        if key in disk:
            yield check_levels(disk.pop(key),
                               "disk_" + key,
                               params.get(key),
                               statemarkers=False,
                               infoname="Average %s" % title)

    # I/O operations
    for direction in ("read", "write"):
        key = direction + "_ios"
        if key in disk:
            yield check_levels(
                disk.pop(key),
                "disk_" + key,
                params.get(key),
                unit="1/s",
                statemarkers=False,
                infoname="%s operations" % direction.title(),
            )

    # All the other metrics are currently not output in the plugin output - simply because
    # of their amount. They are present as performance data and will shown in graphs.

    # Send everything as performance data now. Sort keys alphabetically
    remaining_perfdata = []
    for key in sorted(disk):
        value = disk[key]
        if isinstance(value, (int, float)):
            # Currently the levels are not shown in the perfdata
            remaining_perfdata.append(("disk_" + key, value))

    if remaining_perfdata:
        yield 0, '', remaining_perfdata