Example #1
    def _perfometer_possible(self, perfometer, translated_metrics):
        if not translated_metrics:
            return False

        if self._skip_perfometer_by_trivial_metrics(
                perfometer["_required_names"], translated_metrics):
            return False

        for req in perfometer["_required"]:
            try:
                evaluate(req, translated_metrics)
            except Exception:
                return False

        if "condition" in perfometer:
            try:
                value, _unit, _color = evaluate(perfometer["condition"],
                                                translated_metrics)
                if value == 0.0:
                    return False
            except Exception:
                return False

        if "total" in perfometer:
            return self._total_values_exists(perfometer["total"],
                                             translated_metrics)

        return True
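
For orientation, this is roughly the shape of the perfometer dict the method reads. It is a hypothetical sketch: only the keys accessed above are shown, and the metric names and expressions are invented for illustration.

# Hypothetical perfometer definition (key names taken from the method above, values made up).
perfometer = {
    "_required_names": ["fs_used", "fs_size"],  # checked by _skip_perfometer_by_trivial_metrics
    "_required": ["fs_used", "fs_size"],        # every entry must evaluate() without raising
    "condition": "fs_used,fs_size,/",           # the perfometer is skipped when this evaluates to 0.0
    "total": "fs_size",                         # handed on to _total_values_exists
}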
Example #2
def test_evaluate():
    perfdata: Perfdata = [(n, len(n), u'', 120, 240, 0, 24)
                          for n in ['in', 'out']]
    translated_metrics = utils.translate_metrics(perfdata,
                                                 'check_mk-openvpn_clients')
    assert utils.evaluate("if_in_octets,8,*@bits/s",
                          translated_metrics) == (16.0,
                                                  utils.unit_info['bits/s'],
                                                  '#00e060')
    perfdata = [(n, len(n), u'', None, None, None, None)
                for n in ['/', 'fs_size']]
    translated_metrics = utils.translate_metrics(perfdata, 'check_mk-df')
    assert utils.evaluate("fs_size,fs_used,-#e3fff9",
                          translated_metrics) == (6291456,
                                                  utils.unit_info['bytes'],
                                                  '#e3fff9')

    # This is a terrible metric from Nagios plugins. The test is for survival rather than correctness.
    # The unit "percent" is lost on the way. Fixing this would also imply figuring out how to represent
    # graphs for the active-icmp check when the host has multiple addresses.
    assert utils.evaluate(
        '127.0.0.1pl',
        utils.translate_metrics(
            utils.parse_perf_data('127.0.0.1pl=5%;80;100;;')[0],
            "check_mk_active-icmp")) == (5, utils.unit_info[""], '#cc00ff')
Example #3
    def _perfometer_possible(self, perfometer: Perfometer,
                             translated_metrics: TranslatedMetrics) -> bool:
        if not translated_metrics:
            return False

        required_names = _lookup_required_names(perfometer)
        if self._skip_perfometer_by_trivial_metrics(required_names,
                                                    translated_metrics):
            return False

        for req in _lookup_required_expressions(perfometer):
            try:
                evaluate(req, translated_metrics)
            except Exception:
                return False

        if "condition" in perfometer:
            try:
                value, _unit, _color = evaluate(perfometer["condition"],
                                                translated_metrics)
                if value == 0.0:
                    return False
            except Exception:
                return False

        if "total" in perfometer:
            return self._total_values_exists(perfometer["total"],
                                             translated_metrics)

        return True
Example #4
    def get_stack(self):
        entry = []

        summed = self._get_summed_values()

        if "total" in self._perfometer:
            total, _unit, _color = evaluate(self._perfometer["total"],
                                            self._translated_metrics)
        else:
            total = summed

        if total == 0:
            entry.append((100.0, get_themed_perfometer_bg_color()))

        else:
            for ex in self._perfometer["segments"]:
                value, _unit, color = evaluate(ex, self._translated_metrics)
                entry.append((100.0 * value / total, color))

            # Paint the rest only if it is positive and larger than one per mille
            if total - summed > 0.001:
                entry.append((100.0 * (total - summed) / total,
                              get_themed_perfometer_bg_color()))

        return [entry]
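
A worked example of the stacking arithmetic above, as standalone Python with hypothetical numbers: two segments of 30 and 50 against a total of 100 fill 30% and 50% of the bar, and the remaining 20% gets the background color.

# Standalone sketch of the percentage math only; colors and values are made up.
segments = [30.0, 50.0]
total = 100.0
summed = sum(segments)                                              # 80.0
entry = [(100.0 * value / total, "#1122ff") for value in segments]
if total - summed > 0.001:                                          # remainder painted in the background color
    entry.append((100.0 * (total - summed) / total, "bg"))
# entry == [(30.0, '#1122ff'), (50.0, '#1122ff'), (20.0, 'bg')]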
Example #5
def test_evaluate():
    perfdata = [(n, len(n), u'', 120, 240, 0, 24) for n in ['in', 'out']]  # type: List[Tuple]
    translated_metrics = utils.translate_metrics(perfdata, 'check_mk-openvpn_clients')
    assert utils.evaluate("if_in_octets,8,*@bits/s",
                          translated_metrics) == (16.0, utils.unit_info['bits/s'], '#00e060')
    perfdata = [(n, len(n), u'', None, None, None, None) for n in ['/', 'fs_size']]
    translated_metrics = utils.translate_metrics(perfdata, 'check_mk-df')
    assert utils.evaluate("fs_size,fs_used,-#e3fff9",
                          translated_metrics) == (6291456, utils.unit_info['bytes'], '#e3fff9')
Example #6
 def get_stack(self):
     value, _unit, color = evaluate(self._perfometer["metric"],
                                    self._translated_metrics)
     return [
         self.get_stack_from_values(value, self._perfometer["half_value"],
                                    self._perfometer["exponent"], color)
     ]
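
Judging only from the keys read here, a logarithmic perfometer definition has roughly this shape; the values below are hypothetical.

# Hypothetical definition; only the keys accessed above are shown.
perfometer = {
    "metric": "fs_used",   # expression handed to evaluate()
    "half_value": 1024.0,  # presumably the value rendered at the middle of the bar
    "exponent": 2.0,       # growth factor passed on to get_stack_from_values
}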
Example #7
 def _get_type_label(self):
     # type: () -> Text
     # Use the unit of the first metric for the output of the sum. We assume
     # that all stacked metrics have the same unit anyway
     _value, unit, _color = evaluate(self._perfometer["segments"][0],
                                     self._translated_metrics)
     return unit["render"](self._get_summed_values())
Example #8
 def get_sort_number(self):
     # type: () -> int
     """Returns the number to sort this perfometer with compared to the other
     perfometers in the current perfometer sort group"""
     value, _unit, _color = evaluate(self._perfometer["metric"],
                                     self._translated_metrics)
     return value
Example #9
def _horizontal_rules_from_thresholds(thresholds, translated_metrics):
    horizontal_rules = []
    for entry in thresholds:
        if len(entry) == 2:
            expression, title = entry
        else:
            expression = entry
            if expression.endswith(":warn"):
                title = _("Warning")
            elif expression.endswith(":crit"):
                title = _("Critical")
            else:
                title = expression

        try:
            value, unit, color = evaluate(expression, translated_metrics)
            if value:
                horizontal_rules.append((
                    value,
                    unit["render"](value),
                    color,
                    title,
                ))
        # Scalar values like min and max are always optional. This makes the
        # configuration of graphs easier.
        except Exception:
            pass

    return horizontal_rules
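
The parsing branch above accepts two entry forms; a hypothetical thresholds list covering both (the expressions are invented, only the ":warn"/":crit" suffix handling is taken from the code):

thresholds = [
    "fs_used:warn",                       # bare expression, automatic title "Warning"
    "fs_used:crit",                       # bare expression, automatic title "Critical"
    ("fs_size,2,/", "half of the size"),  # (expression, title) pair keeps its explicit title
]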
Example #10
def metric_unit_color(metric_expression,
                      translated_metrics,
                      optional_metrics=None):
    try:
        _value, unit, color = evaluate(metric_expression, translated_metrics)
    except KeyError as err:  # because metric_name is not in translated_metrics
        metric_name = err.args[0]
        if optional_metrics and metric_name in optional_metrics:
            return
        raise MKGeneralException(
            _("Graph recipe '%s' uses undefined metric '%s', available are: %s"
              ) % (metric_expression, metric_name,
                   ", ".join(sorted(translated_metrics.keys())) or "None"))
    return {"unit": unit["id"], "color": color}
Example #11
def test_evaluate():
    perfdata: Perfdata = [(n, len(n), "", 120, 240, 0, 24)
                          for n in ["in", "out"]]
    translated_metrics = utils.translate_metrics(perfdata,
                                                 "check_mk-openvpn_clients")
    assert utils.evaluate("if_in_octets,8,*@bits/s", translated_metrics) == (
        16.0,
        utils.unit_info["bits/s"],
        "#00e060",
    )
    perfdata = [(n, len(n), "", None, None, None, None)
                for n in ["/", "fs_size"]]
    translated_metrics = utils.translate_metrics(perfdata, "check_mk-df")
    assert utils.evaluate("fs_size,fs_used,-#e3fff9", translated_metrics) == (
        6291456,
        utils.unit_info["bytes"],
        "#e3fff9",
    )

    # This is a terrible metric from Nagios plugins. The test is for survival rather than correctness.
    # The unit "percent" is lost on the way. Fixing this would also imply figuring out how to represent
    # graphs for the active-icmp check when the host has multiple addresses.
    assert (utils.evaluate(
        "127.0.0.1pl",
        utils.translate_metrics(
            utils.parse_perf_data("127.0.0.1pl=5%;80;100;;")[0],
            "check_mk_active-icmp"),
    ) == (5, utils.unit_info[""], "#cc00ff"))

    # Here the user has metrics that represent subnets, but their names look like floats.
    # Test that evaluation recognizes the metric from the perf data.
    assert (utils.evaluate(
        "10.172",
        utils.translate_metrics(
            utils.parse_perf_data("10.172=6")[0], "check_mk-local"),
    ) == (6, utils.unit_info[""], "#cc00ff"))
Example #12
    def get_label(self):
        """Returns the label to be shown on top of the rendered stack

        When the perfometer type definition has a "label" element, this will be used.
        Otherwise the perfometer type specific label of _get_type_label() will be used.
        """

        # "label" option in all Perf-O-Meters overrides automatic label
        if "label" in self._perfometer:
            if self._perfometer["label"] is None:
                return ""

            expr, unit_name = self._perfometer["label"]
            value, unit, _color = evaluate(expr, self._translated_metrics)
            if unit_name:
                unit = unit_info[unit_name]
            return unit["render"](value)

        return self._get_type_label()
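
The "label" values this method understands, sketched with hypothetical expressions:

perfometer = {"label": None}                        # suppress the label entirely -> ""
perfometer = {"label": ("fs_size,fs_used,-", "bytes")}  # expression plus an explicit unit name
perfometer = {"label": ("fs_used", None)}           # falsy unit name keeps the unit returned by evaluate()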
Example #13
def _scalar_value_command(scalar, translated_metrics):
    if isinstance(scalar, tuple):
        expression, description = scalar
    else:
        expression, description = scalar, None

    try:
        value, _unit, color = evaluate(expression, translated_metrics)
    except Exception:
        return ""

    if not value:
        return ""

    rule_txt = _scalar_description(expression, description, value)
    if not rule_txt:
        return "HRULE:%s%s " % (value, color)

    return "HRULE:%s%s:\"%s\" COMMENT:\"\\n\" " % (
        value,
        color,
        rule_txt,
    )
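
For illustration, the fragment this returns for a scalar that evaluates to 240.0 with color "#ffff00" and a non-empty rule text (values are hypothetical; the exact text comes from _scalar_description, which is not shown here):

expected = 'HRULE:240.0#ffff00:"Warning" COMMENT:"\\n" '  # hypothetical rule text "Warning"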
Example #14
def render_graph_pnp(graph_template, translated_metrics):
    graph_title = None
    vertical_label = None

    rrdgraph_commands = ""

    legend_precision = graph_template.get("legend_precision", 2)
    legend_scale = graph_template.get("legend_scale", 1)
    legend_scale_symbol = scale_symbols[legend_scale]

    # Define one RRD variable for each of the available metrics.
    # Note: We need to use the original name, not the translated one.
    for var_name, metrics in translated_metrics.items():
        rrd = "$RRDBASE$_" + metrics["orig_name"] + ".rrd"
        scale = metrics["scale"]
        unit = metrics["unit"]

        if scale != 1.0:
            rrdgraph_commands += "DEF:%s_UNSCALED=%s:1:MAX " % (var_name, rrd)
            rrdgraph_commands += "CDEF:%s=%s_UNSCALED,%f,* " % (
                var_name, var_name, scale)

        else:
            rrdgraph_commands += "DEF:%s=%s:1:MAX " % (var_name, rrd)

        # Scaling for legend
        rrdgraph_commands += "CDEF:%s_LEGSCALED=%s,%f,/ " % (
            var_name, var_name, legend_scale)

        # Prepare negative variants for upside-down graph
        rrdgraph_commands += "CDEF:%s_NEG=%s,-1,* " % (var_name, var_name)
        rrdgraph_commands += "CDEF:%s_LEGSCALED_NEG=%s_LEGSCALED,-1,* " % (
            var_name, var_name)

    # Now add areas and lines to the graph
    graph_metrics = []

    # Graph with upside down metrics? (e.g. for Disk IO)
    have_upside_down = False

    # Compute width of the right column of the legend
    max_title_length = 0
    for nr, metric_definition in enumerate(graph_template["metrics"]):
        if len(metric_definition) >= 3:
            title = metric_definition[2]
        elif "," not in metric_definition[0]:
            metric_name = metric_definition[0].split("#")[0]
            mi = translated_metrics[metric_name]
            title = mi["title"]
        else:
            title = ""
        max_title_length = max(max_title_length, len(title))

    for nr, metric_definition in enumerate(graph_template["metrics"]):
        metric_name = metric_definition[0]
        line_type = metric_definition[1]  # "line", "area", "stack"

        # Optional title, especially for derived values
        if len(metric_definition) >= 3:
            title = metric_definition[2]
        else:
            title = ""

        # Prefixed minus renders the metrics in negative direction
        if line_type[0] == '-':
            have_upside_down = True
            upside_down = True
            upside_down_factor = -1
            line_type = line_type[1:]
            upside_down_suffix = "_NEG"
        else:
            upside_down = False
            upside_down_factor = 1
            upside_down_suffix = ""

        if line_type == "line":
            draw_type = "LINE"
            draw_stack = ""
        elif line_type == "area":
            draw_type = "AREA"
            draw_stack = ""
        elif line_type == "stack":
            draw_type = "AREA"
            draw_stack = ":STACK"

        # User can specify alternative color using a suffixed #aabbcc
        if '#' in metric_name:
            metric_name, custom_color = metric_name.split("#", 1)
        else:
            custom_color = None

        commands = ""
        # Derived value with RPN syntax (evaluated by RRDTool!).
        if "," in metric_name:
            # We evaluate just in order to get color and unit.
            # TODO: beware of division by zero. All metrics are set to 1 here.
            _value, unit, color = evaluate(metric_name, translated_metrics)

            if "@" in metric_name:
                expression, _explicit_unit_name = metric_name.rsplit(
                    "@", 1)  # isolate expression
            else:
                expression = metric_name

            # Choose a unique name for the derived variable and compute it
            commands += "CDEF:DERIVED%d=%s " % (nr, expression)
            if upside_down:
                commands += "CDEF:DERIVED%d_NEG=DERIVED%d,-1,* " % (nr, nr)

            metric_name = "DERIVED%d" % nr
            # Scaling and upsidedown handling for legend
            commands += "CDEF:%s_LEGSCALED=%s,%f,/ " % (
                metric_name, metric_name, legend_scale)
            if upside_down:
                commands += "CDEF:%s_LEGSCALED%s=%s,%f,/ " % (
                    metric_name, upside_down_suffix, metric_name,
                    legend_scale * upside_down_factor)

        else:
            mi = translated_metrics[metric_name]
            if not title:
                title = mi["title"]
            color = parse_color_into_hexrgb(mi["color"])
            unit = mi["unit"]

        if custom_color:
            color = "#" + custom_color

        # Paint the graph itself
        # TODO: Compute the width of the title intelligently. With legend = "mirrored",
        # the available width has to be determined and split across all titles.
        right_pad = " " * (max_title_length - len(title))
        commands += "%s:%s%s%s:\"%s%s\"%s " % (
            draw_type, metric_name, upside_down_suffix, color,
            title.replace(":", "\\:"), right_pad, draw_stack)
        if line_type == "area":
            commands += "LINE:%s%s%s " % (
                metric_name, upside_down_suffix,
                render_color(darken_color(parse_color(color), 0.2)))

        unit_symbol = unit["symbol"]
        if unit_symbol == "%":
            unit_symbol = "%%"
        else:
            unit_symbol = " " + unit_symbol

        graph_metrics.append((metric_name, unit_symbol, commands))

        # Use the title and unit of this metric as defaults for the graph
        if title and not graph_title:
            graph_title = title
        if not vertical_label:
            vertical_label = unit["title"]

    # Now create the rrdgraph commands for all metrics - according to the chosen layout
    for metric_name, unit_symbol, commands in graph_metrics:
        rrdgraph_commands += commands

        legend_symbol = unit_symbol
        if unit_symbol and unit_symbol[0] == " ":
            legend_symbol = " %s%s" % (legend_scale_symbol, unit_symbol[1:])
        if legend_symbol == " bits/s":
            # Use a literal '%s' so that GPRINT outputs values with the appropriate
            # SI magnitude (e.g. 123456 -> 123.456 k)
            legend_symbol = " %sbit/s"

        for what, what_title in [("AVERAGE", _("average")), ("MAX", _("max")),
                                 ("LAST", _("last"))]:
            rrdgraph_commands += "GPRINT:%s_LEGSCALED:%s:\"%%8.%dlf%s %s\" " % (
                metric_name,
                what,
                legend_precision,
                legend_symbol,
                what_title,
            )
        rrdgraph_commands += "COMMENT:\"\\n\" "

    # add horizontal rules for warn and crit scalars
    for scalar in graph_template.get("scalars", []):
        rrdgraph_commands += _scalar_value_command(scalar, translated_metrics)

    # For graphs with both up and down, paint a gray rule at 0
    if have_upside_down:
        rrdgraph_commands += "HRULE:0#c0c0c0 "

    # Now compute the arguments for the command line of rrdgraph
    rrdgraph_arguments = ""

    graph_title = graph_template.get("title", graph_title)
    vertical_label = graph_template.get("vertical_label", vertical_label)

    rrdgraph_arguments += " --vertical-label %s --title %s " % (
        cmk.utils.quote_shell_string(
            vertical_label or " "), cmk.utils.quote_shell_string(graph_title))

    min_value, max_value = get_graph_range(graph_template, translated_metrics)
    if min_value is not None and max_value is not None:
        rrdgraph_arguments += " -l %f -u %f" % (min_value, max_value)
    else:
        rrdgraph_arguments += " -l 0"

    return graph_title + "\n" + rrdgraph_arguments + "\n" + rrdgraph_commands + "\n"
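
To make the first loop concrete, this is the fragment it would emit for a single hypothetical metric "fs_used" with scale 1048576.0, legend_scale 1, and orig_name equal to the translated name:

# Expected rrdgraph fragment (one command per source line, concatenated with trailing spaces).
fragment = (
    "DEF:fs_used_UNSCALED=$RRDBASE$_fs_used.rrd:1:MAX "
    "CDEF:fs_used=fs_used_UNSCALED,1048576.000000,* "
    "CDEF:fs_used_LEGSCALED=fs_used,1.000000,/ "
    "CDEF:fs_used_NEG=fs_used,-1,* "
    "CDEF:fs_used_LEGSCALED_NEG=fs_used_LEGSCALED,-1,* "
)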
Example #15
 def _get_summed_values(self):
     summed = 0.0
     for ex in self._perfometer["segments"]:
         value, _unit, _color = evaluate(ex, self._translated_metrics)
         summed += value
     return summed
Example #16
 def get_sort_number(self):
     """Use the first segment value for sorting"""
     value, _unit, _color = evaluate(self._perfometer["segments"][0],
                                     self._translated_metrics)
     return value
Example #17
 def _get_type_label(self):
     value, unit, _color = evaluate(self._perfometer["metric"],
                                    self._translated_metrics)
     return unit["render"](value)