Example #1
def render_metrics_table(translated_metrics: TranslatedMetrics, host_name: str,
                         service_description: str) -> str:
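    """Render an HTML table with one row per translated metric, sorted by title."""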
    # TODO: Don't paste together strings by hand, use our HTML utilities.
    output = "<table class=metricstable>"
    for metric_name, metric in sorted(translated_metrics.items(), key=lambda x: x[1]["title"]):
        output += "<tr>"
        output += "<td class=color>%s</td>" % render_color_icon(metric["color"])
        output += "<td>%s:</td>" % metric["title"]
        output += "<td class=value>%s</td>" % metric["unit"]["render"](metric["value"])
        if not cmk_version.is_raw_edition():
            output += "<td>"
            output += str(
                html.render_popup_trigger(
                    html.render_icon("menu",
                                     title=_("Add this metric to dedicated graph"),
                                     cssclass="iconbutton"),
                    ident="add_metric_to_graph_" + host_name + ";" + str(service_description),
                    method=MethodAjax(endpoint="add_metric_to_graph",
                                      url_vars=[
                                          ("host", host_name),
                                          ("service", service_description),
                                          ("metric", metric_name),
                                      ])))
            output += "</td>"
        output += "</tr>"
    output += "</table>"
    return output
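
A minimal standalone sketch of the row-building loop above, with the Checkmk-specific
dependencies (html, cmk_version, render_color_icon) stripped out. The shape of
translated_metrics and all metric names here are illustrative assumptions, not taken
from the Checkmk codebase:

# Assumed input shape: metric name -> dict with "title", "color", "value"
# and a "unit" dict whose "render" key is a formatting callable.
translated_metrics = {
    "util": {"title": "CPU utilization", "color": "#87f058",
             "value": 87.5, "unit": {"render": lambda v: "%.1f%%" % v}},
    "uptime": {"title": "Uptime", "color": "#80f000",
               "value": 3600.0, "unit": {"render": lambda v: "%d s" % v}},
}

rows = "".join(
    "<tr><td>%s:</td><td class=value>%s</td></tr>"
    % (metric["title"], metric["unit"]["render"](metric["value"]))
    # Same ordering as above: rows sorted by the human-readable title.
    for _name, metric in sorted(translated_metrics.items(),
                                key=lambda x: x[1]["title"]))
print("<table class=metricstable>%s</table>" % rows)
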
Example #2
    def _skip_perfometer_by_trivial_metrics(self, required_metric_names: Optional[RequiredMetricNames],
                                            translated_metrics: TranslatedMetrics) -> bool:
        """Whether or not a perfometer can be skipped by simple metric name matching instead of expression evaluation

        Performance optimization: Try to reduce the amount of perfometers to evaluate by
        comparing the strings in the "required" metrics with the translated metrics.
        We only look at the simple "requried expressions" that don't make use of formulas.
        In case there is a formula, we can not skip the perfometer and have to evaluate
        it.
        """
        if required_metric_names is None:
            return False

        available_metric_names = set(translated_metrics.keys())
        return not required_metric_names.issubset(available_metric_names)
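
The skip test itself is plain set arithmetic; a standalone illustration with invented
metric names:

# Invented example names; in Checkmk these come from a perfometer definition
# and from the translated metrics of a service.
required_metric_names = {"util", "util_average"}
translated_metrics = {"util": {}, "user": {}, "system": {}}

available_metric_names = set(translated_metrics.keys())
# "util_average" is missing, so the perfometer is skipped (True) without
# evaluating any metric expressions.
print(not required_metric_names.issubset(available_metric_names))  # True
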
Example #3
def metric_unit_color(
    metric_expression: str,
    translated_metrics: TranslatedMetrics,
    optional_metrics=None,
) -> Dict[str, str]:
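    """Evaluate a metric expression and return its unit id and color.

    Returns an empty dict when the expression refers to a metric that is
    listed in optional_metrics and missing from translated_metrics.
    """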
    try:
        _value, unit, color = evaluate(metric_expression, translated_metrics)
    except KeyError as err:  # because metric_name is not in translated_metrics
        metric_name = err.args[0]
        if optional_metrics and metric_name in optional_metrics:
            return {}
        raise MKGeneralException(
            _("Graph recipe '%s' uses undefined metric '%s', available are: %s"
              ) % (metric_expression, metric_name,
                   ", ".join(sorted(translated_metrics.keys())) or "None"))
    return {"unit": unit["id"], "color": color}