Example #1
    def _skip_perfometer_by_trivial_metrics(self, required_metric_names: RequiredMetricNames,
                                            translated_metrics: TranslatedMetrics) -> bool:
        """Whether or not a perfometer can be skipped by simple metric name matching instead of expression evaluation

        Performance optimization: Try to reduce the number of perfometers to evaluate by
        comparing the names in the "required" metrics with the translated metrics.
        We only look at the simple "required expressions" that don't make use of formulas.
        In case there is a formula, we cannot skip the perfometer and have to evaluate
        it.
        """
        if required_metric_names is None:
            return False

        available_metric_names = set(translated_metrics.keys())
        return not required_metric_names.issubset(available_metric_names)
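
A minimal usage sketch of the skip check, using plain sets and a dict with made-up metric names in place of the real RequiredMetricNames and TranslatedMetrics values:

# Hypothetical data; the real translated_metrics values are dicts with
# value/unit/color information, which is irrelevant for this check.
required_metric_names = {"fs_used", "fs_size"}
translated_metrics = {"fs_used": {}, "fs_size": {}, "fs_growth": {}}

available_metric_names = set(translated_metrics.keys())

# All required names are available -> the perfometer is not skipped.
assert required_metric_names.issubset(available_metric_names)

# A requirement referring to a missing metric -> skip it without evaluation.
assert not {"fs_used", "mem_used"}.issubset(available_metric_names)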
Example #2
def metric_unit_color(
    metric_expression: str,
    translated_metrics: TranslatedMetrics,
    optional_metrics=None,
) -> Dict[str, str]:
    try:
        _value, unit, color = evaluate(metric_expression, translated_metrics)
    except KeyError as err:  # because metric_name is not in translated_metrics
        metric_name = err.args[0]
        if optional_metrics and metric_name in optional_metrics:
            return {}
        raise MKGeneralException(
            _("Graph recipe '%s' uses undefined metric '%s', available are: %s"
              ) % (metric_expression, metric_name,
                   ", ".join(sorted(translated_metrics.keys())) or "None"))
    return {"unit": unit["id"], "color": color}