Example #1
from itertools import chain
from typing import Dict, Iterable, Tuple

# Choices, graph_info, livestatus_query_bare and _graph_choices_from_livestatus_row
# are provided by the surrounding module.


def graph_templates_autocompleter(value: str, params: Dict) -> Choices:
    """Return the matching list of dropdown choices
    Called by the webservice with the current input field value and the
    completions_params to get the list of choices"""
    if not params.get("context") and params.get("strict", "False") == "False":
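        # No context and not strict: offer every known graph template.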
        choices: Iterable[Tuple[str, str]] = (
            (graph_id, str(graph_details.get("title", graph_id)))
            for graph_id, graph_details in graph_info.items()
        )

    else:
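        # Restrict the choices to the graphs of the services matching the given context.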
        columns = [
            "service_check_command",
            "service_perf_data",
            "service_metrics",
        ]

        choices = set(
            chain.from_iterable(
                _graph_choices_from_livestatus_row(row)
                for row in livestatus_query_bare("service", params["context"], columns)
            )
        )

    return sorted((v for v in choices if value.lower() in v[1].lower()),
                  key=lambda a: a[1].lower())
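
To make the filtering and sorting concrete, here is a small standalone sketch of the
non-context branch: a hypothetical graph_info-style mapping is turned into
(graph_id, title) pairs, filtered case-insensitively by the typed value and sorted by
title. The helper name and the sample data are invented for illustration only.

from typing import Dict, Iterable, List, Tuple


def _filter_graph_choices(value: str, graph_info: Dict[str, Dict[str, str]]) -> List[Tuple[str, str]]:
    # Mirror the non-context branch above: build (graph_id, title) pairs,
    # filter by substring on the title, sort by title.
    choices: Iterable[Tuple[str, str]] = (
        (graph_id, str(details.get("title", graph_id)))
        for graph_id, details in graph_info.items()
    )
    return sorted((v for v in choices if value.lower() in v[1].lower()),
                  key=lambda a: a[1].lower())


print(_filter_graph_choices("cpu", {
    "cpu_utilization": {"title": "CPU utilization"},
    "mem_used": {"title": "Memory used"},
}))
# -> [('cpu_utilization', 'CPU utilization')]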
Example #2
from typing import Iterator

# VisualContext, Choice, livestatus_query_bare, parse_perf_data and metric_choices
# are provided by the surrounding module.


def metrics_of_query(
    context: VisualContext,
) -> Iterator[Choice]:
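    """Yield the metric choices for all services matched by the given context.

    The host metrics are yielded once at the end, taken from the host columns of
    the last row returned by the query."""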
    # Fetch host data with the *same* query. This saves one round trip, and each
    # host has at least one service anyway.
    columns = [
        "service_description",
        "service_check_command",
        "service_perf_data",
        "service_metrics",
        "host_check_command",
        "host_metrics",
    ]

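    # Initialising row to {} keeps the .get() lookups after the loop safe even
    # when the query returns no rows.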
    row = {}
    for row in livestatus_query_bare("service", context, columns):
        parsed_perf_data, check_command = parse_perf_data(
            row["service_perf_data"], row["service_check_command"]
        )
        known_metrics = set([perf[0] for perf in parsed_perf_data] + row["service_metrics"])
        yield from metric_choices(str(check_command), tuple(map(str, known_metrics)))

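    # The host columns were fetched with the same rows; use whatever row was seen last.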
    if row.get("host_check_command"):
        yield from metric_choices(
            str(row["host_check_command"]), tuple(map(str, row["host_metrics"]))
        )
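
For completeness, a hedged sketch of how a caller might consume this generator,
assuming the yielded Choice values are hashable (name, title) tuples: several
services can expose the same metric, so the choices are deduplicated and sorted
before being shown in a dropdown. The helper name is hypothetical.

def unique_sorted_metric_choices(context):
    # Deduplicate, then sort by the human-readable title (second tuple element).
    return sorted(set(metrics_of_query(context)), key=lambda choice: str(choice[1]).lower())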