Example #1
def test_ended_preemptively():
    lines = """\
QMNAME(THE.ENDED.ONE)                                     STATUS(ENDED PREEMPTIVELY) DEFAULT(NO) STANDBY(NOT APPLICABLE) INSTNAME(Installation1) INSTPATH(/opt/mqm) INSTVER(7.5.0.2)
"""
    section = parse_info(lines, chr(10))
    check = Check(CHECK_NAME)
    parsed = parse_ibm_mq_managers(section)

    params: Dict[str, Any] = {}
    actual = list(check.run_check("THE.ENDED.ONE", params, parsed))
    expected = [
        (1, "Status: ENDED PREEMPTIVELY"),
        (0, "Version: 7.5.0.2"),
        (0, "Installation: /opt/mqm (Installation1), Default: NO"),
    ]
    assert expected == actual

    lines = """\
QMNAME(THE.ENDED.ONE)                                     STATUS(ENDED PRE-EMPTIVELY) DEFAULT(NO) STANDBY(NOT APPLICABLE) INSTNAME(Installation1) INSTPATH(/opt/mqm) INSTVER(8.0.0.1)
"""
    section = parse_info(lines, chr(10))
    check = Check(CHECK_NAME)
    parsed = parse_ibm_mq_managers(section)

    actual = list(check.run_check("THE.ENDED.ONE", params, parsed))
    expected = [
        (1, "Status: ENDED PRE-EMPTIVELY"),
        (0, "Version: 8.0.0.1"),
        (0, "Installation: /opt/mqm (Installation1), Default: NO"),
    ]
    assert expected == actual
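Both halves of this test differ only in the status spelling and the version. A parametrized variant removes the duplication; a minimal sketch (hypothetical test name), reusing parse_info, parse_ibm_mq_managers, Check and CHECK_NAME from the test module, and assuming the parser tolerates the reduced column padding:

import pytest

@pytest.mark.parametrize("status, version", [
    ("ENDED PREEMPTIVELY", "7.5.0.2"),
    ("ENDED PRE-EMPTIVELY", "8.0.0.1"),
])
def test_ended_preemptively_parametrized(status, version):
    lines = (
        "QMNAME(THE.ENDED.ONE) STATUS(%s) DEFAULT(NO) "
        "STANDBY(NOT APPLICABLE) INSTNAME(Installation1) "
        "INSTPATH(/opt/mqm) INSTVER(%s)\n" % (status, version)
    )
    parsed = parse_ibm_mq_managers(parse_info(lines, chr(10)))
    actual = list(Check(CHECK_NAME).run_check("THE.ENDED.ONE", {}, parsed))
    assert actual == [
        (1, "Status: %s" % status),
        (0, "Version: %s" % version),
        (0, "Installation: /opt/mqm (Installation1), Default: NO"),
    ]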
Example #2
def test_status_wato_override():
    check = Check(CHECK_NAME)
    parsed = {
        "QM1": {
            "STATUS": "RUNNING"
        },
        "QM1:CHAN1": {
            "CHLTYPE": "SVRCONN",
            "STATUS": "STOPPED"
        },
    }

    # Factory defaults
    params: Dict[str, Any] = {}
    actual = list(check.run_check("QM1:CHAN1", params, parsed))
    expected: List[Tuple[int, str, List[Any]]] = [
        (2, "Status: STOPPED, Type: SVRCONN", [])
    ]
    assert actual == expected

    # Override factory defaults
    params = {"mapped_states": [("stopped", 1)]}
    actual = list(check.run_check("QM1:CHAN1", params, parsed))
    expected = [(1, "Status: STOPPED, Type: SVRCONN", [])]
    assert actual == expected

    # Configured override does not match the actual status
    params = {
        "mapped_states": [("retrying", 1)],
        "mapped_states_default": 3,
    }
    actual = list(check.run_check("QM1:CHAN1", params, parsed))
    expected = [(3, "Status: STOPPED, Type: SVRCONN", [])]
    assert actual == expected
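The three parameter blocks above pin down the precedence: a matching mapped_states entry wins, otherwise mapped_states_default applies, otherwise the factory default. A minimal sketch of that lookup, with the status-to-key normalization inferred from the test data (not copied from the real check):

def resolve_state(status, params, factory_default):
    # WATO keys are lower-case with underscores, e.g. "stopped" for
    # STATUS(STOPPED); cf. "ended_pre_emptively" in Example #29.
    wato_key = status.lower().replace(" ", "_").replace("-", "_")
    for key, state in params.get("mapped_states", []):
        if key == wato_key:
            return state
    return params.get("mapped_states_default", factory_default)

assert resolve_state("STOPPED", {}, 2) == 2
assert resolve_state("STOPPED", {"mapped_states": [("stopped", 1)]}, 2) == 1
assert resolve_state("STOPPED", {"mapped_states": [("retrying", 1)],
                                 "mapped_states_default": 3}, 2) == 3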
Example #3
def test_check():
    check = Check(CHECK_NAME)
    params: Dict[str, Any] = {}
    parsed = {
        "QM1": {
            "STATUS": "RUNNING"
        },
        "QM1:CHAN1": {
            "CHLTYPE": "SDR",
            "STATUS": "RETRYING",
            "XMITQ": "MY.XMIT.Q"
        },
        "QM1:CHAN2": {
            "CHLTYPE": "RCVR",
            "STATUS": "STOPPED"
        },
        "QM1:CHAN3": {
            "CHLTYPE": "SVRCONN"
        },
    }

    actual = list(check.run_check("QM1:CHAN1", params, parsed))
    expected: List[Tuple[int, str, List[Any]]] = [
        (1, "Status: RETRYING, Type: SDR, Xmitq: MY.XMIT.Q", [])
    ]
    assert actual == expected

    actual = list(check.run_check("QM1:CHAN2", params, parsed))
    expected = [(2, "Status: STOPPED, Type: RCVR", [])]
    assert actual == expected

    actual = list(check.run_check("QM1:CHAN3", params, parsed))
    expected = [(0, "Status: INACTIVE, Type: SVRCONN", [])]
    assert actual == expected
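QM1:CHAN3 carries no STATUS attribute, yet the check reports "Status: INACTIVE" with state OK. A one-line sketch of the defaulting inferred from that case:

def effective_status(channel_attrs):
    # A channel that is defined but has no runtime status entry has no
    # STATUS attribute and counts as INACTIVE (inferred from QM1:CHAN3).
    return channel_attrs.get("STATUS", "INACTIVE")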
Example #4
def test_nimble_latency_write_params(params, data, result):
    """Test that latency write levels are applied to write types only."""

    read_check = Check("nimble_latency")
    write_check = Check("nimble_latency.write")
    read_results = list(read_check.run_check("itemxyz", params, data))
    write_results = list(write_check.run_check("itemxyz", params, data))
    assert result == write_results[0]
    assert not read_results
Example #5
def test_docker_container_diskstat_wrapped():
    check = Check('docker_container_diskstat')
    parsed = check.run_parse(INFO_MISSING_COUNTERS)

    with pytest.raises(MKCounterWrapped):
        check.run_check("SUMMARY", {}, parsed)

    with mock_item_state((0, 0)):
        # raises MKCounterWrapped anyway, because the counters are missing from the info
        with pytest.raises(MKCounterWrapped):
            check.run_check("SUMMARY", {}, parsed)
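The wrapping tested here is the usual legacy counter pattern: without a previous sample no rate can be formed, and because the counters are missing from the agent output altogether, the check wraps even with a mocked item state. A hypothetical helper illustrating the pattern (not the docker_container_diskstat source):

def rate_or_wrap(store, key, this_time, this_value):
    if this_value is None:
        # Counter absent from the agent output: always wrapped, which is
        # exactly what the test asserts even under mock_item_state((0, 0)).
        raise MKCounterWrapped("Counter missing in agent output")
    last = store.get(key)
    store[key] = (this_time, this_value)
    if last is None:
        raise MKCounterWrapped("Initialized counter %r" % key)
    last_time, last_value = last
    if this_time <= last_time:
        raise MKCounterWrapped("No time difference since last check")
    return (this_value - last_value) / (this_time - last_time)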
Example #6
def test_fortigate_sslvpn_old_params():
    check = Check("fortigate_sslvpn")
    parsed = {
        "domain": {
            "state": "1",
            "users": 0,
            "web_sessions": 0,
            "tunnels": 0,
            "tunnels_max": 0,
        },
    }
    check.run_check("no-item", None, parsed)
Example #7
def test_cluster_mode_check_function(monkeypatch, params, first_result_change,
                                     second_result_change):
    check = Check("netapp_api_cpu")
    monkeypatch.setattr("time.time", lambda: 0)
    try:
        check.run_check("clu1-01", params, result_parsed_over_time[0])
    except MKCounterWrapped:
        pass
    monkeypatch.setattr("time.time", lambda: 60)
    result = check.run_check("clu1-01", params, result_parsed_over_time[1])
    assert result == first_result_change
    monkeypatch.setattr("time.time", lambda: 180)
    result = check.run_check("clu1-01", params, result_parsed_over_time[2])
    assert result == second_result_change
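The pattern in this test: the first run at t=0 only primes the counters, so a MKCounterWrapped is tolerated; the runs at t=60 and t=180 then compute rates over 60 s and 120 s windows. A small illustrative helper for this kind of test (hypothetical, not part of the suite):

def run_at(monkeypatch, check, when, item, params, section):
    monkeypatch.setattr("time.time", lambda: when)
    try:
        return check.run_check(item, params, section)
    except MKCounterWrapped:
        return None  # first sample: counters were just initialized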
Example #8
def test_stale_service_for_not_running_qmgr():
    check = Check(CHECK_NAME)
    params: Dict[str, Any] = {}
    parsed = {"QM1": {"STATUS": "ENDED NORMALLY"}}
    with pytest.raises(MKCounterWrapped,
                       match=r"Stale because queue manager ENDED NORMALLY"):
        list(check.run_check("QM1:CHAN2", params, parsed))
Example #9
def test_check_function(
    info,
    state_expected,
    info_expected,
    perf_expected_key,
    perf_expected_value,
    state_expected_perc,
    info_expected_perc,
):
    """
    Checks handling of unusual connection values
    """
    check = Check("mongodb_connections")
    check_result = CheckResult(
        check.run_check(None, {"levels_perc": (80.0, 90.0)}, info))

    if len(check_result.subresults) == 0:
        assert state_expected == 3
    elif len(check_result.subresults) == 3:
        check_used_connection(
            check_result.subresults[0],
            state_expected,
            info_expected,
            perf_expected_key,
            perf_expected_value,
        )
        check_used_percentage(check_result.subresults[1], state_expected_perc,
                              info_expected_perc)
        # check_used_rate(check_result.subresults[2], ...): not tested here, assuming get_rate works.
    else:
        raise AssertionError()
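levels_perc is a warn/crit pair on the percentage of used connections. A sketch of the thresholding, assuming the percentage is taken against current plus available connections as reported by MongoDB's serverStatus (the message format is illustrative):

def check_levels_perc(current, available, levels_perc):
    warn, crit = levels_perc  # e.g. (80.0, 90.0)
    perc = 100.0 * current / (current + available)
    state = 2 if perc >= crit else 1 if perc >= warn else 0
    text = "Used connections: %.2f%%" % perc
    if state:
        text += " (warn/crit at %.1f%%/%.1f%%)" % (warn, crit)
    return state, text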
Example #10
def test_nfsmounts(info, discovery_expected, check_expected):
    check_nfs = Check("nfsmounts")
    check_cifs = Check("cifsmounts")

    # ensure that the code of both checks is identical
    assert (
        check_nfs.info["parse_function"].__code__.co_code
        == check_cifs.info["parse_function"].__code__.co_code
    )
    assert (
        check_nfs.info["inventory_function"].__code__.co_code
        == check_cifs.info["inventory_function"].__code__.co_code
    )
    assert (
        check_nfs.info["check_function"].__code__.co_code
        == check_cifs.info["check_function"].__code__.co_code
    )

    parsed = check_nfs.run_parse(info)

    assertDiscoveryResultsEqual(
        check_nfs,
        DiscoveryResult(check_nfs.run_discovery(parsed)),
        DiscoveryResult(discovery_expected),
    )

    for item, params, result_expected in check_expected:
        result = CheckResult(check_nfs.run_check(item, params, parsed))
        assertCheckResultsEqual(result, CheckResult([result_expected]))
Example #11
def test_check_function(
    info,
    state_expected,
    info_expected,
    perf_expected,
    state_expected_flush,
    info_expected_flush,
    perf_expected_flush_key,
    perf_expected_flush_value,
):
    """
    Only checks for missing flushing data
    """
    check = Check("mongodb_flushing")
    check_result = CheckResult(
        check.run_check(None, {
            "average_time": (1, 4, 60),
            "last_time": (0.1, 0.2)
        }, info))

    if len(check_result.subresults) == 1:
        check_result_3(check_result.subresults[0], state_expected,
                       info_expected)
    elif len(check_result.subresults) == 4:
        check_result_average(check_result.subresults[0], state_expected,
                             info_expected)
        check_result_flush_time(
            check_result.subresults[1],
            state_expected_flush,
            info_expected_flush,
            perf_expected_flush_key,
            perf_expected_flush_value,
        )
Example #12
def test_no_xmit_queue_defined():
    """
    This happened on queue manager MQZZZPPPP and channel FOO.TO.RESA. It
    is a misconfiguration on the queue manager, but monitoring should not
    choke on it.
    """
    check = Check(CHECK_NAME)
    params: Dict[str, Any] = {}
    parsed = {
        "QM1": {
            "STATUS": "RUNNING"
        },
        "QM1:CHAN1": {
            "CHLTYPE": "SDR",
            "STATUS": "RETRYING",
            "XMITQ": "MY.XMIT.Q"
        },
        "QM1:CHAN2": {
            "CHLTYPE": "RCVR",
            "STATUS": "STOPPED"
        },
        "QM1:CHAN3": {
            "CHLTYPE": "SVRCONN"
        },
        "MQZZZPPPP:FOO.TO.RESA": {
            "CHLTYPE": "SDR"
        },
    }
    actual = list(check.run_check("MQZZZPPPP:FOO.TO.RESA", params, parsed))
    expected: List[Tuple[int, str, List[Any]]] = [
        (0, "Status: INACTIVE, Type: SDR", [])
    ]
    assert actual == expected
Example #13
def test_check_win_license(params, expected_status, expected_levels_info):
    check = Check("msoffice_serviceplans")

    item = "bundle"
    output = check.run_check(
        item,
        params,
        [
            [item, "plan-success-1", "Success"],
            [item, "plan-suc", "cess-2", "Success"],
            [item, "plan-pending-1", "PendingActivation"],
            [item, "plan-pen", "ding-2", "PendingActivation"],
        ],
    )

    result = [
        BasicCheckResult(expected_status,
                         "Success: 2, Pending: 2%s" % expected_levels_info),
        BasicCheckResult(0,
                         "Pending Services: plan-pending-1, plan-pen ding-2"),
    ]

    assertCheckResultsEqual(
        CheckResult(output),
        CheckResult(result),
    )
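Note the fixture rows that split a plan name across two fields: "plan-pen", "ding-2" ends up as "plan-pen ding-2" in the output, so the check evidently rejoins everything between the item and the trailing status with spaces. A sketch of that rejoining (hypothetical helper name):

def split_plan_line(line):
    # line = [item, name_part_1, ..., name_part_n, status]
    item, status = line[0], line[-1]
    return item, " ".join(line[1:-1]), status

assert split_plan_line(["bundle", "plan-pen", "ding-2", "PendingActivation"]) \
    == ("bundle", "plan-pen ding-2", "PendingActivation")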
Example #14
def test_crashreport(fix_plugin_legacy, crashdata):
    try:
        run(fix_plugin_legacy.check_info, crashdata)
        check = Check(crashdata.full_checkname)
        if "item" in crashdata.vars:
            item = crashdata.vars["item"]
            params = crashdata.vars.get("params", {})
            if crashdata.parsed:
                raw_result = check.run_check(item, params, crashdata.parsed)
            else:
                raw_result = check.run_check(item, params, crashdata.info)
            print(CheckResult(raw_result))
    except Exception:
        pprint.pprint(crashdata.__dict__)
        crashdata.write("/tmp")
        raise
Example #15
def test_io_check():
    item_1st = "VMFS_01"
    params = {"flex_levels": "irrelevant"}
    check = Check("hp_msa_volume.io")
    parsed = {
        "VMFS_01": {
            "durable-id": "V3",
            "data-read-numeric": "23719999539712",
            "data-written-numeric": "18093374647808",
            "virtual-disk-name": "A",
            "raidtype": "RAID0",
        },
        "VMFS_02": {
            "durable-id": "V4",
            "data-read-numeric": "49943891507200",
            "data-written-numeric": "7384656100352",
            "virtual-disk-name": "A",
            "raidtype": "RAID0",
        },
    }
    _, read, written = check.run_check(item_1st, params, parsed)
    assertCheckResultsEqual(
        CheckResult(read),
        CheckResult((0, "Read: 0.00 B/s", [("disk_read_throughput", 0.0, None,
                                            None)])),
    )
    assertCheckResultsEqual(
        CheckResult(written),
        CheckResult((0, "Write: 0.00 B/s", [("disk_write_throughput", 0.0,
                                             None, None)])),
    )
Example #16
def test_check():
    check = Check(CHECK_NAME)
    params = {"curdepth": (1500, 2000), "ipprocs": {"upper": (4, 8)}}
    parsed = {
        "QM1": {
            "STATUS": "RUNNING"
        },
        "QM1:MY.QUEUE": {
            "CURDEPTH": "1400",
            "MAXDEPTH": "200000",
            "MSGAGE": "2201",
            "IPPROCS": "5",
            "OPPROCS": "0",
            "QTIME": ",",
        },
    }
    actual = list(check.run_check("QM1:MY.QUEUE", params, parsed))
    expected = [
        (0, "Queue depth: 1400 (0.7%)",
         [("curdepth", 1400, 1500, 2000, 0, 200000)]),
        (0, "Oldest message: 36 m", [("msgage", 2201, None, None)]),
        (1, "Open input handles: 5 (warn/crit at 4/8)",
         [("ipprocs", 5, 4, 8)]),
        (0, "Open output handles: 0", [("opprocs", 0, None, None)]),
        (0, "Qtime short: n/a", [("qtime_short", 0, None, None)]),
        (0, "Qtime long: n/a", [("qtime_long", 0, None, None)]),
    ]
    assert actual == expected
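The expected texts follow directly from the parsed attributes:

# Queue depth percentage: CURDEPTH relative to MAXDEPTH.
assert round(100.0 * 1400 / 200000, 1) == 0.7  # -> "Queue depth: 1400 (0.7%)"
# Message age is rendered in whole minutes.
assert 2201 // 60 == 36                        # -> "Oldest message: 36 m"
# IPPROCS=5 exceeds the warn level 4 but not crit 8 -> WARN (state 1).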
Example #17
def test_check_win_license(capture, result):
    check = Check("win_license")
    output = check.run_check(
        None, result.parameters or check.default_parameters(), check.run_parse(splitter(capture))
    )

    assertCheckResultsEqual(CheckResult(output), result.check_output)
Example #18
def test_statgrab_cpu_check(info, mockstate, expected_result):

    check = Check("statgrab_cpu")

    # set up mocking of `get_item_state`
    with mock_item_state(mockstate):
        result = CheckResult(check.run_check(None, {}, info))
    assertCheckResultsEqual(result, expected_result)
Example #19
def test_ra32e_power_check_battery():
    check = Check(RA32E_POWER)
    result = check.run_check(None, {}, [["0"]])

    assert len(result) == 2
    status, infotext = result
    assert status == 1
    assert "battery" in infotext
Example #20
def test_nimble_latency_ranges(params, data, result):
    """The user can specify a parameter range_reference, which serves as a starting
    point from which values should start to be stacked and checked against levels.
    Test whether the stacking is correct."""

    check = Check("nimble_latency")
    actual_results = list(check.run_check("itemxyz", params, data))
    assert result == actual_results[0]
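A sketch of the stacking the docstring describes: starting at the bucket named by range_reference, accumulate the share of operations in that bucket and all slower ones, then compare the sum against the levels. The bucket names here are illustrative, not the check's real ones:

BUCKET_ORDER = ["0-0.1", "0.1-0.2", "0.2-0.5", "0.5-1", "1-2",
                "2-5", "5-10", "10-20", "20+"]  # ms, assumed layout

def stacked_percentage(buckets, range_reference):
    start = BUCKET_ORDER.index(range_reference)
    total = sum(buckets.values())
    if not total:
        return 0.0
    stacked = sum(buckets.get(name, 0) for name in BUCKET_ORDER[start:])
    return 100.0 * stacked / total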
Example #21
def test_df_check_groups_with_parse(add_params, expected_result):
    check = Check("df")
    params = make_test_df_params()
    params.update(add_params)

    actual = CheckResult(check.run_check("my-group", params, parse_df(info_df_groups)))
    expected = CheckResult(expected_result)
    assertCheckResultsEqual(actual, expected)
Example #22
def test_k8s_replicas(info, expected):
    check = Check("k8s_replicas")
    parsed = parse_json(info)
    actual = check.run_check(None, {}, parsed)

    assertCheckResultsEqual(
        CheckResult(actual),
        CheckResult(expected),
    )
Example #23
def test_check_docker_node_disk_usage():
    check = Check("docker_node_disk_usage")
    result = list(check.run_check("volumes", {}, check.run_parse(AGENT_OUTPUT)))
    assert result == [
        (0, "Size: 229.67 kB", [("size", 235177, None, None)]),
        (0, "Reclaimable: 93.00 B", [("reclaimable", 93, None, None)]),
        (0, "Count: 7", [("count", 7, None, None)]),
        (0, "Active: 5", [("active", 5, None, None)]),
    ]
Example #24
def test_vanished_service_for_running_qmgr():
    check = Check(CHECK_NAME)
    params: Dict[str, Any] = {}
    parsed = {
        "QM1": {"STATUS": "RUNNING"},
        "QM1:CHAN1": {"CHLTYPE": "SVRCONN"},
    }
    actual = list(check.run_check("QM1:VANISHED", params, parsed))
    assert len(actual) == 0
Example #25
def test_docker_container_diskstat_check(mocker, monkeypatch):
    mocker.patch("cmk.base.item_state._get_counter", return_value=[None, 2.22])
    check = Check('docker_container_diskstat')
    result = check.run_check('dm-1', {}, check.run_parse(INFO))
    assert list(result) == [
        (0, 'Read: 2.22 B/s', [('disk_read_throughput', 2.22, None, None)]),
        (0, 'Write: 2.22 B/s', [('disk_write_throughput', 2.22, None, None)]),
        (0, 'Read operations: 2.22 1/s', [('disk_read_ios', 2.22, None, None)]),
        (0, 'Write operations: 2.22 1/s', [('disk_write_ios', 2.22, None, None)]),
    ]
Example #26
def test_check_function(parameters, info, state_expected, infotext_expected, perfdata_expected):
    """
    Verifies that the check function applies the warn and crit CPU levels.
    """
    check = Check(CHECK_NAME)
    item = None
    state, infotext, perfdata = check.run_check(item, parameters, info)
    assert state == state_expected
    assert infotext == infotext_expected
    assert perfdata == perfdata_expected
Example #27
def test_statgrab_cpu_check_error(info, mockstate):

    check = Check("statgrab_cpu")

    with mock_item_state(mockstate):
        # The mock values are designed to raise an exception;
        # make sure it actually is raised:
        with assertMKCounterWrapped(
                "Too short time difference since last check"):
            CheckResult(check.run_check(None, {}, info))
Example #28
def test_check_function(
    parameters, item, info, state_expected, infotext_expected, perfdata_expected
):
    """
    Verifies that the check function applies the warn and crit levels for board and CPU temperatures.
    """
    check = Check("alcatel_temp")
    state, infotext, perfdata = check.run_check(item, parameters, info)
    assert state == state_expected
    assert infotext_expected in infotext
    assert perfdata == perfdata_expected
Example #29
def test_status_wato_override():
    lines = """\
QMNAME(THE.ENDED.ONE)                                     STATUS(ENDED PRE-EMPTIVELY) DEFAULT(NO) STANDBY(NOT APPLICABLE) INSTNAME(Installation1) INSTPATH(/opt/mqm) INSTVER(7.5.0.2)
"""
    section = parse_info(lines, chr(10))
    check = Check(CHECK_NAME)
    parsed = parse_ibm_mq_managers(section)

    # Factory defaults
    params: Dict[str, Any] = {}
    actual = list(check.run_check("THE.ENDED.ONE", params, parsed))
    expected = [
        (1, "Status: ENDED PRE-EMPTIVELY"),
        (0, "Version: 7.5.0.2"),
        (0, "Installation: /opt/mqm (Installation1), Default: NO"),
    ]
    assert expected == actual

    # Override factory defaults
    params = {"mapped_states": [("ended_pre_emptively", 2)]}
    actual = list(check.run_check("THE.ENDED.ONE", params, parsed))
    expected = [
        (2, "Status: ENDED PRE-EMPTIVELY"),
        (0, "Version: 7.5.0.2"),
        (0, "Installation: /opt/mqm (Installation1), Default: NO"),
    ]
    assert expected == actual

    # Configured override does not match the actual status
    params = {
        "mapped_states": [("running_as_standby", 2)],
        "mapped_states_default": 3,
    }
    actual = list(check.run_check("THE.ENDED.ONE", params, parsed))
    expected = [
        (3, "Status: ENDED PRE-EMPTIVELY"),
        (0, "Version: 7.5.0.2"),
        (0, "Installation: /opt/mqm (Installation1), Default: NO"),
    ]
    assert expected == actual
Example #30
def test_wmi_msexch_isclienttype_wato_params(check_name, expected):
    check = Check(check_name)
    result = list(
        check.run_check(
            item="_total",
            params={
                "store_latency": (41.0, 51.0),
                "clienttype_latency": (40.0, 50.0),
                "clienttype_requests": (60, 70),
            },
            info=check.run_parse(info_msx_info_store_1),
        ))
    assert result == expected