Example #1
    yield value_store_patched


@pytest.mark.parametrize("item, params, expected_results", [
    ("H62 10 - DATA 20", df.FILESYSTEM_DEFAULT_LEVELS, [
        Metric('fs_used',
               84.703125,
               levels=(256.0, 288.0),
               boundaries=(0.0, 320.0)),
        Metric('fs_size', 320.0, levels=(None, None), boundaries=(None, None)),
        Metric('fs_used_percent',
               26.4697265625,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='26.5% used (84.7 MiB of 320 MiB)',
               details='26.5% used (84.7 MiB of 320 MiB)'),
        Metric('growth',
               -4470.553049074118,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='trend per 1 day 0 hours: +621 TiB',
               details='trend per 1 day 0 hours: +621 TiB'),
        Result(state=state.OK,
               summary='trend per 1 day 0 hours: +203357490%',
               details='trend per 1 day 0 hours: +203357490%'),
        Metric('trend',
               650743966.868858,
               levels=(None, None),
               boundaries=(0.0, 13.333333333333334)),
Example #2
def test_cpu_util_single_process_levels(cpu_cores):
    """Test CPU utilization per single process.
- Check that Number of cores weight is active
- Check that single process CPU utilization is present only on warn/crit states"""

    params: Dict[str, Any] = {
        'process': '~.*firefox',
        'process_info': "text",
        'cpu_rescale_max': True,
        'levels': (1, 1, 99999, 99999),
        'single_cpulevels': (45.0, 80.0),
    }

    def run_check_ps_common_with_elapsed_time(check_time, cputime):
        with on_time(check_time, "CET"):
            agent_info = """(on,2275004,434008,00:00:49/26:58,25576) firefox
(on,1869920,359836,00:01:23/6:57,25664) firefox
(on,7962644,229660,00:00:10/26:56,25758) firefox
(on,1523536,83064,00:{:02}:00/26:55,25898) firefox"""
            _cpu_info, parsed_lines = ps_section.parse_ps(splitter(agent_info.format(cputime)))
            lines_with_node_name: List[Tuple[Optional[str], ps_utils.ps_info, List[str]]] = [
                (None, ps_info, cmd_line) for (ps_info, cmd_line) in parsed_lines]

            return list(ps_utils.check_ps_common(
                label="Processes",
                item='firefox',
                params=params,  # type: ignore[arg-type]
                process_lines=lines_with_node_name,
                cpu_cores=cpu_cores,
                total_ram=None,
            ))

    with value_store.context(CheckPluginName("ps"), "unit-test"):
        # CPU utilization is a counter, initialize it
        run_check_ps_common_with_elapsed_time(0, 0)
        # CPU utilization is a counter, after 60s time, one process consumes 2 min of CPU
        output = run_check_ps_common_with_elapsed_time(60, 2)

    cpu_util = 200.0 / cpu_cores  # 2 min of CPU time in 60 s of wall time = 200%, rescaled to the core count
    cpu_util_s = ps_utils.render.percent(cpu_util)
    single_msg = 'firefox with PID 25898 CPU: %s (warn/crit at 45.0%%/80.0%%)' % cpu_util_s
    reference = [
        Result(state=state.OK, summary="Processes: 4"),
        Metric("count", 4, levels=(100000, 100000), boundaries=(0, None)),
        Result(state=state.OK, summary="virtual: 13.0 GiB"),
        Metric("vsz", 13631104),
        Result(state=state.OK, summary="physical: 1.06 GiB"),
        Metric("rss", 1106568),
        Metric('pcpu', cpu_util),
        Result(state=state.OK, summary="CPU: %s" % cpu_util_s),
        Result(state=state.OK, details='firefox with PID 25576 CPU: 0%'),
        Result(state=state.OK, details='firefox with PID 25664 CPU: 0%'),
        Result(state=state.OK, details='firefox with PID 25758 CPU: 0%'),
        Result(state=state.OK, details='firefox with PID 25898 CPU: 40.0%'),
        Result(state=state.OK, summary='Youngest running for: 6 minutes 57 seconds'),
        Result(state=state.OK, summary='Oldest running for: 26 minutes 58 seconds'),
        Result(state=state.OK, details="\r\n".join([
            'name firefox, user on, virtual size 2275004kB, resident size 434008kB,'
            ' creation time Jan 01 1970 00:34:02, pid 25576, cpu usage 0.0%',
            'name firefox, user on, virtual size 1869920kB, resident size 359836kB,'
            ' creation time Jan 01 1970 00:54:03, pid 25664, cpu usage 0.0%',
            'name firefox, user on, virtual size 7962644kB, resident size 229660kB,'
            ' creation time Jan 01 1970 00:34:04, pid 25758, cpu usage 0.0%',
            'name firefox, user on, virtual size 1523536kB, resident size 83064kB,'
            ' creation time Jan 01 1970 00:34:05, pid 25898, cpu usage %.1f%%\r\n'
            % cpu_util,
        ])),
    ]

    # reference[11] is the per-process detail Result for PID 25898 (the only process above 0% CPU)
    if cpu_util > params['single_cpulevels'][1]:
        reference[11] = Result(state=state.CRIT, summary=single_msg)
    elif cpu_util > params['single_cpulevels'][0]:
        reference[11] = Result(state=state.WARN, summary=single_msg)

    assert output == reference
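
These ps snippets rely on a splitter test helper that is not shown in the listing. A minimal stand-in, assuming it merely turns the raw agent text into a string table (one list of fields per line), might look like this:

# Assumed helper, not the original test code: split the agent output into a
# string table, one list of whitespace- (or custom-) separated fields per line.
def splitter(text, split_symbol=None):
    return [line.split(split_symbol) for line in text.split("\n")]
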
Example #3
from cmk.base.api.agent_based import value_store

import pytest  # type: ignore[import]

KILO = 1024

MEGA = KILO**2


@pytest.mark.parametrize(
    "label,used,total,levels,kwargs,expected",
    [
        # all variants of "no levels"
        ("Longterm", 23 * MEGA, 42 * MEGA, None, {}, [
            Result(
                state=state.OK,
                summary="Longterm: 54.8% - 23.0 MiB of 42.0 MiB",
            ),
        ]),
        ("Longterm", 23 * MEGA, 42 * MEGA, "ignore", {}, [
            Result(
                state=state.OK,
                summary="Longterm: 54.8% - 23.0 MiB of 42.0 MiB",
            ),
        ]),
        ("Longterm", 23 * MEGA, 42 * MEGA, ("ignore", None), {}, [
            Result(
                state=state.OK,
                summary="Longterm: 54.8% - 23.0 MiB of 42.0 MiB",
            ),
        ]),
        ("Longterm", 23 * MEGA, 42 * MEGA, ("ignore", (None, None)), {}, [
Example #4
def test_inventory_common():
    info = list(itertools.chain.from_iterable(generate_inputs()))
    assert sorted({s.item: s for s in ps_utils.discover_ps(  # type: ignore[attr-defined]
        PS_DISCOVERY_WATO_RULES,  # type: ignore[arg-type]
        ps_section.parse_ps(info),
        None,
        None,
    )}.values(), key=lambda s: s.item) == sorted(PS_DISCOVERED_ITEMS, key=lambda s: s.item)  # type: ignore[attr-defined]


CheckResult = tuple

check_results = [
    [
        Result(
            state=state.OK,
            summary="Processes: 1",
        ),
        Metric("count", 1, levels=(100000, 100000), boundaries=(0, None)),
        Result(
            state=state.WARN,
            summary="virtual: 1.00 GiB (warn/crit at 1.00 GiB/2.00 GiB)",
        ),
        Metric("vsz", 1050360, levels=(1073741824, 2147483648)),
        Result(
            state=state.OK,
            summary="physical: 296 MiB",
        ),
        Metric("rss", 303252, levels=(1073741824, 2147483648)),
        Result(
            state=state.WARN,
            summary="Percentage of total RAM: 28.9% (warn/crit at 25.0%/50.0%)",
Example #5
def test_subset_patterns():

    section_ps = ps_section.parse_ps(
        splitter("""(user,0,0,0.5) main
(user,0,0,0.4) main_dev
(user,0,0,0.1) main_dev
(user,0,0,0.5) main_test"""))

    # The word boundary in the match pattern is necessary; otherwise the 'main'
    # instance would accumulate all processes
    inv_params: List[Dict] = [{
        'default_params': {
            'cpu_rescale_max': True,
            'levels': (1, 1, 99999, 99999)
        },
        'match': '~(main.*)\\b',
        'descr': '%s',
    }, {}]

    discovered = [
        Service(
            item='main',
            parameters={
                'cpu_rescale_max': True,
                'levels': (1, 1, 99999, 99999),
                'process': '~(main.*)\\b',
                'match_groups': ('main',),
                'user': None,
                'cgroup': (None, False),
            },
        ),
        Service(
            item='main_dev',
            parameters={
                'cpu_rescale_max': True,
                'levels': (1, 1, 99999, 99999),
                'process': '~(main.*)\\b',
                'match_groups': ('main_dev',),
                'user': None,
                'cgroup': (None, False),
            },
        ),
        Service(
            item='main_test',
            parameters={
                'cpu_rescale_max': True,
                'levels': (1, 1, 99999, 99999),
                'process': '~(main.*)\\b',
                'match_groups': ('main_test',),
                'user': None,
                'cgroup': (None, False),
            },
        ),
    ]

    test_discovered = ps_utils.discover_ps(inv_params, section_ps, None, None)  # type: ignore[arg-type]
    assert {s.item: s for s in test_discovered} == {s.item: s for s in discovered}  # type: ignore[attr-defined]

    for service, count in zip(discovered, [1, 2, 1]):
        with value_store.context(CheckPluginName("ps"), "unit-test"):
            output = list(ps_utils.check_ps_common(
                label="Processes",
                item=service.item,
                params=service.parameters,
                process_lines=[
                    (None, psi, cmd_line) for (psi, cmd_line) in section_ps[1]],
                cpu_cores=1,
                total_ram=None,
            ))
        assert output[0] == Result(state=state.OK, summary="Processes: %s" % count)
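
The counts [1, 2, 1] zipped against the discovered services are simply how often each command name occurs in the agent output above. A self-contained sanity check of that arithmetic (illustration only, not part of the original test):

# Illustration only: the expected per-service process counts follow directly
# from how often each command name appears in the agent output.
from collections import Counter

name_counts = Counter(['main', 'main_dev', 'main_dev', 'main_test'])
assert [name_counts[item] for item in ('main', 'main_dev', 'main_test')] == [1, 2, 1]
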
Example #6
def test_sap_hana_status_parse(string_table_row, expected_parsed_data):
    assert sap_hana_status.parse_sap_hana_status(
        string_table_row) == expected_parsed_data


def test_sap_hana_status_discovery():
    assert list(sap_hana_status.discovery_sap_hana_status(SECTION)) == [
        Service(item="Status %s" % ITEM),
        Service(item="Version %s" % ITEM),
    ]


@pytest.mark.parametrize(
    "section, check_type, results",
    [(SECTION, "Status",
      Result(state=state.OK, summary='Status: OK', details='Status: OK')),
     (
         SECTION,
         "Version",
         Result(state=state.OK,
                summary='Version: 1.00.122.22.1543461992 (fa/hana1sp12)',
                details='Version: 1.00.122.22.1543461992 (fa/hana1sp12)'),
     ),
     (SECTION_WARNING, "Status",
      Result(state=state.WARN,
             summary='Status: WARNING',
             details='Status: WARNING'))])
def test_sap_hana_status_check(check_type, results, section):

    yielded_results = list(
        sap_hana_status.check_sap_hana_status("%s %s" % (check_type, ITEM),
Example #7
        string_table_row) == expected_parsed_data


def test_sap_hana_license_discovery():
    assert list(sap_hana_license.discovery_sap_hana_license(SECTION)) == [
        Service(item='Y04 10', parameters={}, labels=[]),
        Service(item='H62 10', parameters={}, labels=[]),
        Service(item='X04 55', parameters={}, labels=[]),
        Service(item='X00 00', parameters={}, labels=[]),
    ]


@pytest.mark.parametrize("cur_item, result", [
    ("Y04 10", [
        Result(state=state.OK,
               summary='Status: unlimited',
               details='Status: unlimited'),
        Result(state=state.WARN,
               summary='License: not FALSE',
               details='License: not FALSE'),
        Result(state=state.WARN,
               summary='Expiration date: 2020-08-02 23:59:59.999999000',
               details='Expiration date: 2020-08-02 23:59:59.999999000'),
    ]),
    ("H62 10", [
        Result(state=state.OK,
               summary='Status: unlimited',
               details='Status: unlimited'),
        Result(
            state=state.OK, summary='License: TRUE', details='License: TRUE'),
    ]),
Example #8
def test_discover_liebert_humidity_air(section, extra_section, result):
    discovered = list(discover_liebert_humidity_air(section, extra_section))
    assert discovered == result


@pytest.mark.parametrize(
    'item, params, section, extra_section, result',
    [
        (
            'Return',
            PARAMETERS,
            PARSED_SECTION,
            PARSED_EXTRA_SECTION,
            [
                Result(state=state.OK,
                       summary='36.50 % RH',
                       details='36.50 % RH'),
                Metric(name='humidity',
                       value=36.5,
                       levels=(50.0, 55.0),
                       boundaries=(None, None)),
            ],
        ),
        (
            # Item 'Cigar' is not discovered by the discovery function. It is included here to
            # verify that the check handles an item correctly when its status changes from 'on'
            # to 'standby'.
            'Cigar',
            PARAMETERS,
            PARSED_SECTION,
            PARSED_EXTRA_SECTION,
Example #9
    scaled_levels = diskstat._scale_levels(levels, factor)
    if levels is None:
        assert scaled_levels is None
    else:
        assert scaled_levels == tuple(level * factor for level in levels)
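
The assertions above fully pin down the behaviour being tested: None levels stay None, and any other level tuple is multiplied element-wise by the factor. A minimal implementation consistent with that contract (an assumption for illustration, not the actual diskstat code) would be:

# Sketch matching the assertions above (assumption, not the real plugin code):
def _scale_levels(levels, factor):
    if levels is None:
        return None
    return tuple(level * factor for level in levels)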


@pytest.mark.parametrize(
    "params,disk,exp_res",
    [
        (
            type_defs.Parameters({}),
            DISK,
            [
                Result(state=state.OK,
                       summary='Utilization: 53.2%',
                       details='Utilization: 53.2%'),
                Metric('disk_utilization',
                       0.53242,
                       levels=(None, None),
                       boundaries=(None, None)),
                Result(state=state.OK,
                       summary='Read throughput: 12.3 kB/s',
                       details='Read throughput: 12.3 kB/s'),
                Metric('disk_read_throughput',
                       12312.4324,
                       levels=(None, None),
                       boundaries=(None, None)),
                Result(state=state.OK,
                       summary='Write throughput: 3.45 kB/s',
                       details='Write throughput: 3.45 kB/s'),
Example #10
def test_check_diskstat_summary(value_store):
    with pytest.raises(IgnoreResultsError):
        list(
            diskstat.check_diskstat(
                'SUMMARY',
                type_defs.Parameters({}),
                {
                    'disk1': DISK_HALF,
                    'disk2': DISK_HALF,
                },
                {},
            ))
    results_summary = list(
        diskstat.check_diskstat(
            'SUMMARY',
            type_defs.Parameters({}),
            {
                'disk1': DISK,
                'disk2': DISK,
            },
            None,
        ))
    assert results_summary == [
        Result(state=state.OK,
               summary='Utilization: 0.00%',
               details='Utilization: 0.00%'),
        Metric('disk_utilization',
               3.933167173747347e-06,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Read throughput: 35.3 B/s',
               details='Read throughput: 35.3 B/s'),
        Metric('disk_read_throughput',
               35.30109578585019,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Write throughput: 111 B/s',
               details='Write throughput: 111 B/s'),
        Metric('disk_write_throughput',
               110.81089251058174,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Average wait: 540 microseconds',
               details='Average wait: 540 microseconds'),
        Metric('disk_average_wait',
               0.0005402843870952481,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Average read wait: 39 microseconds',
               details='Average read wait: 39 microseconds'),
        Metric('disk_average_read_wait',
               3.987349554326878e-05,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Average write wait: 691 microseconds',
               details='Average write wait: 691 microseconds'),
        Metric('disk_average_write_wait',
               0.0006915664158721743,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Latency: 223 microseconds',
               details='Latency: 223 microseconds'),
        Metric('disk_latency',
               0.00022327168360432604,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Average queue length: 10.00',
               details='Average queue length: 10.00'),
        Metric('disk_queue_length',
               10.0,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Read operations: 0.01/s',
               details='Read operations: 0.01/s'),
        Metric('disk_read_ios',
               0.008178677645811379,
               levels=(None, None),
               boundaries=(None, None)),
        Result(state=state.OK,
               summary='Write operations: 0.03/s',
               details='Write operations: 0.03/s'),
        Metric('disk_write_ios',
               0.027053440554341245,
               levels=(None, None),
               boundaries=(None, None)),
        Metric('disk_average_read_request_size',
               4316.235131718299,
               levels=(None, None),
               boundaries=(None, None)),
        Metric('disk_average_request_size',
               4147.124719166019,
               levels=(None, None),
               boundaries=(None, None)),
        Metric('disk_average_write_request_size',
               4096.0,
               levels=(None, None),
               boundaries=(None, None)),
    ]

    # compare against single-item output
    with pytest.raises(IgnoreResultsError):
        list(
            diskstat.check_diskstat(
                'disk1',
                type_defs.Parameters({}),
                {
                    'disk1': DISK_HALF,
                    'disk2': DISK_HALF,
                },
                None,
            ))
    results_single_disk = list(
        diskstat.check_diskstat(
            'disk1',
            type_defs.Parameters({}),
            {
                'disk1': DISK,
                'disk2': DISK,
            },
            None,
        ))
    assert len(results_summary) == len(results_single_disk)
    for res_sum, res_single in zip(results_summary, results_single_disk):
        assert isinstance(res_single, type(res_sum))
        if isinstance(res_sum, Metric):
            assert isinstance(res_single, Metric)
            assert res_sum.value >= res_single.value
Example #11
def test_check_diskstat_single_item(value_store):
    with pytest.raises(IgnoreResultsError):
        list(
            diskstat.check_diskstat(
                'item',
                type_defs.Parameters({}),
                {'item': DISK_HALF},
                None,
            ))
    assert list(
        diskstat.check_diskstat(
            'item',
            type_defs.Parameters({}),
            {'item': DISK},
            None,
        )) == [
            Result(state=state.OK,
                   summary='Utilization: 0.00%',
                   details='Utilization: 0.00%'),
            Metric('disk_utilization',
                   3.933167173747347e-06,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Read throughput: 17.7 B/s',
                   details='Read throughput: 17.7 B/s'),
            Metric('disk_read_throughput',
                   17.650547892925093,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Write throughput: 55.4 B/s',
                   details='Write throughput: 55.4 B/s'),
            Metric('disk_write_throughput',
                   55.40544625529087,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Average wait: 540 microseconds',
                   details='Average wait: 540 microseconds'),
            Metric('disk_average_wait',
                   0.0005402843870952481,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Average read wait: 39 microseconds',
                   details='Average read wait: 39 microseconds'),
            Metric('disk_average_read_wait',
                   3.987349554326878e-05,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Average write wait: 691 microseconds',
                   details='Average write wait: 691 microseconds'),
            Metric('disk_average_write_wait',
                   0.0006915664158721743,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Latency: 223 microseconds',
                   details='Latency: 223 microseconds'),
            Metric('disk_latency',
                   0.00022327168360432604,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Average queue length: 10.00',
                   details='Average queue length: 10.00'),
            Metric('disk_queue_length',
                   10.0,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Read operations: 0.00/s',
                   details='Read operations: 0.00/s'),
            Metric('disk_read_ios',
                   0.004089338822905689,
                   levels=(None, None),
                   boundaries=(None, None)),
            Result(state=state.OK,
                   summary='Write operations: 0.01/s',
                   details='Write operations: 0.01/s'),
            Metric('disk_write_ios',
                   0.013526720277170622,
                   levels=(None, None),
                   boundaries=(None, None)),
            Metric('disk_average_read_request_size',
                   4316.235131718299,
                   levels=(None, None),
                   boundaries=(None, None)),
            Metric('disk_average_request_size',
                   4147.124719166019,
                   levels=(None, None),
                   boundaries=(None, None)),
            Metric('disk_average_write_request_size',
                   4096.0,
                   levels=(None, None),
                   boundaries=(None, None)),
        ]
Example #12
         [Service(item='Liebert CRV')],
     )])
def test_discover_liebert_system(section, result):
    discovered = list(discover_liebert_system(section))
    assert discovered == result


@pytest.mark.parametrize('section, result', [
    ({
        'System Model Number': 'Liebert CRV',
        'System Status': 'Normal Operation',
        'Unit Operating State': 'standby',
        'Unit Operating State Reason': 'Reason Unknown',
    }, [
        Result(state=state.OK,
               summary='System Model Number: Liebert CRV',
               details='System Model Number: Liebert CRV'),
        Result(state=state.OK,
               summary='System Status: Normal Operation',
               details='System Status: Normal Operation'),
        Result(state=state.OK,
               summary='Unit Operating State: standby',
               details='Unit Operating State: standby'),
        Result(state=state.OK,
               summary='Unit Operating State Reason: Reason Unknown',
               details='Unit Operating State Reason: Reason Unknown'),
    ]),
    (
        {
            'System Model Number': 'Liebert CRV',
            'System Status': 'Normal Operation',
Example #13
        'AP19': '1',
        'AP02': '1'
    }, [
        Service(item='AP19'),
        Service(item='AP02'),
    ]),
])
def test_discovery_cisco_wlc(section, services):
    assert list(discovery_cisco_wlc(section)) == services


@pytest.mark.parametrize("item,params,section,results", [
    ("AP19", {}, {
        'AP19': '1',
        'AP02': '1'
    }, [Result(state=state.OK, summary='Accesspoint: online')]),
    ("AP18", {}, {
        'AP19': '1',
        'AP02': '1'
    }, [Result(state=state.CRIT, summary='Accesspoint not found')]),
])
def test_check_cisco_wlc(item, params, section, results):
    assert list(check_cisco_wlc(item, params, section)) == results


@pytest.mark.parametrize("item,params,section,result", [
    ("AP19", {}, {
        "node1": {
            'AP19': '1',
            'AP02': '1'
        }
Example #14
     Service(item='4',
             parameters={
                 'discovered_state': ['1'],
                 'discovered_speed': 0
             }),
 ],
 [
     (
         '1',
         {
             'errors': (0.01, 0.1),
             'speed': 0,
             'state': ['1']
         },
         [
             Result(state=state.OK, summary='[docker0] (up)'),
             Result(state=state.OK, summary='MAC: AA:AA:AA:AA:AA:AA'),
             Result(state=state.OK, summary='speed unknown'),
         ],
     ),
     (
         '4',
         {
             'errors': (0.01, 0.1),
             'speed': 0,
             'state': ['1']
         },
         [
             Result(state=state.OK,
                    summary='[wlp3s0] (up)',
                    details='[wlp3s0] (up)'),
Example #15
    cluster_check_f5_bigip_cluster_status_v11_2,
    F5_BIGIP_CLUSTER_CHECK_DEFAULT_PARAMETERS as def_params,
)
from cmk.base.plugins.agent_based.agent_based_api.v0 import Result, state
from cmk.base.plugins.agent_based.agent_based_api.v0.type_defs import Parameters


@pytest.mark.parametrize("string_table,expected_parsed_data", [
    ([[['4']]], 4),
])
def test_parse_f5_bigip_cluster_status(string_table, expected_parsed_data):
    assert parse_f5_bigip_cluster_status(string_table) == expected_parsed_data


@pytest.mark.parametrize("arg,result", [
    ((def_params, 3), [Result(state=state.OK, summary="Node is active")]),
    ((def_params, 2), [Result(state=state.OK, summary="Node is active 2")]),
    ((def_params, 1), [Result(state=state.OK, summary="Node is active 1")]),
    ((def_params, 0), [Result(state=state.OK, summary="Node is standby")]),
])
def test_check_f5_bigip_cluster_status(arg, result):
    assert list(check_f5_bigip_cluster_status(Parameters(arg[0]), arg[1])) == result


@pytest.mark.parametrize("arg,result", [
    ((def_params, 4), [Result(state=state.OK, summary="Node is active")]),
    ((def_params, 3), [Result(state=state.OK, summary="Node is standby")]),
    ((def_params, 2), [Result(state=state.CRIT, summary="Node is forced offline")]),
    ((def_params, 1), [Result(state=state.CRIT, summary="Node is offline")]),
    ((def_params, 0), [Result(state=state.UNKNOWN, summary="Node is unknown")]),
])
Example #16
          'pasl2001': 'active',
          'tasl2001': 'active'
      }),
    ([[]], None),
])
def test_parse_f5_bigip_vcmpguests(string_table, expected_parsed_data):
    assert parse_f5_bigip_vcmpguests(string_table) == expected_parsed_data


@pytest.mark.parametrize("section,result", [
    ({
        'easl2001': 'active',
        'pasl2001': 'active',
        'tasl2001': 'active'
    }, [
        Result(state=state.OK, summary="Guest [easl2001] is active"),
        Result(state=state.OK, summary="Guest [pasl2001] is active"),
        Result(state=state.OK, summary="Guest [tasl2001] is active"),
    ]),
])
def test_check_f5_bigip_vcmpguests(section, result):
    assert list(check_f5_bigip_vcmpguests(section)) == result


@pytest.mark.parametrize("section,result", [
    (({
        "node1": {
            'easl2001': 'active',
            'pasl2001': 'active',
            'tasl2001': 'active'
        }