def _test_check_winperf_phydisk(item, section_1, section_2, check_func):
    """Run check_func twice and verify every expected disk metric is positive.

    The first call only primes the value store (counters have no reference
    yet), so it must raise IgnoreResultsError. The second call yields actual
    rates computed against the stored counters.
    """
    params = type_defs.Parameters({})
    # first call: initialize value store
    with pytest.raises(IgnoreResultsError):
        list(check_func(item, params, section_1))
    # second call: get values
    results = list(check_func(item, params, section_2))
    expected_metrics = {
        'disk_' + key
        for key in DISK
        if not key.endswith('_base') and key not in ('timestamp', 'frequency')
    }
    for result in results:
        if not isinstance(result, Metric):
            continue
        # remove() doubles as a check against unexpected or duplicated metrics
        expected_metrics.remove(result.name)
        assert result.value > 0
    assert not expected_metrics
def test_cluster_check_diskstat_summary(value_store):
    """The summarized cluster check must agree with the non-cluster check."""
    params = type_defs.Parameters({})
    section_io = {'node1': None, 'node2': None}

    # cluster check: first call only initializes the value store
    with pytest.raises(IgnoreResultsError):
        list(
            diskstat.cluster_check_diskstat(
                'SUMMARY',
                params,
                {
                    'node1': {'disk1': DISK_HALF},
                    'node2': {'disk2': DISK_HALF},
                },
                section_io,
            ))
    results_cluster = list(
        diskstat.cluster_check_diskstat(
            'SUMMARY',
            params,
            {
                'node1': {'disk1': DISK},
                'node2': {'disk2': DISK},
            },
            section_io,
        ))

    # non-cluster check: same two-step sequence on the merged disks
    with pytest.raises(IgnoreResultsError):
        list(
            diskstat.check_diskstat(
                'SUMMARY',
                params,
                {'disk1': DISK_HALF, 'disk2': DISK_HALF},
                None,
            ))
    results_non_cluster = list(
        diskstat.check_diskstat(
            'SUMMARY',
            params,
            {'disk1': DISK, 'disk2': DISK},
            None,
        ))

    assert results_cluster == results_non_cluster
def test_check_disk(value_store):
    """With identical counters in both calls, all throughput rates are zero."""
    # first call just initializes the value store
    with pytest.raises(IgnoreResultsError):
        list(aix_diskiod._check_disk(type_defs.Parameters({}), DISK))
    results = list(aix_diskiod._check_disk(type_defs.Parameters({}), DISK))
    assert results == [
        Result(state=state.OK, summary='Read: 0.00 B/s'),
        Metric('disk_read_throughput', 0.0),
        Result(state=state.OK, summary='Write: 0.00 B/s'),
        Metric('disk_write_throughput', 0.0),
    ]
def test_cluster_check_diskstat_single_item(value_store):
    """A single item from a cluster must match the non-cluster result.

    A disk reported by multiple nodes is taken from the node processed
    last ('node1' data overwrites that of 'node_overwritten').
    """
    params = type_defs.Parameters({})

    # cluster check: first call only initializes the value store
    with pytest.raises(IgnoreResultsError):
        list(
            diskstat.cluster_check_diskstat(
                'disk1',
                params,
                {'node1': {'disk1': DISK_HALF}},
                {'node1': None},
            ))
    results_cluster = list(
        diskstat.cluster_check_diskstat(
            'disk1',
            params,
            {
                'node_overwritten': {'disk1': DISK_HALF},
                'node1': {'disk1': DISK},
            },
            {
                'node_overwritten': None,
                'node1': None,
            },
        ))

    # non-cluster check: same two-step sequence
    with pytest.raises(IgnoreResultsError):
        list(
            diskstat.check_diskstat(
                'disk1',
                params,
                {'disk1': DISK_HALF},
                None,
            ))
    results_non_cluster = list(
        diskstat.check_diskstat(
            'disk1',
            params,
            {'disk1': DISK},
            None,
        ))

    assert results_cluster == results_non_cluster
def test_discovery_grouped_hierarchy():
    # Two rules both define a group named 'group' with different
    # member_appearance settings. The expected service below carries
    # 'alias' and the port-state inclusion condition, i.e. presumably the
    # first (more specific) rule takes precedence — TODO confirm against
    # interfaces.discover_interfaces.
    assert list(
        interfaces.discover_interfaces(
            [
                type_defs.Parameters({
                    'matching_conditions': (
                        False,
                        {
                            'portstates': ['1', '2'],
                        },
                    ),
                    "grouping": (
                        True,
                        [{
                            'group_name': 'group',
                            'member_appearance': 'alias',
                        }],
                    ),
                }),
                type_defs.Parameters({
                    'matching_conditions': (True, {}),
                    "grouping": (
                        True,
                        [{
                            'group_name': 'group',
                            'member_appearance': 'index',
                        }],
                    ),
                }),
                DEFAULT_DISCOVERY_PARAMS,
            ],
            _create_interfaces(0),
        )) == SINGLE_SERVICES + [
            Service(
                item='group',
                parameters={
                    'aggregate': {
                        'member_appearance': 'alias',
                        'inclusion_condition': {
                            'portstates': ['1', '2']
                        },
                        'exclusion_conditions': [],
                    },
                    'discovered_oper_status': ['1'],
                    'discovered_speed': 20000000,
                },
                labels=[],
            ),
        ]
def test_discovery_duplicate_descr():
    """Interfaces sharing one description get the index appended as item."""
    services = list(
        interfaces.discover_interfaces(
            [
                type_defs.Parameters({
                    **DEFAULT_DISCOVERY_PARAMS,
                    'discovery_single': (
                        True,
                        {
                            'item_appearance': 'descr',
                            'pad_portnumbers': True,
                        },
                    ),
                })
            ],
            _create_interfaces(0, descr='description'),
        ))
    assert services == [
        Service(
            item='description 5',
            parameters={
                'discovered_oper_status': ['1'],
                'discovered_speed': 10000000,
            },
            labels=[],
        ),
        Service(
            item='description 6',
            parameters={
                'discovered_oper_status': ['1'],
                'discovered_speed': 0,
            },
            labels=[],
        ),
    ]
def test_cluster_check_bluecat_none_ok():
    """No node in an OK state yields per-node results plus an overall CRIT."""
    params = type_defs.Parameters({
        "oper_states": {
            "warning": [1],
            "critical": [2, 3],
        },
    })
    node_section = {
        'node1': {'oper_state': 1},
        'node2': {'oper_state': 3},
    }
    assert list(bluecat_dns.cluster_check_bluecat_dns(params, node_section)) == [
        Result(
            state=state.WARN,
            summary='[node1]: DNS is running normally',
        ),
        Result(
            state=state.CRIT,
            summary='[node2]: DNS is currently starting',
        ),
        Result(
            state=state.CRIT,
            summary='No node with OK DNS state',
        ),
    ]
def test_cluster_check_bluecat_one_ok():
    """One OK node suffices: per-node problems become OK notices overall."""
    params = type_defs.Parameters({
        "oper_states": {
            "warning": [4],
            "critical": [],
        },
    })
    node_section = {
        'node1': {'oper_state': 1},
        'node2': {'oper_state': 4},
    }
    assert list(bluecat_dns.cluster_check_bluecat_dns(params, node_section)) == [
        Result(
            state=state.OK,
            notice='[node1]: DNS is running normally',
        ),
        Result(
            state=state.OK,
            notice='[node2]: DNS is currently stopping(!)',
        ),
        Result(
            state=state.OK,
            summary='DNS is running normally on node1',
        ),
    ]
def test_check(item, params, expected):
    """Parametrized: compare check output for item/params against expected."""
    actual = list(
        oracle_tablespaces.check_oracle_tablespaces(
            item,
            type_defs.Parameters(params),
            Section,
        ))
    assert actual == expected
def test_cluster_check_bluecat_all_ok():
    """All nodes OK: per-node details plus a single overall OK summary."""
    params = type_defs.Parameters({
        "oper_states": {
            "warning": [],
            "critical": [],
        },
    })
    node_section = {
        'node1': {'oper_state': 1},
        'node2': {'oper_state': 1},
    }
    assert list(bluecat_dns.cluster_check_bluecat_dns(params, node_section)) == [
        Result(
            state=state.OK,
            details='[node1]: DNS is running normally',
        ),
        Result(
            state=state.OK,
            details='[node2]: DNS is running normally',
        ),
        Result(
            state=state.OK,
            summary='DNS is running normally on node2',
        ),
    ]
def test_discovery_grouped_by_agent_and_in_rules():
    # The first two interfaces are pre-grouped by the agent (their .group
    # attribute is set) under the same name a grouping rule also uses.
    ifaces = _create_interfaces(0)
    ifaces[0].group = 'group'
    ifaces[1].group = 'group'
    assert list(
        interfaces.discover_interfaces(
            [
                type_defs.Parameters({
                    'matching_conditions': (True, {}),
                    "grouping": (
                        True,
                        [{
                            'group_name': 'group',
                            'member_appearance': 'index',
                        }],
                    ),
                }),
                DEFAULT_DISCOVERY_PARAMS,
            ],
            ifaces,
        )) == SINGLE_SERVICES + [
            Service(
                item='group',
                parameters={
                    'aggregate': {
                        'member_appearance': 'index',
                        'inclusion_condition': {},
                        'exclusion_conditions': [],
                    },
                    'discovered_oper_status': ['1'],
                    # NOTE(review): speed is a float here (20000000.0), unlike
                    # sibling tests which expect the int 20000000 — presumably
                    # both compare equal; confirm intentional.
                    'discovered_speed': 20000000.0,
                },
                labels=[],
            ),
        ]
def test_cluster_check_pulse_secure_users():
    """User counts of the individual nodes are summed across the cluster."""
    node_section = {
        'node1': {'n_users': 20},
        'node2': {'n_users': 30},
    }
    results = list(
        pulse_secure_users.cluster_check_pulse_secure_users(
            type_defs.Parameters({}),
            node_section,
        ))
    assert results == [
        Result(
            state=state.OK,
            details='[node1]: Pulse Secure users: 20',
        ),
        Result(
            state=state.OK,
            details='[node2]: Pulse Secure users: 30',
        ),
        Result(
            state=state.OK,
            summary='Pulse Secure users across cluster: 50',
            details='Pulse Secure users across cluster: 50',
        ),
        Metric(
            'current_users',
            50.0,
            levels=(None, None),
            boundaries=(None, None),
        ),
    ]
def test_check_bluecat_dhcp_one_lease():
    """A normally running DHCP with one lease yields OK results and a metric.

    Fix: renamed from ``check_bluecat_dhcp_one_lease`` — without the
    ``test_`` prefix, pytest never collected this function, so the test
    was silently dead.
    """
    assert list(
        bluecat_dhcp.check_bluecat_dhcp(
            type_defs.Parameters(
                {
                    "oper_states": {
                        "warning": [],
                        "critical": [],
                    },
                },
            ),
            {
                'oper_state': 1,
                'leases': 1,
            },
        )) == [
            Result(
                state=state.OK,
                summary="DHCP is running normally",
            ),
            Result(
                state=state.OK,
                summary="1 lease per second",
            ),
            Metric(
                'leases',
                1,
            ),
        ]
def test_check_bluecat_dhcp_crit():
    """oper_state 5 configured as critical must yield a CRIT result.

    Fixes: renamed from ``check_bluecat_dhcp_crit`` — without the ``test_``
    prefix, pytest never collected this function, so it was dead code. That
    also hid an internal inconsistency: the section fed in ``'leases': 10``
    while the expectation said "1 lease per second" / ``Metric('leases', 1)``.
    The input is set to 1 lease here, matching the consistent pattern in
    test_check_bluecat_dhcp_one_lease and keeping this test focused on the
    CRIT oper_state.
    """
    assert list(
        bluecat_dhcp.check_bluecat_dhcp(
            type_defs.Parameters(
                {
                    "oper_states": {
                        "warning": [],
                        "critical": [5],
                    },
                },
            ),
            {
                'oper_state': 5,
                'leases': 1,
            },
        )) == [
            Result(
                state=state.CRIT,
                summary="DHCP is fault",
            ),
            Result(
                state=state.OK,
                summary="1 lease per second",
            ),
            Metric(
                'leases',
                1,
            ),
        ]
def test_discovery_duplicate_alias():
    """With discovery restricted to index 5, the alias item gets ' 5' appended."""
    services = list(
        interfaces.discover_interfaces(
            [
                type_defs.Parameters({
                    'discovery_single': (
                        True,
                        {
                            'item_appearance': 'alias',
                            'pad_portnumbers': True,
                        },
                    ),
                    'matching_conditions': (
                        False,
                        {
                            'match_index': ['5'],
                        },
                    ),
                })
            ],
            _create_interfaces(0, alias='alias'),
        ))
    assert services == [
        Service(
            item='alias 5',
            parameters={
                'discovered_oper_status': ['1'],
                'discovered_speed': 10000000,
            },
            labels=[],
        ),
    ]
def test_regression_check(item, check_results):
    """Parametrized regression: check output with ignored sensor states."""
    params = type_defs.Parameters({"ignored_sensorstates": ["ns", "nr", "na"]})
    actual = list(
        ipmi.check_ipmi(
            item,
            params,
            SECTION_IPMI,
            SECTION_IPMI_DISCRETE,
        ))
    assert actual == check_results
def test_check_raises():
    """An item absent from the section must raise instead of reporting."""
    with pytest.raises(IgnoreResultsError):
        list(
            oracle_tablespaces.check_oracle_tablespaces(
                "item.not.sent",
                type_defs.Parameters({}),
                Section,
            ))
def test_check_raises():
    """A missing item is reported as a failed database login via the exception."""
    with pytest.raises(IgnoreResultsError) as exc:
        list(
            oracle_rman.check_oracle_rman(
                "NON-EXISTANT-ITEM",
                type_defs.Parameters({}),
                PARSED_SECTION,
            ))
    # the exception message must identify both the failure and the item
    assert "Login into database failed. Working on NON-EXISTANT-ITEM" in str(exc.value)
def _test_check_aix_diskiod(item, section_1, section_2, check_func):
    """Run check_func twice and verify that every yielded metric is positive."""
    params = type_defs.Parameters({})
    # first call: initialize value store
    with pytest.raises(IgnoreResultsError):
        list(check_func(item, params, section_1))
    # second call: get values
    for result in list(check_func(item, params, section_2)):
        if isinstance(result, Metric):
            assert result.value > 0
def test_check_disk(value_store):
    """Identical counters in both calls produce zero throughput rates."""
    # first call just initializes the value store
    with pytest.raises(IgnoreResultsError):
        list(aix_diskiod._check_disk(type_defs.Parameters({}), DISK))
    results = list(aix_diskiod._check_disk(type_defs.Parameters({}), DISK))
    assert results == [
        Result(
            state=state.OK,
            summary='Read throughput: 0.00 B/s',
            details='Read throughput: 0.00 B/s',
        ),
        Metric('disk_read_throughput', 0.0, levels=(None, None), boundaries=(None, None)),
        Result(
            state=state.OK,
            summary='Write throughput: 0.00 B/s',
            details='Write throughput: 0.00 B/s',
        ),
        Metric('disk_write_throughput', 0.0, levels=(None, None), boundaries=(None, None)),
    ]
def test_check_diskstat_single_item(value_store):
    # First call: the value store holds no counters yet, so the check must
    # bail out with IgnoreResultsError.
    with pytest.raises(IgnoreResultsError):
        list(
            diskstat.check_diskstat(
                'item',
                type_defs.Parameters({}),
                {'item': DISK_HALF},
                None,
            ))
    # Second call: rates computed from DISK_HALF -> DISK. The values below
    # are pinned regression values for exactly these fixtures.
    assert list(
        diskstat.check_diskstat(
            'item',
            type_defs.Parameters({}),
            {'item': DISK},
            None,
        )) == [
            Result(state=state.OK, notice='Utilization: 0.00039%'),
            Metric('disk_utilization', 3.933167173747347e-06),
            Result(state=state.OK, summary='Read: 17.7 B/s'),
            Metric('disk_read_throughput', 17.650547892925093),
            Result(state=state.OK, summary='Write: 55.4 B/s'),
            Metric('disk_write_throughput', 55.40544625529087),
            Result(state=state.OK, notice='Average wait: 540 microseconds'),
            Metric('disk_average_wait', 0.0005402843870952481),
            Result(state=state.OK, notice='Average read wait: 40 microseconds'),
            Metric('disk_average_read_wait', 3.987349554326878e-05),
            Result(state=state.OK, notice='Average write wait: 692 microseconds'),
            Metric('disk_average_write_wait', 0.0006915664158721743),
            Result(state=state.OK, notice='Average queue length: 10.00'),
            Metric('disk_queue_length', 10.0),
            Result(state=state.OK, notice='Read operations: 0.00/s'),
            Metric('disk_read_ios', 0.004089338822905689),
            Result(state=state.OK, notice='Write operations: 0.01/s'),
            Metric('disk_write_ios', 0.013526720277170622),
            Result(state=state.OK, summary='Latency: 223 microseconds'),
            Metric('disk_latency', 0.00022327168360432604),
            Metric('disk_average_read_request_size', 4316.235131718299),
            Metric('disk_average_request_size', 4147.124719166019),
            Metric('disk_average_write_request_size', 4096.0),
        ]
def test_check_multiple_interfaces_group_simple(value_store):
    # Group check with explicit aggregation params, traffic levels in percent
    # of the configured 'speed', and an expected operational state of '8'
    # (degraded) — which the members do not all satisfy, hence the state
    # output below.
    params = type_defs.Parameters({
        'errors': (0.01, 0.1),
        'traffic': [('both', ('upper', ('perc', (5.0, 20.0))))],
        'aggregate': {
            'member_appearance': 'index',
            'inclusion_condition': {},
            'exclusion_conditions': [],
        },
        'discovered_oper_status': ['1'],
        'discovered_speed': 20000000,
        'state': ['8'],
        'speed': 123456,
    })
    # first call at t=0 only initializes the counters
    with pytest.raises(IgnoreResultsError):
        list(
            interfaces.check_multiple_interfaces(
                'group',
                params,
                _create_interfaces(0),
                timestamp=0,
            ))
    # second call 5 s later with increased octet counters; all expected
    # values below are pinned regression values.
    assert list(
        interfaces.check_multiple_interfaces(
            'group',
            params,
            _create_interfaces(4000000),
            timestamp=5,
        )) == [
            Result(state=state.OK, notice='Interface group'),
            Result(state=state.OK, notice='Operational state: degraded'),
            Result(state=state.OK,
                   notice='Members: [1 (up), 2 (down), 3 (down), 4 (down), 5 (up), 6 (up)]'),
            Metric('in', 800000.0, levels=(62500.0, 250000.0), boundaries=(0.0, 1250000.0)),
            Metric('inmcast', 0.0),
            Metric('inbcast', 0.0),
            Metric('inucast', 0.0),
            Metric('innucast', 0.0),
            Metric('indisc', 0.0),
            Metric('inerr', 0.0, levels=(0.01, 0.1)),
            Metric('out', 3200000.0, levels=(62500.0, 250000.0), boundaries=(0.0, 1250000.0)),
            Metric('outmcast', 0.0),
            Metric('outbcast', 0.0),
            Metric('outucast', 0.0),
            Metric('outnucast', 0.0),
            Metric('outdisc', 0.0),
            Metric('outerr', 0.0, levels=(0.01, 0.1)),
            Metric('outqlen', 0.0),
            Result(state=state.CRIT,
                   summary='In: 800 kB/s (warn/crit at 62.5 kB/s/250 kB/s) (64.0%)'),
            Result(state=state.CRIT,
                   summary='Out: 3.20 MB/s (warn/crit at 62.5 kB/s/250 kB/s) (256%)'),
            Result(state=state.WARN, summary='Speed: 10 MBit/s (expected: 123 kBit/s)'),
        ]
def test_lnx_if_regression(
    monkeypatch,
    string_table,
    discovery_results,
    items_params_results,
):
    # Parametrized regression test covering discovery, the plain check and
    # the cluster check of lnx_if on the same parsed section.
    section = lnx_if.parse_lnx_if(string_table)

    # discovery with default parameters
    assert list(
        lnx_if.discover_lnx_if(
            [type_defs.Parameters(interfaces.DISCOVERY_DEFAULT_PARAMETERS)],
            section,
        )) == discovery_results

    # replace the value store with a fresh dict so no state leaks between runs
    monkeypatch.setattr(interfaces, 'get_value_store', lambda: {})
    for item, par, res in items_params_results:
        assert list(
            lnx_if.check_lnx_if(
                item,
                type_defs.Parameters(par),
                section,
            )) == res

    # For a one-node cluster, the first result is the non-cluster first
    # result with ' on <node>' appended to whichever of summary/notice/
    # details is set; all remaining results are unchanged.
    node_name = 'node'
    for item, par, res in items_params_results:
        assert list(
            lnx_if.cluster_check_lnx_if(
                item,
                type_defs.Parameters(par),
                {node_name: section},
            )) == [
                Result(  # type: ignore[call-overload]
                    state=res[0].state,
                    summary=res[0].summary + ' on %s' % node_name if res[0].summary else None,
                    notice=res[0].summary + ' on %s' % node_name if not res[0].summary else None,
                    details=res[0].details + ' on %s' % node_name if res[0].details else None,
                ),
            ] + res[1:]
def test_discovery_ungrouped_off():
    """With single-interface discovery disabled, nothing is discovered."""
    rules = [
        type_defs.Parameters({
            'matching_conditions': (True, {}),
            'discovery_single': (False, {}),
        }),
        DEFAULT_DISCOVERY_PARAMS,
    ]
    assert list(interfaces.discover_interfaces(rules, _create_interfaces(0))) == []
def test_check_single_interface_same_index_descr_alias(value_store):
    """An interface whose index, description and alias coincide still checks."""
    item = '07'
    iface = _create_interfaces(0, index=item, descr=item, alias=item)[0]
    first_result = next(  # type: ignore[call-overload]
        interfaces.check_single_interface(
            item,
            type_defs.Parameters({}),
            iface,
        ))
    assert first_result == Result(
        state=state.OK,
        notice='Operational state: up',
    )
def test_winperf_if_netconnection_id(string_table, settings, items):
    """Parametrized: only the item names of discovered Services are compared."""
    discovered = winperf_if.discover_winperf_if(
        [
            type_defs.Parameters({
                **interfaces.DISCOVERY_DEFAULT_PARAMETERS,
                **settings,
            })
        ],
        winperf_if.parse_winperf_if(string_table),
    )
    discovered_items = [
        service.item for service in discovered if isinstance(service, Service)
    ]
    assert discovered_items == items
def test_winperf_if_regression(
    monkeypatch,
    string_table,
    discovery_results,
    items_params_results,
):
    """Parametrized regression covering winperf_if discovery and check."""
    section = winperf_if.parse_winperf_if(string_table)

    # discovery with default parameters
    discovered = list(
        winperf_if.discover_winperf_if(
            [type_defs.Parameters(interfaces.DISCOVERY_DEFAULT_PARAMETERS)],
            section,
        ))
    assert discovered == discovery_results

    # make each check run use a fresh, empty value store
    monkeypatch.setattr(interfaces, 'get_value_store', lambda: {})
    for item, params, expected in items_params_results:
        assert list(
            winperf_if.check_winperf_if(
                item,
                type_defs.Parameters(params),
                section,
            )) == expected
def test_check_multiple_interfaces_group_by_agent(value_store):
    # Group membership comes from the agent here: interfaces 4 and 6 get
    # their .group attribute set instead of matching an inclusion rule.
    params = type_defs.Parameters({
        'errors': (0.01, 0.1),
        'traffic': [('both', ('upper', ('perc', (5.0, 20.0))))],
        'aggregate': {
            'member_appearance': 'index',
        },
        'discovered_oper_status': ['1'],
        'discovered_speed': 20000000
    })
    # first call at t=0 only initializes the counters
    with pytest.raises(IgnoreResultsError):
        ifaces = _create_interfaces(0)
        ifaces[3].group = 'group'
        ifaces[5].group = 'group'
        list(interfaces.check_multiple_interfaces(
            'group',
            params,
            ifaces,
            timestamp=0,
        ))
    # second call 5 s later; all expected values are pinned regression values
    ifaces = _create_interfaces(4000000)
    ifaces[3].group = 'group'
    ifaces[5].group = 'group'
    assert list(interfaces.check_multiple_interfaces(
        'group',
        params,
        ifaces,
        timestamp=5,
    )) == [
        Result(state=state.OK, notice='Interface group'),
        Result(state=state.CRIT, notice='Operational state: degraded'),
        Result(state=state.OK, notice='Members: [4 (down), 6 (up)]'),
        Metric('in', 800000.0, levels=(125000.0, 500000.0), boundaries=(0.0, 2500000.0)),
        Metric('inmcast', 0.0),
        Metric('inbcast', 0.0),
        Metric('inucast', 0.0),
        Metric('innucast', 0.0),
        Metric('indisc', 0.0),
        Metric('inerr', 0.0, levels=(0.01, 0.1)),
        Metric('out', 3200000.0, levels=(125000.0, 500000.0), boundaries=(0.0, 2500000.0)),
        Metric('outmcast', 0.0),
        Metric('outbcast', 0.0),
        Metric('outucast', 0.0),
        Metric('outnucast', 0.0),
        Metric('outdisc', 0.0),
        Metric('outerr', 0.0, levels=(0.01, 0.1)),
        Metric('outqlen', 0.0),
        Result(state=state.CRIT,
               summary='In: 800 kB/s (warn/crit at 125 kB/s/500 kB/s) (32.0%)'),
        Result(state=state.CRIT,
               summary='Out: 3.20 MB/s (warn/crit at 125 kB/s/500 kB/s) (128%)'),
        Result(state=state.OK, summary='Speed: 20 MBit/s (assumed)'),
    ]
def test_discovery_partial_duplicate_desc_duplicate_alias():
    # Interfaces 4 and 5 share a description while every interface shares an
    # alias; discovery is restricted to indices 4-6 via match_index. The
    # duplicate descriptions get the index appended, the unique one
    # ('wlp2s0') stays as-is.
    ifaces = _create_interfaces(0)
    ifaces[3].descr = 'duplicate_descr'
    ifaces[4].descr = 'duplicate_descr'
    for iface in ifaces:
        iface.alias = 'alias'
    assert list(
        interfaces.discover_interfaces(
            [
                type_defs.Parameters({
                    'discovery_single': (
                        True,
                        {
                            'item_appearance': 'descr',
                            'pad_portnumbers': True,
                        },
                    ),
                    'matching_conditions': (
                        False,
                        {
                            'match_index': ['4', '5', '6'],
                        },
                    ),
                })
            ],
            ifaces,
        )) == [
            Service(
                item='duplicate_descr 4',
                parameters={
                    'discovered_oper_status': ['2'],
                    'discovered_speed': 10000000,
                },
                labels=[],
            ),
            Service(
                item='duplicate_descr 5',
                parameters={
                    'discovered_oper_status': ['1'],
                    'discovered_speed': 10000000,
                },
                labels=[],
            ),
            Service(
                item='wlp2s0',
                parameters={
                    'discovered_oper_status': ['1'],
                    'discovered_speed': 0,
                },
                labels=[],
            ),
        ]
def test_winperf_if_teaming_performance_data(monkeypatch, value_store, item, params, results):
    """The check must compute rates from the agent timestamp, not time.time()."""

    def run_check(section):
        return list(
            winperf_if.check_winperf_if(
                item,
                type_defs.Parameters(params),
                section,
            ))

    # Initialize counters
    monkeypatch.setattr('time.time', lambda: 0)
    with suppress(IgnoreResultsError):
        run_check(winperf_if_teaming_parsed(0, 0))

    # winperf_if should use the timestamp of the parsed data. To check that it
    # does not use time.time by accident, we set it to 20 s instead of 10 s.
    # If winperf_if now used time.time, the out metric value would be smaller.
    monkeypatch.setattr('time.time', lambda: 20)
    assert run_check(winperf_if_teaming_parsed(10, 1024 * 1024 * 1024 * 10)) == results