示例#1
0
def test_get_effective_service_level(monkeypatch):
    """Effective service level: service rule wins, else host rule, else 0."""
    scenario = Scenario()
    for hostname in ("testhost1", "testhost2", "testhost3"):
        scenario.add_host(hostname)
    scenario.set_ruleset(
        "host_service_levels",
        [
            (10, [], ["testhost2"], {}),
            (2, [], ["testhost2"], {}),
        ],
    )
    scenario.set_ruleset(
        "service_service_levels",
        [
            (33, [], ["testhost1"], ["CPU load$"], {}),
        ],
    )
    scenario.apply(monkeypatch)

    expectations = (
        ("testhost1", 33),  # service rule matches "CPU load"
        ("testhost2", 10),  # first matching host rule wins over the second
        ("testhost3", 0),  # no rule at all -> default service level
    )
    with plugin_contexts.current_service(CheckPluginName("cpu_loads"), "CPU load"):
        for hostname, expected_level in expectations:
            with plugin_contexts.current_host(hostname):
                assert check_api.get_effective_service_level() == expected_level
示例#2
0
def test_get_effective_service_level(monkeypatch):
    """Effective service level: service rule wins, else host rule, else 0."""
    ts = Scenario().add_host("testhost1")
    ts.add_host("testhost2")
    ts.add_host("testhost3")
    host_rules = [
        (10, [], ["testhost2"], {}),
        (2, [], ["testhost2"], {}),
    ]
    service_rules = [
        (33, [], ["testhost1"], ["CPU load$"], {}),
    ]
    ts.set_ruleset("host_service_levels", host_rules)
    ts.set_ruleset("service_service_levels", service_rules)
    ts.apply(monkeypatch)

    service = Service(
        item=None,
        check_plugin_name=CheckPluginName("cpu_loads"),
        description="CPU load",
        parameters={},
    )
    expectations = (
        ("testhost1", 33),  # service rule matches "CPU load"
        ("testhost2", 10),  # first matching host rule wins over the second
        ("testhost3", 0),  # no rule at all -> default service level
    )
    with plugin_contexts.current_service(service):
        for hostname, expected_level in expectations:
            with plugin_contexts.current_host(hostname):
                assert check_api.get_effective_service_level() == expected_level
示例#3
0
def test_mem_win(
    mocker: MockerFixture,
    fix_register: FixRegister,
    params: Mapping[str, Any],
    expected_result: CheckResult,
) -> None:
    """Run the mem_win check with mocked predictive levels and compare output."""
    # Fake the prediction engine so predictive levels are deterministic.
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels",
        return_value=(100000, (90000, 110000, None, None)),
    )
    dummy_service = Service(
        CheckPluginName("unittest_sd"),
        parameters={},
        item=None,
        description="unittest_sd_description",
    )
    with current_host("unittest-hn"), current_service(dummy_service):
        check_function = fix_register.check_plugins[CheckPluginName("mem_win")].check_function
        actual = list(
            check_function(
                item=None,
                params=params,
                section=_SECTION,
            )
        )
        assert actual == expected_result
示例#4
0
File: run.py  Project: PLUTEX/checkmk
def run_test_on_checks(check, subcheck, dataset, info_arg, immu, write):
    """Run check for test case listed in dataset"""
    test_cases = getattr(dataset, "checks", {}).get(subcheck, [])
    check_func = check.info.get("check_function")
    check_plugin_name = CheckPluginName(maincheckify(check.name))

    for item, params, results_expected_raw in test_cases:
        print("Dataset item %r in check %r" % (item, check.name))
        # Guard the parameters against mutation by the check function.
        immu.register(params, "params")

        dummy_service = Service(
            item=item,
            check_plugin_name=check_plugin_name,
            description="unit test description",
            parameters={},
        )
        with current_service(dummy_service):
            result = CheckResult(check.run_check(item, params, info_arg))

        immu.test(" after check (%s): " % check_func.__name__)

        result_expected = CheckResult(results_expected_raw)

        if write:
            # Regeneration mode: store the freshly computed result instead
            # of comparing against the recorded one.
            dataset.update_check_result(subcheck, (item, params, result.raw_repr()))
        else:
            assertCheckResultsEqual(result, result_expected)
示例#5
0
def test_cpu_loads_predictive(mocker: Mock) -> None:
    """Make sure the cpu_load check can handle predictive levels."""
    # Fake the prediction engine: no reference yet, fixed fake levels.
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels",
        return_value=(None, (2.2, 4.2, None, None)),
    )
    params = {
        "levels": {
            "period": "minute",
            "horizon": 1,
            "levels_upper": ("absolute", (2.0, 4.0)),
        }
    }
    section = Section(
        load=Load(0.5, 1.0, 1.5),
        num_cpus=4,
        threads=Threads(count=123),
    )
    with current_host("unittest"), current_service(
            CheckPluginName("cpu_loads"), "item"):
        results = list(check_cpu_load(params, section))

    assert results == [
        Result(
            state=State.OK,
            summary="15 min load: 1.50 (no reference for prediction yet)"),
        # these levels are the predicted (mocked) values
        Metric("load15", 1.5, levels=(2.2, 4.2)),
        Result(state=State.OK,
               summary="15 min load per core: 0.38 (4 cores)"),
        Metric("load1", 0.5, boundaries=(0, 4.0)),
        Metric("load5", 1.0, boundaries=(0, 4.0)),
    ]
示例#6
0
def execute_check(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    *,
    dry_run: bool,
    show_perfdata: bool,
) -> bool:
    """Execute one check and submit its result to the monitoring core.

    Returns True iff data was received for the service.
    """
    plugin = agent_based_register.get_check_plugin(service.check_plugin_name)

    # check if we must use legacy mode. remove this block entirely one day
    is_legacy_cluster_mode = (
        plugin is not None
        and host_config.is_cluster
        and plugin.cluster_check_function.__name__ == "cluster_legacy_mode_from_hell"
    )
    if is_legacy_cluster_mode:
        with plugin_contexts.current_service(service):
            if isinstance(service.parameters, cmk.base.config.TimespecificParamList):
                used_params = time_resolved_check_parameters(service.parameters)
            else:
                used_params = service.parameters
            submittable = _legacy_mode.get_aggregated_result(
                parsed_sections_broker,
                host_config.hostname,
                ipaddress,
                service,
                used_params=used_params,
            )
    else:  # This is the new, shiny, 'normal' case.
        submittable = get_aggregated_result(
            parsed_sections_broker,
            host_config,
            ipaddress,
            service,
            plugin,
            lambda: _final_read_only_check_parameters(service.parameters),
        )

    if submittable.submit:
        _submit_to_core.check_result(
            host_name=host_config.hostname,
            service_name=service.description,
            result=submittable.result,
            cache_info=submittable.cache_info,
            dry_run=dry_run,
            show_perfdata=show_perfdata,
        )
    else:
        # Nothing to submit: report the service as pending on the console.
        console.verbose(
            f"{service.description:20} PEND - {submittable.result[1]}\n")

    return submittable.data_received
示例#7
0
def test_check_levels_predictive_default_render_func(mocker):
    """The default render function should print the value as a plain float."""
    # Fake the prediction engine: no reference yet, fixed fake levels.
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels",
        return_value=(None, (2.2, 4.2, None, None)),
    )

    with current_host("unittest"), current_service(
            CheckPluginName("test_check"), "unittest-service-description"):
        result = next(
            utils.check_levels_predictive(
                42.42,
                metric_name="metric_name",
                levels={},
            ))

    assert isinstance(result, Result)
    assert result.summary.startswith("42.42")
示例#8
0
def fixture_results(checkplugin, section, params, mocker: MockerFixture):
    """Run *checkplugin* with the given params and mocked predictive levels."""
    # Apply the same parameters to every metric the plugin produces.
    params = {metric: params for metric in checkplugin.metrics}
    # Fake the prediction engine: no reference yet, fixed fake levels.
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels",
        return_value=(None, (2.2, 4.2, None, None)),
    )

    with current_host("unittest"), current_service(
            CheckPluginName("test_check"), "unittest-service-description"):
        results = list(
            checkplugin.function(
                item=ITEM,
                params=params,
                section_gcp_service_filestore=section,
                section_gcp_assets=None,
            )
        )
    return results, checkplugin
def test_check_levels_predictive_default_render_func(mocker):
    """The default render function should print the value as a plain float."""
    # Fake the prediction engine: no reference yet, fixed fake levels.
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels",
        return_value=(None, (2.2, 4.2, None, None)),
    )

    irrelevant_test_parameters: LegacyCheckParameters = {}
    with current_host("unittest"), current_service(
        Service(
            item=None,
            check_plugin_name=CheckPluginName("test_check"),
            description="unittest-service-description",
            parameters=irrelevant_test_parameters,
        )
    ):
        result = next(
            utils.check_levels_predictive(42.42, metric_name="metric_name", levels={})
        )

    assert isinstance(result, Result)
    assert result.summary.startswith("42.42")
示例#10
0
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    service: Service,
    *,
    used_params: LegacyCheckParameters,
    value_store_manager: item_state.ValueStoreManager,
) -> AggregatedResult:
    """Delegate to _get_aggregated_result within the proper contexts.

    In the legacy mode from hell (tm) the item state features *probably*
    need to be called from the parse functions. We consolidate the
    preparation of the item state at this point, which is the largest
    possible scope without leaving the 'legacy' world.
    """
    with plugin_contexts.current_service(service), \
            value_store_manager.namespace(service.id()):
        return _get_aggregated_result(
            parsed_sections_broker=parsed_sections_broker,
            hostname=hostname,
            ipaddress=ipaddress,
            service=service,
            used_params=used_params,
        )
示例#11
0
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    timespecific_parameters: Union[LegacyCheckParameters,
                                   TimespecificParameters],
    *,
    value_store_manager: value_store.ValueStoreManager,
    persist_value_store_changes: bool,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.

    Returns an AggregatedResult; ``submit`` is False when no section data
    was found or the check signalled that results should be ignored.
    """
    # Unknown plugin: report "check not implemented" instead of crashing.
    if plugin is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=ServiceCheckResult.check_not_implemented(),
            cache_info=None,
        )

    config_cache = config.get_config_cache()
    # On clusters the check function is wrapped according to the configured
    # clustered-service mode; on real hosts the plain plugin function is used.
    check_function = (_cluster_modes.get_cluster_check_function(
        *config_cache.get_clustered_service_configuration(
            host_config.hostname,
            service.description,
        ),
        plugin=plugin,
        service_id=service.id(),
        persist_value_store_changes=persist_value_store_changes,
    ) if host_config.is_cluster else plugin.check_function)

    # Collect the parsed section data the plugin subscribed to (this helper
    # also handles services discovered before 2.0).
    section_kws, error_result = _get_monitoring_data_kwargs_handle_pre20_services(
        parsed_sections_broker,
        host_config,
        config_cache,
        ipaddress,
        service,
        plugin.sections,
    )
    if not section_kws:  # no data found
        return AggregatedResult(
            submit=False,
            data_received=False,
            result=error_result,
            cache_info=None,
        )

    # Only pass "item" / "params" keywords if the plugin actually takes them.
    item_kw = {} if service.item is None else {"item": service.item}
    params_kw = ({} if plugin.check_default_parameters is None else {
        "params":
        _final_read_only_check_parameters(timespecific_parameters)
    })

    try:
        # The check runs within host/service context and with its own value
        # store namespace for counters and persisted state.
        with plugin_contexts.current_host(
                host_config.hostname), plugin_contexts.current_service(
                    service), value_store_manager.namespace(service.id()):
            result = _aggregate_results(
                check_function(
                    **item_kw,
                    **params_kw,
                    **section_kws,
                ))

    except (item_state.MKCounterWrapped,
            checking_classes.IgnoreResultsError) as e:
        # Not a failure: e.g. a counter needs a second sample. Do not submit.
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=ServiceCheckResult(output=msg),
            cache_info=None,
        )
    except MKTimeout:
        # Timeouts are handled by the caller.
        raise
    except Exception:
        # Any other failure becomes a crash report (state UNKNOWN), unless
        # debug mode is active, in which case we want the traceback.
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname,
                                            skip_autochecks=True)
        result = ServiceCheckResult(
            3,
            cmk.base.crash_reporting.create_check_crash_dump(
                host_name=host_config.hostname,
                service_name=service.description,
                plugin_name=service.check_plugin_name,
                plugin_kwargs={
                    **item_kw,
                    **params_kw,
                    **section_kws
                },
                is_manual=service.id() in table,
            ),
        )

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
示例#12
0
def test_parse_diskstat_predictive(mocker: MockerFixture):
    """Regression test: check_diskstat must cope with predictive levels."""
    # SUP-5924
    DATA = [
        ["1617784511"],
        [
            "259",
            "0",
            "nvme0n1",
            "131855",
            "42275",
            "8019303",
            "34515",
            "386089",
            "166344",
            "13331634",
            "138121",
            "0",
            "185784",
            "177210",
            "0",
            "0",
            "0",
            "0",
            "41445",
            "4574",
        ],
        [
            "53",
            "0",
            "dm-0",
            "172574",
            "0",
            "7980626",
            "74812",
            "548159",
            "0",
            "12443656",
            "706944",
            "0",
            "189576",
            "781756",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
        ],
        [
            "53",
            "1",
            "dm-1",
            "171320",
            "0",
            "7710074",
            "74172",
            "546564",
            "0",
            "12514416",
            "674352",
            "0",
            "186452",
            "748524",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
        ],
        [
            "53",
            "2",
            "dm-2",
            "194",
            "0",
            "8616",
            "68",
            "0",
            "0",
            "0",
            "0",
            "0",
            "72",
            "68",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
        ],
        ["[dmsetup_info]"],
        ["vme0n1p3_crypt", "253:0"],
        ["buntu--vg-swap_1", "253:2", "ubuntu-vg", "swap_1"],
        ["buntu--vg-root", "253:1", "ubuntu-vg", "root"],
    ]

    # "read" uses predictive levels; all other metrics use fixed levels.
    PARAMS = {
        "average": 300,
        "latency": (80.0, 160.0),
        "read": {
            "horizon": 90,
            "levels_lower": ("absolute", (2.0, 4.0)),
            "levels_upper": ("relative", (10.0, 20.0)),
            "levels_upper_min": (10.0, 15.0),
            "period": "wday",
        },
        "read_ios": (400.0, 600.0),
        "read_latency": (80.0, 160.0),
        "read_wait": (30.0, 50.0),
        "utilization": (80.0, 90.0),
        "write": (50.0, 100.0),
        "write_ios": (300.0, 400.0),
        "write_latency": (80.0, 160.0),
        "write_wait": (30.0, 50.0),
    }

    # Fake the prediction engine: no reference yet, fixed fake levels.
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels", return_value=(None, (2.1, 4.1, None, None))
    )
    dummy_service: Service[LegacyCheckParameters] = Service(
        CheckPluginName("unittest_sd"),
        parameters={},
        item="item-nvme0n1",
        description="unittest_sd_description",
    )
    with plugin_contexts.current_host("unittest-hn"), plugin_contexts.current_service(
        dummy_service
    ):

        # First evaluation: rates cannot be computed from a single sample.
        with pytest.raises(IgnoreResultsError):
            list(diskstat.check_diskstat("nvme0n1", PARAMS, diskstat.parse_diskstat(DATA), None))
        # Advance the agent timestamp by one second so rates can be computed.
        DATA[0][0] = "1617784512"
        assert list(
            diskstat.check_diskstat(
                "nvme0n1",
                PARAMS,
                diskstat.parse_diskstat(DATA),
                None,
            )
        ) == [
            Result(state=state.OK, notice="All values averaged over 5 minutes 0 seconds"),
            Result(state=state.OK, notice="Utilization: 0%"),
            Metric("disk_utilization", 0.0, levels=(0.8, 0.9)),
            Result(state=state.OK, summary="Read: 0.00 B/s (no reference for prediction yet)"),
            Metric("disk_read_throughput", 0.0, levels=(2.1, 4.1)),  # fake levels are quite low
            Result(state=state.OK, summary="Write: 0.00 B/s"),
            Metric("disk_write_throughput", 0.0, levels=(50000000.0, 100000000.0)),
            Result(state=state.OK, notice="Average wait: 0 seconds"),
            Metric("disk_average_wait", 0.0),
            Result(state=state.OK, notice="Average read wait: 0 seconds"),
            Metric("disk_average_read_wait", 0.0, levels=(0.03, 0.05)),
            Result(state=state.OK, notice="Average write wait: 0 seconds"),
            Metric("disk_average_write_wait", 0.0, levels=(0.03, 0.05)),
            Result(state=state.OK, notice="Average queue length: 0.00"),
            Metric("disk_queue_length", 0.0),
            Result(state=state.OK, notice="Read operations: 0.00/s"),
            Metric("disk_read_ios", 0.0, levels=(400.0, 600.0)),
            Result(state=state.OK, notice="Write operations: 0.00/s"),
            Metric("disk_write_ios", 0.0, levels=(300.0, 400.0)),
            Result(state=state.OK, summary="Latency: 0 seconds"),
            Metric("disk_latency", 0.0, levels=(0.08, 0.16)),
            Metric("disk_average_read_request_size", 0.0),
            Metric("disk_average_request_size", 0.0),
            Metric("disk_average_write_request_size", 0.0),
        ]
示例#13
0
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    params_function: Callable[[], Parameters],
    *,
    value_store_manager: item_state.ValueStoreManager,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.

    Returns an AggregatedResult; ``submit`` is False when no section data
    was found or the check signalled that results should be ignored.
    """
    # Unknown plugin: report "check not implemented" instead of crashing.
    if plugin is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    check_function = (plugin.cluster_check_function
                      if host_config.is_cluster else plugin.check_function)

    # Management-board plugins read their data from the management source.
    source_type = (SourceType.MANAGEMENT
                   if service.check_plugin_name.is_management_name() else SourceType.HOST)

    config_cache = config.get_config_cache()

    # Initialized before the try block so the crash handler below can always
    # reference whatever had been assembled when an exception hit.
    kwargs = {}
    try:
        # Fetch the parsed section data for the plugin's subscribed sections,
        # either aggregated over all cluster nodes or for the single host.
        kwargs = parsed_sections_broker.get_section_cluster_kwargs(
            config_cache.get_clustered_service_node_keys(
                host_config.hostname,
                source_type,
                service.description,
            ) or [],
            plugin.sections,
        ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, source_type),
            plugin.sections,
        )

        if not kwargs and not service.check_plugin_name.is_management_name():
            # in 1.6 some plugins where discovered for management boards, but with
            # the regular host plugins name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = parsed_sections_broker.get_section_cluster_kwargs(
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    SourceType.MANAGEMENT,
                    service.description,
                ) or [],
                plugin.sections,
            ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
                HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                plugin.sections,
            )

        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        # Only pass "item" / "params" keywords if the plugin actually takes them.
        if service.item is not None:
            kwargs["item"] = service.item

        if plugin.check_default_parameters is not None:
            kwargs["params"] = params_function()

        # The check runs within host/service context and with its own value
        # store namespace for counters and persisted state.
        with plugin_contexts.current_host(host_config.hostname), \
            plugin_contexts.current_service(service), \
            value_store_manager.namespace(service.id()):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        # Not a failure: e.g. a counter needs a second sample. Do not submit.
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )

    except MKTimeout:
        # Timeouts are handled by the caller.
        raise

    except Exception:
        # Any other failure becomes a crash report (state UNKNOWN), unless
        # debug mode is active, in which case we want the traceback.
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_name=host_config.hostname,
            service_name=service.description,
            plugin_name=service.check_plugin_name,
            plugin_kwargs=kwargs,
            is_manual=service.id() in table,
        ), []

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
示例#14
0
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    # misleading: these are params that *may* be *partially* time specific
    timespecific_parameters: LegacyCheckParameters,
    *,
    value_store_manager: value_store.ValueStoreManager,
    persist_value_store_changes: bool,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.

    Returns an AggregatedResult; ``submit`` is False when no section data
    was found or the check signalled that results should be ignored.
    """
    # Unknown plugin: report "check not implemented" instead of crashing.
    if plugin is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    config_cache = config.get_config_cache()
    # On clusters the check function is wrapped according to the configured
    # clustered-service mode; on real hosts the plain plugin function is used.
    check_function = (
        _cluster_modes.get_cluster_check_function(
            *config_cache.get_clustered_service_configuration(
                host_config.hostname,
                service.description,
            ),
            plugin=plugin,
            service_id=service.id(),
            persist_value_store_changes=persist_value_store_changes,
        )
        if host_config.is_cluster
        else plugin.check_function
    )
    # Management-board plugins read their data from the management source.
    source_type = (
        SourceType.MANAGEMENT if service.check_plugin_name.is_management_name() else SourceType.HOST
    )
    # BUGFIX: initialize before the try block so the crash handler can always
    # report the kwargs actually assembled. The previous code used
    # globals().get("kwargs", {}), which can never see the function-local
    # variable and therefore always reported an empty mapping.
    kwargs = {}
    try:
        # Fetch the parsed section data for the plugin's subscribed sections,
        # either aggregated over all cluster nodes or for the single host.
        kwargs = (
            get_section_cluster_kwargs(
                parsed_sections_broker,
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    source_type,
                    service.description,
                )
                or [],
                plugin.sections,
            )
            if host_config.is_cluster
            else get_section_kwargs(
                parsed_sections_broker,
                HostKey(host_config.hostname, ipaddress, source_type),
                plugin.sections,
            )
        )
        if not kwargs and not service.check_plugin_name.is_management_name():
            # in 1.6 some plugins where discovered for management boards, but with
            # the regular host plugins name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = (
                get_section_cluster_kwargs(
                    parsed_sections_broker,
                    config_cache.get_clustered_service_node_keys(
                        host_config.hostname,
                        SourceType.MANAGEMENT,
                        service.description,
                    )
                    or [],
                    plugin.sections,
                )
                if host_config.is_cluster
                else get_section_kwargs(
                    parsed_sections_broker,
                    HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                    plugin.sections,
                )
            )
        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        # Only pass "item" / "params" keywords if the plugin actually takes them.
        kwargs = {
            **kwargs,
            **({} if service.item is None else {"item": service.item}),
            **(
                {}
                if plugin.check_default_parameters is None
                else {"params": _final_read_only_check_parameters(timespecific_parameters)}
            ),
        }
        # The check runs within host/service context and with its own value
        # store namespace for counters and persisted state.
        with plugin_contexts.current_host(host_config.hostname), plugin_contexts.current_service(
            service
        ), value_store_manager.namespace(service.id()):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        # Not a failure: e.g. a counter needs a second sample. Do not submit.
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )
    except MKTimeout:
        # Timeouts are handled by the caller.
        raise
    except Exception:
        # Any other failure becomes a crash report (state UNKNOWN), unless
        # debug mode is active, in which case we want the traceback.
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = (
            3,
            cmk.base.crash_reporting.create_check_crash_dump(
                host_name=host_config.hostname,
                service_name=service.description,
                plugin_name=service.check_plugin_name,
                plugin_kwargs=kwargs,
                is_manual=service.id() in table,
            ),
            [],
        )

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
示例#15
0
def test_parse_diskstat_predictive(value_store: MutableMapping[str, Any],
                                   mocker: MockerFixture):
    """Regression test: check_diskstat must cope with predictive levels."""
    # SUP-5924
    DATA = [
        ['1617784511'],
        [
            '259', '0', 'nvme0n1', '131855', '42275', '8019303', '34515',
            '386089', '166344', '13331634', '138121', '0', '185784', '177210',
            '0', '0', '0', '0', '41445', '4574'
        ],
        [
            '53', '0', 'dm-0', '172574', '0', '7980626', '74812', '548159',
            '0', '12443656', '706944', '0', '189576', '781756', '0', '0', '0',
            '0', '0', '0'
        ],
        [
            '53', '1', 'dm-1', '171320', '0', '7710074', '74172', '546564',
            '0', '12514416', '674352', '0', '186452', '748524', '0', '0', '0',
            '0', '0', '0'
        ],
        [
            '53', '2', 'dm-2', '194', '0', '8616', '68', '0', '0', '0', '0',
            '0', '72', '68', '0', '0', '0', '0', '0', '0'
        ],
        ['[dmsetup_info]'],
        ['vme0n1p3_crypt', '253:0'],
        ['buntu--vg-swap_1', '253:2', 'ubuntu-vg', 'swap_1'],
        ['buntu--vg-root', '253:1', 'ubuntu-vg', 'root'],
    ]

    # 'read' uses predictive levels; all other metrics use fixed levels.
    PARAMS = {
        'average': 300,
        'latency': (80.0, 160.0),
        'read': {
            'horizon': 90,
            'levels_lower': ('absolute', (2.0, 4.0)),
            'levels_upper': ('relative', (10.0, 20.0)),
            'levels_upper_min': (10.0, 15.0),
            'period': 'wday'
        },
        'read_ios': (400.0, 600.0),
        'read_latency': (80.0, 160.0),
        'read_wait': (30.0, 50.0),
        'utilization': (80.0, 90.0),
        'write': (50.0, 100.0),
        'write_ios': (300.0, 400.0),
        'write_latency': (80.0, 160.0),
        'write_wait': (30.0, 50.0)
    }

    # Fake the prediction engine: no reference yet, fixed fake levels.
    mocker.patch("cmk.base.check_api._prediction.get_levels",
                 return_value=(None, (2.1, 4.1, None, None)))
    dummy_service = Service(
        CheckPluginName("unittest_sd"),
        parameters={},
        item="item-nvme0n1",
        description="unittest_sd_description",
    )
    with plugin_contexts.current_host(
            "unittest-hn"), plugin_contexts.current_service(dummy_service):

        # First evaluation: rates cannot be computed from a single sample.
        with pytest.raises(IgnoreResultsError):
            list(
                diskstat.check_diskstat("nvme0n1", PARAMS,
                                        diskstat.parse_diskstat(DATA), None))
        # Advance the agent timestamp by one second so rates can be computed.
        DATA[0][0] = '1617784512'
        assert list(
            diskstat.check_diskstat(
                "nvme0n1",
                PARAMS,
                diskstat.parse_diskstat(DATA),
                None,
            )
        ) == [
            Result(state=state.OK,
                   notice='All values averaged over 5 minutes 0 seconds'),
            Result(state=state.OK, notice='Utilization: 0%'),
            Metric('disk_utilization', 0.0, levels=(0.8, 0.9)),
            Result(state=state.OK,
                   summary='Read: 0.00 B/s (no reference for prediction yet)'),
            Metric('disk_read_throughput', 0.0,
                   levels=(2.1, 4.1)),  # fake levels are quite low
            Result(state=state.OK, summary='Write: 0.00 B/s'),
            Metric('disk_write_throughput',
                   0.0,
                   levels=(50000000.0, 100000000.0)),
            Result(state=state.OK, notice='Average wait: 0 seconds'),
            Metric('disk_average_wait', 0.0),
            Result(state=state.OK, notice='Average read wait: 0 seconds'),
            Metric('disk_average_read_wait', 0.0, levels=(0.03, 0.05)),
            Result(state=state.OK, notice='Average write wait: 0 seconds'),
            Metric('disk_average_write_wait', 0.0, levels=(0.03, 0.05)),
            Result(state=state.OK, notice='Average queue length: 0.00'),
            Metric('disk_queue_length', 0.0),
            Result(state=state.OK, notice='Read operations: 0.00/s'),
            Metric('disk_read_ios', 0.0, levels=(400.0, 600.0)),
            Result(state=state.OK, notice='Write operations: 0.00/s'),
            Metric('disk_write_ios', 0.0, levels=(300.0, 400.0)),
            Result(state=state.OK, summary='Latency: 0 seconds'),
            Metric('disk_latency', 0.0, levels=(0.08, 0.16)),
            Metric('disk_average_read_request_size', 0.0),
            Metric('disk_average_request_size', 0.0),
            Metric('disk_average_write_request_size', 0.0),
        ]