Example #1
def inventory_df_netapp(info):
    mplist = []
    for volume, size_kb, _used_kb in info:
        if saveint(
                size_kb
        ) > 0:  # Exclude filesystems with zero size (some snapshots)
            mplist.append(volume)
    return df_discovery(host_extra_conf(host_name(), filesystem_groups),
                        mplist)
Example #2
def _get_discovery_ruleset() -> Any:
    # NOTE: THIS IS AN API VIOLATION, DO NOT REPLICATE THIS
    # This is needed because inventory_ipmi_rules was once not a dict, which is not allowed by the
    # API for discovery rulesets
    # ==============================================================================================
    rules_all_hosts = get_discovery_ruleset(RuleSetName("inventory_ipmi_rules"))
    rules_this_host = get_config_cache().host_extra_conf(host_name(), rules_all_hosts)
    rules_this_host += [{"discovery_mode": ("summarize", {})}]  # default parameters
    return rules_this_host[0]
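
The pattern above appends a default parameter dict to the end of the matched rule list and then returns the first entry, so any host-specific rule takes precedence over the default. A minimal, self-contained sketch of that precedence logic (the function name and rule values are illustrative, not part of the Checkmk API):

# Minimal sketch of the "append defaults last, take the first match" idea shown above.
from typing import Any, Dict, List


def effective_rule(matched_rules: List[Dict[str, Any]]) -> Dict[str, Any]:
    # Rules are ordered by precedence: the first matching rule wins.
    # Appending the default as the last element guarantees there is
    # always at least one entry to return.
    return (list(matched_rules) + [{"discovery_mode": ("summarize", {})}])[0]


# A host-specific rule overrides the default ...
assert effective_rule([{"discovery_mode": ("single", {})}])["discovery_mode"][0] == "single"
# ... and without any matching rule the default applies.
assert effective_rule([])["discovery_mode"][0] == "summarize"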
Example #3
def df_inventory(mplist):
    group_patterns: Dict[str, Tuple[List[str], List[str]]] = {}
    for groups in host_extra_conf(host_name(), filesystem_groups):
        for group in transform_filesystem_groups(groups):
            grouping_entry = group_patterns.setdefault(group['group_name'], ([], []))
            grouping_entry[0].extend(group['patterns_include'])
            grouping_entry[1].extend(group['patterns_exclude'])

    ungrouped_mountpoints, groups = ungrouped_mountpoints_and_groups(mplist, group_patterns)

    return [(mp, {}) for mp in ungrouped_mountpoints] \
            + [(group, {"patterns": group_patterns[group]}) for group in groups]
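
The helper ungrouped_mountpoints_and_groups is not part of this excerpt. Below is a hedged, self-contained approximation of what it does with the collected include/exclude patterns, assuming shell-style (fnmatch) pattern matching; it is not the real implementation.

# Hedged sketch: split mount points into ungrouped ones and named groups based on
# include/exclude patterns, mirroring the group_patterns structure built above.
# This is an assumption about the helper's behaviour, not the real implementation.
from fnmatch import fnmatch
from typing import Dict, List, Set, Tuple


def ungrouped_mountpoints_and_groups_sketch(
    mplist: List[str],
    group_patterns: Dict[str, Tuple[List[str], List[str]]],
) -> Tuple[Set[str], Set[str]]:
    ungrouped = set(mplist)
    groups = set()
    for group_name, (includes, excludes) in group_patterns.items():
        members = {
            mp for mp in mplist
            if any(fnmatch(mp, pat) for pat in includes)
            and not any(fnmatch(mp, pat) for pat in excludes)
        }
        if members:
            groups.add(group_name)
            ungrouped -= members
    return ungrouped, groups


print(ungrouped_mountpoints_and_groups_sketch(
    ["/", "/data/a", "/data/b"],
    {"data": (["/data/*"], [])},
))  # ({'/'}, {'data'})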
Example #4
def _compile_params() -> Dict[str, Any]:
    compiled_params: Dict[str, Any] = {"reclassify_patterns": []}

    for rule in host_extra_conf(host_name(), cmk.base.config.logwatch_rules):
        if isinstance(rule, dict):
            compiled_params["reclassify_patterns"].extend(rule["reclassify_patterns"])
            if "reclassify_states" in rule:
                # (mo) wondering during migration: doesn't this mean the last one wins?
                compiled_params["reclassify_states"] = rule["reclassify_states"]
        else:
            compiled_params["reclassify_patterns"].extend(rule)

    return compiled_params
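
As the migration comment wonders, reclassify_states is overwritten by every matching dict rule while reclassify_patterns accumulate, so the last matching rule does win for the states. A tiny standalone illustration of that merge behaviour (the rule contents are invented):

# Standalone illustration of the merge semantics above: patterns accumulate,
# reclassify_states is overwritten, so the last matching rule wins for it.
# The rule contents below are made up for the sake of the example.
rules = [
    {"reclassify_patterns": [("C", "error.*", "")], "reclassify_states": {"c_to": "W"}},
    {"reclassify_patterns": [("W", "warn.*", "")], "reclassify_states": {"c_to": "O"}},
]

compiled = {"reclassify_patterns": []}
for rule in rules:
    compiled["reclassify_patterns"].extend(rule["reclassify_patterns"])
    if "reclassify_states" in rule:
        compiled["reclassify_states"] = rule["reclassify_states"]

assert len(compiled["reclassify_patterns"]) == 2
assert compiled["reclassify_states"] == {"c_to": "O"}  # the last matching rule wins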
Example #5
def inventory_freeipmi(parsed):
    rules = host_extra_conf(host_name(), inventory_ipmi_rules)
    if rules:
        mode, ignore_params = ipmi.transform_discovery_ruleset(rules[0])
    else:
        mode, ignore_params = 'single', {}

    if mode == "summarize":
        yield "Summary FreeIPMI", {}
    else:
        for sensorname, data in parsed.items():
            if not ipmi_ignore_entry(sensorname, data["status_txt"], ignore_params):
                yield sensorname, {}
Example #6
def _get_effective_service_level(
    plugin_name: CheckPluginName,
    item: Optional[str],
) -> int:
    """Get the service level that applies to the current service."""

    host = host_name()
    service_description = cmk.base.config.service_description(host, plugin_name, item)
    config_cache = cmk.base.config.get_config_cache()
    service_level = config_cache.service_level_of_service(host, service_description)
    if service_level is not None:
        return service_level

    return config_cache.get_host_config(host).service_level or 0
Example #7
def inventory_diskstat_generic(parsed):
    # Nothing to do on empty data
    if not parsed:
        return

    # New style: use rule based configuration, defaulting to summary mode
    if diskstat_inventory_mode == "rule":
        hits = host_extra_conf(host_name(), diskstat_inventory)
        if len(hits) > 0:
            modes = hits[0]
        else:
            modes = ["summary"]

    elif diskstat_inventory_mode == "single":
        modes = ["physical"]
    elif diskstat_inventory_mode == "summary":
        modes = ["summary"]
    else:
        modes = ["legacy"]

    inventory = []
    if "summary" in modes:
        inventory.append(("SUMMARY", "diskstat_default_levels"))

    if "legacy" in modes:
        inventory += [("read", None), ("write", None)]

    for line in parsed:
        name = line[1]
        if "physical" in modes and \
           not ' ' in name and \
           not diskstat_diskless_pattern.match(name):
            inventory.append((name, "diskstat_default_levels"))

        if "lvm" in modes and \
           name.startswith("LVM "):
            inventory.append((name, "diskstat_default_levels"))

        if "vxvm" in modes and \
           name.startswith("VxVM "):
            inventory.append((name, "diskstat_default_levels"))

        if "diskless" in modes and \
           diskstat_diskless_pattern.match(name):
            # Partitions that appear without their disk - typical in XEN virtual setups.
            # E.g. there are xvda1, xvda2, but no xvda...
            inventory.append((name, "diskstat_default_levels"))

    return inventory
Example #8
def inventory_hr_fs(info):
    mplist = []
    for hrtype, hrdescr, _hrunits, hrsize, _hrused in info:
        hrdescr = fix_hr_fs_mountpoint(hrdescr)
        # NOTE: These types are defined in the HR-TYPES-MIB.
        #       .1.3.6.1.2.1.25.2.1 +
        #                           +-> .4 "hrStorageFixedDisk"
        if hrtype in [ ".1.3.6.1.2.1.25.2.1.4",
                       # This strange value below is needed for VCenter Appliances
                       ".1.3.6.1.2.1.25.2.3.1.2.4"] and \
                hrdescr not in inventory_df_exclude_mountpoints and \
                saveint(hrsize) != 0:
            mplist.append(hrdescr)
    return df_discovery(host_extra_conf(host_name(), filesystem_groups),
                        mplist)
Example #9
def logwatch_forward_spool_directory(
    method: str,
    item: Optional[str],
    messages: List[str],
) -> LogwatchFordwardResult:
    if not messages:
        return LogwatchFordwardResult()

    spool_path = method[6:]
    file_name = '.%s_%s%d' % (
        host_name(), item and item.replace('/', '\\') + '_' or '', time.time())
    os.makedirs(spool_path, exist_ok=True)

    with open('%s/%s' % (spool_path, file_name), 'w', encoding="utf-8") as f:
        f.write('\n'.join(messages) + '\n')
    os.rename('%s/%s' % (spool_path, file_name),
              '%s/%s' % (spool_path, file_name[1:]))

    return LogwatchFordwardResult(num_forwarded=len(messages))
Example #10
def logwatch_forward_spool_directory(
    method: str,
    item: Optional[str],
    syslog_messages: Sequence[SyslogMessage],
) -> LogwatchFordwardResult:

    if not syslog_messages:
        return LogwatchFordwardResult()

    spool_file = Path(
        method[6:], '.%s_%s%d' % (
            host_name(),
            (item.replace('/', '\\') + '_') if item else '',
            time.time(),
        ))

    spool_file.parent.mkdir(parents=True, exist_ok=True)

    spool_file.write_text('\n'.join(map(repr, syslog_messages)) + '\n')
    spool_file.rename(spool_file.parent / spool_file.name[1:])

    return LogwatchFordwardResult(num_forwarded=len(syslog_messages))
Example #11
def logwatch_forward_spool_directory(
    method: str,
    item: Optional[str],
    syslog_messages: Sequence[SyslogMessage],
) -> LogwatchFordwardResult:

    if not syslog_messages:
        return LogwatchFordwardResult()

    spool_file = Path(
        method[6:],
        ".%s_%s%d" % (
            host_name(),
            (item.replace("/", "\\") + "_") if item else "",
            time.time(),
        ),
    )

    spool_file.parent.mkdir(parents=True, exist_ok=True)

    spool_file.write_text("\n".join(map(repr, syslog_messages)) + "\n")
    spool_file.rename(spool_file.parent / spool_file.name[1:])

    return LogwatchFordwardResult(num_forwarded=len(syslog_messages))
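
All three spool variants write the messages to a hidden dot file first and only rename it to its final name once the content is complete, so a consumer scanning the spool directory never picks up a half-written file. A minimal, dependency-free sketch of that publish-by-rename idea (paths and contents are illustrative only):

# Minimal sketch of the "write hidden, then rename" spooling idea used above.
# Paths and contents are illustrative; the rename is atomic on POSIX as long as
# source and target stay on the same filesystem.
import tempfile
import time
from pathlib import Path

spool_dir = Path(tempfile.mkdtemp())            # stand-in for the real spool path
hidden = spool_dir / (".myhost_mylog_%d" % time.time())
hidden.write_text("line 1\nline 2\n", encoding="utf-8")

# Only now does the file become visible to consumers under its final name.
hidden.rename(spool_dir / hidden.name[1:])
print(sorted(p.name for p in spool_dir.iterdir()))  # ['myhost_mylog_<timestamp>']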
Example #12
def _get_discovery_groups():
    """Isolate the remaining API violation w.r.t. parameters"""
    return host_extra_conf(
        host_name(),
        get_checkgroup_parameters('logwatch_groups', []),
    )
Example #13
def check_logwatch_generic(item, patterns, loglines, found) -> CheckResult:
    logmsg_dir = pathlib.Path(cmk.utils.paths.var_dir, 'logwatch', host_name())

    logmsg_dir.mkdir(parents=True, exist_ok=True)

    logmsg_file_path = logmsg_dir / item.replace("/", "\\")

    # Logfile (=item) section not found and no local file found. This usually
    # means that the corresponding logfile also vanished on the target host.
    if not found and not logmsg_file_path.exists():
        yield Result(state=state.UNKNOWN, summary="log not present anymore")
        return

    block_collector = LogwatchBlockCollector()
    current_block = None

    logmsg_file_exists = logmsg_file_path.exists()
    mode = 'r+' if logmsg_file_exists else 'w'
    try:
        logmsg_file_handle = logmsg_file_path.open(mode, encoding='utf-8')
    except IOError as exc:
        raise IOError("User %r cannot open file for writing: %s" %
                      (getpass.getuser(), exc)) from exc

    # TODO: repr() of a dict may change.
    pattern_hash = hashlib.sha256(repr(patterns).encode()).hexdigest()
    net_lines = 0
    # parse cached log lines
    if logmsg_file_exists:
        # new format contains hash of patterns on the first line so we only reclassify if they
        # changed
        initline = logmsg_file_handle.readline().rstrip('\n')
        if initline.startswith('[[[') and initline.endswith(']]]'):
            old_pattern_hash = initline[3:-3]
            skip_reclassification = old_pattern_hash == pattern_hash
        else:
            logmsg_file_handle.seek(0)
            skip_reclassification = False

        logfile_size = logmsg_file_path.stat().st_size
        if skip_reclassification and logfile_size > LOGWATCH_MAX_FILESIZE:
            # early out: without reclassification the file won't shrink and if it is already at
            # the maximum size, all input is dropped anyway
            if logfile_size > LOGWATCH_MAX_FILESIZE * 2:
                # if the file is far too large, truncate it
                truncate_by_line(logmsg_file_path, LOGWATCH_MAX_FILESIZE)
            yield _dropped_msg_result(LOGWATCH_MAX_FILESIZE)
            return

        for line in logmsg_file_handle:
            line = line.rstrip('\n')
            # Skip empty lines
            if not line:
                continue
            if line.startswith('<<<') and line.endswith('>>>'):
                # The section is finished here. Add it to the list of reclassified lines if the
                # state of the block is not "I" -> "ignore"
                block_collector(current_block)
                current_block = LogwatchBlock(line, patterns)
            elif current_block is not None:
                current_block.add_line(line, skip_reclassification)
                net_lines += 1

        # The last section is finished here. Add it to the list of reclassified lines if the
        # state of the block is not "I" -> "ignore"
        block_collector(current_block)

        if skip_reclassification:
            output_size = logmsg_file_handle.tell()
            # when skipping reclassification, output lines contain only headers anyway
            block_collector.clear_lines()
        else:
            output_size = block_collector.size
    else:
        output_size = 0
        skip_reclassification = False

    header = time.strftime("<<<%Y-%m-%d %H:%M:%S UNKNOWN>>>\n")
    output_size += len(header)
    header = six.ensure_str(header)

    # process new input lines - but only when there is some room left in the file
    if output_size < LOGWATCH_MAX_FILESIZE:
        current_block = LogwatchBlock(header, patterns)
        for line in loglines:
            current_block.add_line(line, False)
            net_lines += 1
            output_size += len(line.encode('utf-8'))
            if output_size >= LOGWATCH_MAX_FILESIZE:
                break
        block_collector(current_block)

    # when reclassifying, rewrite the whole file, otherwise append
    if not skip_reclassification and block_collector.get_lines():
        logmsg_file_handle.seek(0)
        logmsg_file_handle.truncate()
        logmsg_file_handle.write(u"[[[%s]]]\n" % pattern_hash)

    for line in block_collector.get_lines():
        logmsg_file_handle.write(line)
    # correct output size
    logmsg_file_handle.close()
    if net_lines == 0 and logmsg_file_exists:
        logmsg_file_path.unlink()

    # if logfile has reached maximum size, abort with critical state
    if logmsg_file_path.exists() and logmsg_file_path.stat().st_size > LOGWATCH_MAX_FILESIZE:
        yield _dropped_msg_result(LOGWATCH_MAX_FILESIZE)
        return

    #
    # Render output
    #

    if block_collector.worst <= 0:
        yield Result(state=state.OK, summary="No error messages")
        return

    info = block_collector.get_count_info()
    if LOGWATCH_SERVICE_OUTPUT == 'default':
        info += ' (Last worst: "%s")' % block_collector.last_worst_line

    summary, details = info, None
    if '\n' in info.strip():
        summary, details = info.split('\n', 1)

    yield Result(
        state=state(block_collector.worst),
        summary=summary,
        details=details,
    )
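
The [[[<sha256>]]] header written at the top of the cached file is what later runs compare against to decide whether reclassification can be skipped; Example #20 below factors this check into a _patterns_changed helper that is not shown in this excerpt. A hedged reconstruction of that check, based only on the inline logic above:

# Hedged reconstruction of the pattern-hash check from the inline code above; the
# helper name mirrors Example #20 below, but this is not the real implementation.
import hashlib
import io
from typing import IO


def _patterns_changed_sketch(handle: IO[str], current_hash: str) -> bool:
    first_line = handle.readline().rstrip("\n")
    if first_line.startswith("[[[") and first_line.endswith("]]]"):
        return first_line[3:-3] != current_hash
    # Old format without a hash header: rewind and force reclassification.
    handle.seek(0)
    return True


patterns = {"C": ["error"], "W": ["warn"]}  # invented patterns for the demo
pattern_hash = hashlib.sha256(repr(patterns).encode()).hexdigest()

assert _patterns_changed_sketch(io.StringIO("[[[%s]]]\nfoo\n" % pattern_hash), pattern_hash) is False
assert _patterns_changed_sketch(io.StringIO("some old content\n"), pattern_hash) is True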
Example #14
def get_ec_rule_params():
    """Isolate the remaining API violation w.r.t. parameters"""
    return host_extra_conf(
        host_name(),
        get_checkgroup_parameters('logwatch_ec', []),
    )
Example #15
def _get_configured_only_from() -> Union[None, str, list[str]]:
    return config.HostConfig.make_host_config(host_name()).only_from
Example #16
def logwatch_spool_path() -> Path:
    return Path(cmk.utils.paths.var_dir, "logwatch_spool", host_name())
Example #17
def check_logwatch_ec_common(
    item: Optional[str],
    params: Mapping[str, Any],
    parsed: ClusterSection,
    *,
    service_level: int,
) -> CheckResult:
    yield from logwatch.check_errors(parsed)

    if item:
        # If this check has an item (logwatch.ec_single), only forward the information from this log
        if not any(item in node_data.logfiles for node_data in parsed.values()
                   ) or not logwatch.ec_forwarding_enabled(params, item):
            return
        used_logfiles = [item]
    else:
        # Filter logfiles if some should be excluded
        used_logfiles = [
            name for node_data in parsed.values()
            for name in node_data.logfiles
            if logwatch.ec_forwarding_enabled(params, name)
        ]

    # Check if the number of expected files matches the actual one
    if params.get("monitor_logfilelist"):
        if "expected_logfiles" not in params:
            yield Result(
                state=state.WARN,
                summary=(
                    "You enabled monitoring the list of forwarded logfiles. "
                    "You need to redo service discovery."),
            )
        else:
            expected = params["expected_logfiles"]
            missing = [f for f in expected if f not in used_logfiles]
            if missing:
                yield Result(
                    state=state.WARN,
                    summary="Missing logfiles: %s" % (", ".join(missing)),
                )

            exceeding = [f for f in used_logfiles if f not in expected]
            if exceeding:
                yield Result(
                    state=state.WARN,
                    summary="Newly appeared logfiles: %s" %
                    (", ".join(exceeding)),
                )

    # 3. create a syslog message for each line
    # <128> Oct 24 10:44:27 Klappspaten /var/log/syslog: Oct 24 10:44:27 Klappspaten logger: asdasas
    # <facility+priority> timestamp hostname logfile: message
    facility = params.get("facility", 17)  # default to "local1"
    syslog_messages = []
    cur_time = int(time.time())

    forwarded_logfiles = set([])

    # Keep track of reclassified lines
    rclfd_total = 0
    rclfd_to_ignore = 0

    logfile_reclassify_settings: Dict[str, Any] = {}

    def add_reclassify_settings(settings):
        if isinstance(settings, dict):
            logfile_reclassify_settings["reclassify_patterns"].extend(
                settings.get("reclassify_patterns", []))
            if "reclassify_states" in settings:
                logfile_reclassify_settings["reclassify_states"] = settings[
                    "reclassify_states"]
        else:  # legacy configuration
            logfile_reclassify_settings["reclassify_patterns"].extend(settings)

    for logfile in used_logfiles:
        lines = _filter_accumulated_lines(parsed, logfile)

        logfile_reclassify_settings["reclassify_patterns"] = []
        logfile_reclassify_settings["reclassify_states"] = {}

        # Determine logwatch patterns specifically for this logfile
        if params.get("logwatch_reclassify"):
            logfile_settings = service_extra_conf(
                HostName(host_name()),
                logfile,
                cmk.base.config.logwatch_rules,
            )
            for settings in logfile_settings:
                add_reclassify_settings(settings)

        for line in lines:
            rclfd_level = None
            if logfile_reclassify_settings:
                old_level, _text = line.split(" ", 1)
                level = logwatch.reclassify(Counter(),
                                            logfile_reclassify_settings,
                                            line[2:], old_level)
                if level != old_level:
                    rclfd_total += 1
                    rclfd_level = level
                    if level == "I":  # Ignored lines are not forwarded
                        rclfd_to_ignore += 1
                        continue

            syslog_messages.append(
                SyslogMessage(
                    facility=facility,
                    severity=logwatch_to_prio(rclfd_level or line[0]),
                    timestamp=cur_time,
                    host_name=host_name(),
                    application=logfile,
                    text=line[2:],
                    service_level=service_level,
                ))
            forwarded_logfiles.add(logfile)

    try:
        if forwarded_logfiles:
            logfile_info = " from " + ",".join(forwarded_logfiles)
        else:
            logfile_info = ""

        result = logwatch_forward_messages(params.get("method"), item,
                                           syslog_messages)

        yield Result(
            state=state.OK,
            summary="Forwarded %d messages%s" %
            (result.num_forwarded, logfile_info),
        )
        yield Metric("messages", result.num_forwarded)

        exc_txt = " (%s)" % result.exception if result.exception else ""

        if result.num_spooled:
            yield Result(
                state=state.WARN,
                summary="Spooled %d messages%s" %
                (result.num_spooled, exc_txt),
            )

        if result.num_dropped:
            yield Result(
                state=state.CRIT,
                summary="Dropped %d messages%s" %
                (result.num_dropped, exc_txt),
            )

    except Exception as exc:
        if cmk.utils.debug.enabled():
            raise
        yield Result(
            state=state.CRIT,
            summary="Failed to forward messages (%s). Lost %d messages." %
            (exc, len(syslog_messages)),
        )

    if rclfd_total:
        yield Result(
            state=state.OK,
            summary=
            "Reclassified %d messages through logwatch patterns (%d to IGNORE)"
            % (rclfd_total, rclfd_to_ignore),
        )
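
The comment in the code shows the forwarded format <facility+priority> timestamp hostname logfile: message. In syslog the leading PRI value is facility * 8 + severity, so <128> is facility 16 (local0) with severity 0, and the default facility 17 (local1) used above combined with a warning-level severity would give <140>. A small illustration follows; the mapping from logwatch levels to syslog severities below is an assumption, since logwatch_to_prio() is not part of this excerpt.

# Illustration of the syslog PRI encoding referenced in the comment above.
# The level-to-severity mapping is an assumption for this sketch; the real
# mapping lives in logwatch_to_prio(), which is not shown here.
def syslog_pri(facility: int, severity: int) -> int:
    return facility * 8 + severity


ASSUMED_SEVERITY = {"O": 5, "W": 4, "C": 2}  # notice / warning / critical (assumed)

print(syslog_pri(16, 0))                      # 128, as in the example comment
print(syslog_pri(17, ASSUMED_SEVERITY["W"]))  # 140: default facility "local1", warning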
Example #18
def logwatch_spool_path():
    return cmk.utils.paths.var_dir + "/logwatch_spool/" + host_name()
Example #19
def discover_single_items(discovery_rules):
    config = host_extra_conf_merged(host_name(), discovery_rules)
    mode = config.get("mode", "single")
    return mode == "single"
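
host_extra_conf_merged collapses all matching rules into a single dict. A hedged sketch of that merge, assuming the usual Checkmk convention that earlier (more specific) rules win per key; this is an assumption about its behaviour, not the real code:

# Hedged sketch of merging matched rule dicts where earlier rules win per key --
# an assumption about host_extra_conf_merged's behaviour, not the real code.
from typing import Any, Dict, List


def merge_rules_first_wins(matched_rules: List[Dict[str, Any]]) -> Dict[str, Any]:
    merged: Dict[str, Any] = {}
    for rule in matched_rules:
        for key, value in rule.items():
            merged.setdefault(key, value)
    return merged


config = merge_rules_first_wins([{"mode": "single"}, {"mode": "summary", "extra": 1}])
assert config == {"mode": "single", "extra": 1}
print(config.get("mode", "single") == "single")  # True -> discover single items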
Example #20
def check_logwatch_generic(
    *,
    item: str,
    patterns,
    loglines,
    found: bool,
    max_filesize: int,
) -> CheckResult:
    logmsg_dir = pathlib.Path(cmk.utils.paths.var_dir, "logwatch", host_name())

    logmsg_dir.mkdir(parents=True, exist_ok=True)

    logmsg_file_path = logmsg_dir / item.replace("/", "\\")

    # Logfile (=item) section not found and no local file found. This usually
    # means that the corresponding logfile also vanished on the target host.
    if not found and not logmsg_file_path.exists():
        yield Result(state=state.UNKNOWN, summary="log not present anymore")
        return

    block_collector = LogwatchBlockCollector()

    logmsg_file_exists = logmsg_file_path.exists()
    logmsg_file_handle = logmsg_file_path.open(
        "r+" if logmsg_file_exists else "w", encoding="utf-8")

    # TODO: repr() of a dict may change.
    pattern_hash = hashlib.sha256(repr(patterns).encode()).hexdigest()
    if not logmsg_file_exists:
        output_size = 0
        reclassify = True
    else:  # parse cached log lines
        reclassify = _patterns_changed(logmsg_file_handle, pattern_hash)

        if not reclassify and _truncate_way_too_large_result(
                logmsg_file_path, max_filesize):
            yield _dropped_msg_result(max_filesize)
            return

        block_collector.extend(
            _extract_blocks(logmsg_file_handle, patterns, reclassify))

        if reclassify:
            output_size = block_collector.size
        else:
            output_size = logmsg_file_handle.tell()
            # when skipping reclassification, output lines contain only headers anyway
            block_collector.clear_lines()

    header = time.strftime("<<<%Y-%m-%d %H:%M:%S UNKNOWN>>>\n")
    output_size += len(header)
    header = six.ensure_str(header)

    # process new input lines - but only when there is some room left in the file
    block_collector.extend(
        _extract_blocks([header] + loglines,
                        patterns,
                        False,
                        limit=max_filesize - output_size))

    # when reclassifying, rewrite the whole file, otherwise append
    if reclassify and block_collector.get_lines():
        logmsg_file_handle.seek(0)
        logmsg_file_handle.truncate()
        logmsg_file_handle.write("[[[%s]]]\n" % pattern_hash)

    for line in block_collector.get_lines():
        logmsg_file_handle.write(line)
    # correct output size
    logmsg_file_handle.close()

    if not block_collector.saw_lines:
        logmsg_file_path.unlink(missing_ok=True)

    # if logfile has reached maximum size, abort with critical state
    if logmsg_file_path.exists(
    ) and logmsg_file_path.stat().st_size > max_filesize:
        yield _dropped_msg_result(max_filesize)
        return

    #
    # Render output
    #

    if block_collector.worst <= 0:
        yield Result(state=state.OK, summary="No error messages")
        return

    info = block_collector.get_count_info()
    if LOGWATCH_SERVICE_OUTPUT == "default":
        info += ' (Last worst: "%s")' % block_collector.last_worst_line

    summary, details = info, None
    if "\n" in info.strip():
        summary, details = info.split("\n", 1)

    yield Result(
        state=state(block_collector.worst),
        summary=summary,
        details=details,
    )
Example #21
def _logmsg_file_path(item: str) -> pathlib.Path:
    logmsg_dir = pathlib.Path(cmk.utils.paths.var_dir, "logwatch", host_name())
    logmsg_dir.mkdir(parents=True, exist_ok=True)
    return logmsg_dir / item.replace("/", "\\")