Example #1
def update_dns_cache():
    failed = []

    ip_lookup_cache = _get_ip_lookup_cache()
    ip_lookup_cache.persist_on_update = False

    console.verbose("Cleaning up existing DNS cache...\n")
    _clear_ip_lookup_cache(ip_lookup_cache)

    console.verbose("Updating DNS cache...\n")
    for hostname, family in _get_dns_cache_lookup_hosts():
        console.verbose("%s (IPv%d)..." % (hostname, family))
        try:
            ip = lookup_ip_address(hostname, family)
            console.verbose("%s\n" % ip)

        except (MKTerminate, MKTimeout):
            # If we were more specific with the exception handler below, we
            # could drop this special handling here
            raise

        except Exception as e:
            failed.append(hostname)
            console.verbose("lookup failed: %s\n" % e)
            if cmk.utils.debug.enabled():
                raise
            continue

    ip_lookup_cache.persist_on_update = True
    ip_lookup_cache.save_persisted()

    return len(ip_lookup_cache), failed
Example #2
def _cleanup_old_piggybacked_files(time_settings):
    # type: (Dict[Tuple[Optional[str], str], int]) -> None
    """Remove piggybacked data files which exceed configured maximum cache age."""

    base_dir = str(cmk.utils.paths.piggyback_dir)
    for piggybacked_hostname in os.listdir(base_dir):
        if piggybacked_hostname[0] == ".":
            continue

        # Cleanup piggyback files from sources that we have no status file for
        backed_host_dir_path = os.path.join(base_dir, piggybacked_hostname)
        for source_hostname in os.listdir(backed_host_dir_path):
            if source_hostname[0] == ".":
                continue

            piggyback_file_path = os.path.join(backed_host_dir_path, source_hostname)

            successfully_processed, reason, _reason_status =\
                _get_piggyback_processed_file_info(source_hostname, piggybacked_hostname, piggyback_file_path, time_settings)

            if not successfully_processed:
                console.verbose("Piggyback file '%s' is outdated (%s). Remove it.\n" %
                                (piggyback_file_path, reason))
                _remove_piggyback_file(piggyback_file_path)

        # Remove empty backed host directory
        try:
            os.rmdir(backed_host_dir_path)
        except OSError as e:
            if e.errno == errno.ENOTEMPTY:
                pass
            else:
                raise
        else:
            console.verbose("Piggyback folder '%s' is empty. Remove it.\n" % backed_host_dir_path)
Example #3
def get_piggyback_raw_data(piggyback_max_cachefile_age, hostname):
    """Returns the usable piggyback data for the given host

    A list of two element tuples where the first element is
    the source host name and the second element is the raw
    piggyback data (byte string)
    """
    if not hostname:
        return []

    piggyback_data = []
    for source_host, piggyback_file_path in get_piggyback_files(
            piggyback_max_cachefile_age, hostname):
        try:
            raw_data = open(piggyback_file_path).read()
        except IOError as e:
            console.verbose(
                "Cannot read piggyback raw data from host %s: %s\n" %
                (source_host, e))
            continue

        console.verbose("Using piggyback raw data from host %s.\n" %
                        source_host)
        piggyback_data.append((source_host, raw_data))

    return piggyback_data
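
A hedged usage sketch for the function above, assuming it is in scope; the host name and the one-hour maximum cache age are placeholders:

for source_host, raw_data in get_piggyback_raw_data(3600, "my-host"):
    # raw_data is the byte string read from the piggyback file of source_host
    console.verbose("Got %d bytes of piggyback data from %s\n" % (len(raw_data), source_host))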
Example #4
def _execute_walks_for_dump(snmp_config, oids):
    for oid in oids:
        try:
            console.verbose("Walk on \"%s\"..." % oid)
            yield walk_for_export(snmp_config, oid)
        except Exception as e:
            console.error("Error: %s\n" % e)
            if cmk.utils.debug.enabled():
                raise
Example #5
def cached_dns_lookup(hostname, family):
    # type: (str, int) -> Optional[str]
    cache = cmk_base.config_cache.get_dict("cached_dns_lookup")
    cache_id = hostname, family

    # Address has already been resolved in prior call to this function?
    try:
        return cache[cache_id]
    except KeyError:
        pass

    # Prepare file based fall-back DNS cache in case resolution fails
    # TODO: Find a place where this only called once!
    ip_lookup_cache = _initialize_ip_lookup_cache()

    cached_ip = ip_lookup_cache.get(cache_id)
    if cached_ip and config.use_dns_cache:
        cache[cache_id] = cached_ip
        return cached_ip

    host_config = config.get_config_cache().get_host_config(hostname)

    if host_config.is_no_ip_host:
        cache[cache_id] = None
        return None

    # Now do the actual DNS lookup
    try:
        ipa = socket.getaddrinfo(
            hostname, None, family == 4 and socket.AF_INET
            or socket.AF_INET6)[0][4][0]

        # Update our cached address if that has changed or was missing
        if ipa != cached_ip:
            console.verbose("Updating IPv%d DNS cache for %s: %s\n" %
                            (family, hostname, ipa))
            _update_ip_lookup_cache(cache_id, ipa)

        cache[cache_id] = ipa  # Update in-memory-cache
        return ipa

    except (MKTerminate, MKTimeout):
        # If we were more specific with the exception handler below, we
        # could drop this special handling here
        raise

    except Exception as e:
        # DNS failed. Use cached IP address if present, even if caching
        # is disabled.
        if cached_ip:
            cache[cache_id] = cached_ip
            return cached_ip
        else:
            cache[cache_id] = None
            raise MKIPAddressLookupError(
                "Failed to lookup IPv%d address of %s via DNS: %s" %
                (family, hostname, e))
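
A minimal usage sketch, assuming the function and the exceptions it raises are importable; the host name is a placeholder:

try:
    ip = cached_dns_lookup("my-host", 4)  # IPv4 lookup; returns None for no-IP hosts
    console.verbose("Resolved to %s\n" % ip)
except MKIPAddressLookupError as e:
    console.verbose("%s\n" % e)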
Example #6
def _get_cached_snmpwalk(hostname, fetchoid):
    path = _snmpwalk_cache_path(hostname, fetchoid)
    try:
        console.vverbose("  Loading %s from walk cache %s\n" % (fetchoid, path))
        return store.load_data_from_file(path)
    except Exception:
        if cmk.utils.debug.enabled():
            raise
        console.verbose("  Failed loading walk cache. Continue without it.\n" % path)
        return None
Example #7
        def wrapped_check_func(hostname, *args, **kwargs):
            host_config = config.get_config_cache().get_host_config(hostname)
            exit_spec = host_config.exit_code_spec()

            status, infotexts, long_infotexts, perfdata = 0, [], [], []

            try:
                status, infotexts, long_infotexts, perfdata = check_func(
                    hostname, *args, **kwargs)

            except SystemExit:
                raise

            except MKTimeout:
                if _in_keepalive_mode():
                    raise
                else:
                    infotexts.append("Timed out")
                    status = max(status, exit_spec.get("timeout", 2))

            except (MKAgentError, MKSNMPError, MKIPAddressLookupError) as e:
                infotexts.append("%s" % e)
                status = exit_spec.get("connection", 2)

            except MKGeneralException as e:
                infotexts.append("%s" % e)
                status = max(status, exit_spec.get("exception", 3))

            except Exception:
                if cmk.utils.debug.enabled():
                    raise
                crash_output = cmk_base.crash_reporting.create_crash_dump(
                    hostname, check_plugin_name, None, False, None,
                    description, [])
                infotexts.append(
                    crash_output.replace("Crash dump:\n", "Crash dump:\\n"))
                status = max(status, exit_spec.get("exception", 3))

            # Produce the service check result output
            output_txt = "%s - %s" % (defines.short_service_state_name(status),
                                      ", ".join(infotexts))
            if perfdata:
                output_txt += " | %s" % " ".join(perfdata)
            if long_infotexts:
                output_txt = "%s\n%s" % (output_txt, "\n".join(long_infotexts))
            output_txt += "\n"

            if _in_keepalive_mode():
                keepalive.add_keepalive_active_check_result(
                    hostname, output_txt)
                console.verbose(output_txt.encode("utf-8"))
            else:
                console.output(output_txt.encode("utf-8"))

            return status
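
The states used above come from the host's exit code specification. A hypothetical exit_spec illustrating the keys that wrapped_check_func reads (the values match the defaults used in the code):

exit_spec = {
    "timeout": 2,     # state when the check runs into MKTimeout outside keepalive mode
    "connection": 2,  # state on agent/SNMP/IP lookup errors
    "exception": 3,   # state on unexpected exceptions (a crash report is written)
}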
Example #8
    def walk(self, snmp_config, oid, check_plugin_name=None, table_base_oid=None,
             context_name=None):
        # type: (snmp_utils.SNMPHostConfig, str, Optional[str], Optional[str], Optional[str]) -> snmp_utils.SNMPRowInfo
        protospec = self._snmp_proto_spec(snmp_config)

        ipaddress = snmp_config.ipaddress
        if snmp_config.is_ipv6_primary:
            ipaddress = "[" + ipaddress + "]"

        portspec = self._snmp_port_spec(snmp_config)
        command = self._snmp_walk_command(snmp_config, context_name)
        command += ["-OQ", "-OU", "-On", "-Ot", "%s%s%s" % (protospec, ipaddress, portspec), oid]

        # list2cmdline exists, but mypy complains
        console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))  # type: ignore

        snmp_process = None
        exitstatus = None
        rowinfo = []  # type: snmp_utils.SNMPRowInfo
        try:
            snmp_process = subprocess.Popen(
                command,
                close_fds=True,
                stdin=open(os.devnull),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)

            rowinfo = self._get_rowinfo_from_snmp_process(snmp_process)

        except MKTimeout:
            # On timeout exception try to stop the process to prevent child process "leakage"
            if snmp_process:
                os.kill(snmp_process.pid, signal.SIGTERM)
                snmp_process.wait()
            raise

        finally:
            # The stdout and stderr pipes are not closed correctly on an MKTimeout.
            # Normally these pipes get closed after p.communicate() finishes.
            # Closing them a second time in the OK scenario doesn't hurt either.
            if snmp_process:
                exitstatus = snmp_process.wait()
                if snmp_process.stderr:
                    error = snmp_process.stderr.read()
                    snmp_process.stderr.close()
                if snmp_process.stdout:
                    snmp_process.stdout.close()

        if exitstatus:
            console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal +
                            "SNMP error: %s\n" % error.strip())
            raise MKSNMPError(
                "SNMP Error on %s: %s (Exit-Code: %d)" % (ipaddress, error.strip(), exitstatus))
        return rowinfo
Example #9
def _output_check_result(servicedesc, state, infotext, perftexts):
    if _show_perfdata:
        infotext_fmt = "%-56s"
        p = ' (%s)' % (" ".join(perftexts))
    else:
        p = ''
        infotext_fmt = "%s"

    console.verbose("%-20s %s%s" + infotext_fmt + "%s%s\n",
                    servicedesc.encode('utf-8'), tty.bold, tty.states[state],
                    cmk_base.utils.make_utf8(infotext.split('\n')[0]),
                    tty.normal, cmk_base.utils.make_utf8(p))
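
A hedged call sketch; the service description, state and texts are made-up values, and the module-level _show_perfdata flag is assumed to be set elsewhere:

_output_check_result(u"CPU load", 0, "15 min load: 1.05", ["load15=1.05"])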
Example #10
def run_inv_export_hooks(hostname, tree):
    for hookname, ruleset in inv_exports.items():
        entries = host_extra_conf(hostname, ruleset)
        if entries:
            console.verbose(", running %s%s%s%s..." % (tty.blue, tty.bold, hookname, tty.normal))
            params = entries[0]
            try:
                inv_export[hookname]["export_function"](hostname, params, tree)
            except Exception as e:
                if cmk.debug.enabled():
                    raise
                raise MKGeneralException("Failed to execute export hook %s: %s" % (hookname, e))
Example #11
def snmpwalk_on_suboid(hostname, ip, oid, hex_plain = False, context_name = None):
    protospec = snmp_proto_spec(hostname)
    portspec = snmp_port_spec(hostname)
    command = snmp_walk_command(hostname)
    if context_name is not None:
        command += [ "-n", context_name ]
    command += [ "-OQ", "-OU", "-On", "-Ot", "%s%s%s" % (protospec, ip, portspec), oid ]

    debug_cmd = [ "''" if a == "" else a for a in command ]
    console.vverbose("Running '%s'\n" % " ".join(debug_cmd))

    snmp_process = subprocess.Popen(command, close_fds=True, stdin=open(os.devnull),
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Ugly(1): in some cases snmpwalk inserts line feed within one
    # dataset. This happens for example on hexdump outputs longer
    # than a few bytes. Those dumps are enclosed in double quotes.
    # So if the value begins with a double quote, but the line
    # does not end with a double quote, we take the next line(s) as
    # a continuation line.
    rowinfo = []
    try:
        line_iter = iter(snmp_process.stdout)
        while True:
            line = next(line_iter).strip()
            parts = line.split('=', 1)
            if len(parts) < 2:
                continue # broken line, must contain =
            oid = parts[0].strip()
            value = parts[1].strip()
            # Filter out silly error messages from snmpwalk >:-P
            if value.startswith('No more variables') or value.startswith('End of MIB') \
               or value.startswith('No Such Object available') or value.startswith('No Such Instance currently exists'):
                continue

            if value == '"' or (len(value) > 1 and value[0] == '"' and (value[-1] != '"')): # to be continued
                while True: # scan for end of this dataset
                    nextline = next(line_iter).strip()
                    value += " " + nextline
                    if value[-1] == '"':
                        break
            rowinfo.append((oid, strip_snmp_value(value, hex_plain)))

    except StopIteration:
        pass

    error = snmp_process.stderr.read()
    exitstatus = snmp_process.wait()
    if exitstatus:
        console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal + "SNMP error: %s\n" % error.strip())
        raise MKSNMPError("SNMP Error on %s: %s (Exit-Code: %d)" % (ip, error.strip(), exitstatus))
    return rowinfo
Example #12
def get_cached_snmpwalk(hostname, fetchoid):
    path = cmk.paths.var_dir + "/snmp_cache/" + hostname + "/" + fetchoid

    try:
        console.vverbose("  Loading %s from walk cache %s\n" % (fetchoid, path))
        return eval(open(path).read())
    except IOError:
        return None # don't print error when not cached yet
    except:
        if cmk.debug.enabled():
            raise
        console.verbose("Failed to read cached SNMP walk from %s, ignoring.\n" % path)
        return None
Example #13
def cleanup_piggyback_files(time_settings):
    # type: (Dict[Tuple[Optional[str], str], int]) -> None
    """This is a housekeeping job to clean up different old files from the
    piggyback directories.

    Source status files and/or piggybacked data files are cleaned up/deleted
    if and only if they have exceeded the maximum cache age configured in the
    global settings or in the rule 'Piggybacked Host Files'."""

    console.verbose("Cleanup piggyback files; time settings: %s.\n" % repr(time_settings))

    _cleanup_old_source_status_files(time_settings)
    _cleanup_old_piggybacked_files(time_settings)
Example #14
def run_inv_export_hooks(hostname, tree):
    for hookname, ruleset in inv_exports.items():
        entries = host_extra_conf(hostname, ruleset)
        if entries:
            console.verbose(", running %s%s%s%s..." %
                            (tty.blue, tty.bold, hookname, tty.normal))
            params = entries[0]
            try:
                inv_export[hookname]["export_function"](hostname, params, tree)
            except Exception as e:
                if cmk.debug.enabled():
                    raise
                raise MKGeneralException(
                    "Failed to execute export hook %s: %s" % (hookname, e))
Example #15
def do_inv_for(hostname):
    try:
        ipaddress = lookup_ip_address(hostname)
    except:
        raise MKGeneralException("Cannot resolve hostname '%s'." % hostname)

    global g_inv_tree
    g_inv_tree = {}

    # If this is an SNMP host then determine the SNMP sections
    # that this device supports.
    if is_snmp_host(hostname):
        snmp_check_types = snmp_scan(hostname, ipaddress, for_inv=True)
    else:
        snmp_check_types = []

    for info_type, plugin in inv_info.items():
        # Skip SNMP sections that are not supported by this device
        use_caches = True
        if check_uses_snmp(info_type):
            use_caches = False
            if info_type not in snmp_check_types:
                continue

        try:
            info = get_info_for_discovery(hostname,
                                          ipaddress,
                                          info_type,
                                          use_caches=use_caches)
        except Exception as e:
            if str(e):
                raise  # Otherwise simply ignore missing agent section
            continue

        if not info:  # section not present (None or [])
            # Note: this also excludes existing sections without info..
            continue

        console.verbose(tty.green + tty.bold + info_type + " " + tty.normal)

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        inv_function = plugin["inv_function"]
        if len(inspect.getargspec(inv_function).args) == 2:
            params = get_inv_params(hostname, info_type)
            inv_function(info, params)
        else:
            inv_function(info)
Example #16
def update_dns_cache():
    updated = 0
    failed = []

    console.verbose("Cleaning up existing DNS cache...\n")
    try:
        os.unlink(_cache_path())
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

    config_cache = config.get_config_cache()

    console.verbose("Updating DNS cache...\n")
    for hostname in config_cache.all_active_hosts():
        host_config = config_cache.get_host_config(hostname)

        # Use intelligent logic. This prevents DNS lookups for hosts
        # with statically configured addresses, etc.
        for family in [4, 6]:
            if (family == 4 and host_config.is_ipv4_host) \
               or (family == 6 and host_config.is_ipv6_host):
                console.verbose("%s (IPv%d)..." % (hostname, family))
                try:
                    if family == 4:
                        ip = lookup_ipv4_address(hostname)
                    else:
                        ip = lookup_ipv6_address(hostname)

                    console.verbose("%s\n" % ip)
                    updated += 1

                except (MKTerminate, MKTimeout):
                    # If we were more specific with the exception handler below, we
                    # could drop this special handling here
                    raise

                except Exception as e:
                    failed.append(hostname)
                    console.verbose("lookup failed: %s\n" % e)
                    if cmk.utils.debug.enabled():
                        raise
                    continue

    # TODO: After calculation the cache needs to be written once

    return updated, failed
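
A short usage sketch for the variant above, which returns the number of updated hosts and the list of hosts whose lookup failed:

updated, failed = update_dns_cache()
console.verbose("Updated %d hosts, %d lookups failed\n" % (updated, len(failed)))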
Example #17
def _cleanup_old_source_status_files(piggyback_max_cachefile_age):
    base_dir = str(cmk.utils.paths.piggyback_source_dir)
    for entry in os.listdir(base_dir):
        if entry[0] == ".":
            continue

        piggyback_file_path = os.path.join(base_dir, entry)

        try:
            file_age = cmk_base.utils.cachefile_age(piggyback_file_path)
        except MKGeneralException:
            continue  # File might've been deleted. That's ok.

        if file_age > piggyback_max_cachefile_age:
            console.verbose("Removing outdated piggyback source status file %s\n" %
                            piggyback_file_path)
            _remove_piggyback_file(piggyback_file_path)
Example #18
def try_get_activation_lock():
    global _restart_lock_fd
    # In some bizarre cases (such as cmk -RR) we need to avoid duplicate locking!
    if config.restart_locking and _restart_lock_fd is None:
        lock_file = cmk.utils.paths.default_config_dir + "/main.mk"
        _restart_lock_fd = os.open(lock_file, os.O_RDONLY)
        # Make sure that open file is not inherited to monitoring core!
        fcntl.fcntl(_restart_lock_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
        try:
            console.verbose("Waiting for exclusive lock on %s.\n" % lock_file,
                            stream=sys.stderr)
            fcntl.flock(
                _restart_lock_fd, fcntl.LOCK_EX |
                (config.restart_locking == "abort" and fcntl.LOCK_NB or 0))
        except:
            return True
    return False
Example #19
def do_inv_for(hostname):
    try:
        ipaddress = lookup_ip_address(hostname)
    except:
        raise MKGeneralException("Cannot resolve hostname '%s'." % hostname)

    global g_inv_tree
    g_inv_tree = {}

    # If this is an SNMP host then determine the SNMP sections
    # that this device supports.
    if is_snmp_host(hostname):
        snmp_check_types = snmp_scan(hostname, ipaddress, for_inv=True)
    else:
        snmp_check_types = []

    for info_type, plugin in inv_info.items():
        # Skip SNMP sections that are not supported by this device
        use_caches = True
        if check_uses_snmp(info_type):
            use_caches = False
            if info_type not in snmp_check_types:
                continue

        try:
            info = get_info_for_discovery(hostname, ipaddress, info_type, use_caches=use_caches)
        except Exception as e:
            if str(e):
                raise  # Otherwise simply ignore missing agent section
            continue

        if not info:  # section not present (None or [])
            # Note: this also excludes existing sections without info..
            continue

        console.verbose(tty.green + tty.bold + info_type + " " + tty.normal)

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        inv_function = plugin["inv_function"]
        if len(inspect.getargspec(inv_function).args) == 2:
            params = get_inv_params(hostname, info_type)
            inv_function(info, params)
        else:
            inv_function(info)
Example #20
def precompile_hostchecks():
    console.verbose("Creating precompiled host check config...\n")
    config.PackedConfig().save()

    if not os.path.exists(cmk.utils.paths.precompiled_hostchecks_dir):
        os.makedirs(cmk.utils.paths.precompiled_hostchecks_dir)

    config_cache = config.get_config_cache()

    console.verbose("Precompiling host checks...\n")
    for host in config_cache.all_active_hosts():
        try:
            _precompile_hostcheck(config_cache, host)
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            console.error("Error precompiling checks for host %s: %s\n" % (host, e))
            sys.exit(5)
Example #21
def store_piggyback_raw_data(source_host, piggybacked_raw_data):
    piggyback_file_paths = []
    for piggybacked_host, lines in piggybacked_raw_data.items():
        piggyback_file_path = str(cmk.utils.paths.piggyback_dir / piggybacked_host / source_host)
        console.verbose("Storing piggyback data for: %s\n" % piggybacked_host)
        content = "\n".join(lines) + "\n"
        store.save_file(piggyback_file_path, content)
        piggyback_file_paths.append(piggyback_file_path)

    # Store the last contact with this piggyback source to be able to filter outdated data later
    # We use the mtime of this file later for comparison.
    # Only do this for hosts that sent piggyback data this turn, cleanup the status file when no
    # piggyback data was sent this turn.
    if piggybacked_raw_data:
        status_file_path = _piggyback_source_status_path(source_host)
        _store_status_file_of(status_file_path, piggyback_file_paths)
    else:
        remove_source_status_file(source_host)
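
A hypothetical input illustrating the expected structure: a mapping from piggybacked host name to the list of agent output lines received for it (host and section names are placeholders):

piggybacked_raw_data = {
    "backed-host-1": ["<<<example_section>>>", "some payload line"],
}
store_piggyback_raw_data("source-host-1", piggybacked_raw_data)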
Example #22
def _run_inventory_export_hooks(host_config, inventory_tree):
    import cmk_base.inventory_plugins as inventory_plugins
    hooks = host_config.inventory_export_hooks

    if not hooks:
        return

    console.step("Execute inventory export hooks")
    for hookname, params in hooks:
        console.verbose("Execute export hook: %s%s%s%s" %
                        (tty.blue, tty.bold, hookname, tty.normal))
        try:
            func = inventory_plugins.inv_export[hookname]["export_function"]
            func(host_config.hostname, params, inventory_tree.get_raw_tree())
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            raise MKGeneralException("Failed to execute export hook %s: %s" % (hookname, e))
Example #23
def _cleanup_old_piggybacked_files(piggyback_max_cachefile_age):
    # type: (int) -> None
    """Remove piggyback data that is not needed anymore

    The monitoring (_get_piggyback_files()) is already skipping these files,
    but we need some cleanup mechanism.

    - Remove all piggyback files created by sources without status file
    - Remove all piggyback files that are older than the current status file of the source host
    - Cleanup empty backed host directories below "piggyback"
    """
    keep_sources = set(os.listdir(str(cmk.utils.paths.piggyback_source_dir)))

    base_dir = os.path.join(cmk.utils.paths.tmp_dir, "piggyback")
    for backed_host_name in os.listdir(base_dir):
        if backed_host_name[0] == ".":
            continue

        # Cleanup piggyback files from sources that we have no status file for
        backed_host_dir_path = os.path.join(base_dir, backed_host_name)
        for source_host_name in os.listdir(backed_host_dir_path):
            if source_host_name[0] == ".":
                continue

            piggyback_file_path = os.path.join(backed_host_dir_path,
                                               source_host_name)

            delete_reason = _shall_cleanup_piggyback_file(
                piggyback_max_cachefile_age, piggyback_file_path,
                source_host_name, keep_sources)
            if delete_reason:
                console.verbose("Removing outdated piggyback file (%s) %s\n" %
                                (delete_reason, piggyback_file_path))
                _remove_piggyback_file(piggyback_file_path)

        # Remove empty backed host directory
        try:
            os.rmdir(backed_host_dir_path)
        except OSError as e:
            if e.errno == errno.ENOTEMPTY:
                pass
            else:
                raise
Example #24
def _cleanup_old_source_status_files(time_settings):
    # type: (Dict[Tuple[Optional[str], str], int]) -> None
    """Remove source status files which exceed configured maximum cache age.
    There may be several 'Piggybacked Host Files' rules where the max age is configured.
    We simply use the greatest one per source."""

    global_max_cache_age = time_settings[(None, 'max_cache_age')]  # type: int

    max_cache_age_by_sources = {}  # type: Dict[str, int]
    for piggybacked_host_folder in _get_piggybacked_host_folders():
        for source_host in _get_piggybacked_host_sources(
                piggybacked_host_folder):
            max_cache_age = _get_max_cache_age(source_host.name,
                                               piggybacked_host_folder.name,
                                               time_settings)
            max_cache_age_of_source = max_cache_age_by_sources.get(
                source_host.name)
            if max_cache_age_of_source is None:
                max_cache_age_by_sources[source_host.name] = max_cache_age

            elif max_cache_age >= max_cache_age_of_source:
                max_cache_age_by_sources[source_host.name] = max_cache_age

    base_dir = str(cmk.utils.paths.piggyback_source_dir)
    for entry in os.listdir(base_dir):
        if entry[0] == ".":
            continue

        source_file_path = os.path.join(base_dir, entry)

        try:
            file_age = cmk_base.utils.cachefile_age(source_file_path)
        except MKGeneralException:
            continue  # File might've been deleted. That's ok.

        max_cache_age = max_cache_age_by_sources.get(entry,
                                                     global_max_cache_age)
        if file_age > max_cache_age:
            console.verbose(
                "Piggyback source status file '%s' is outdated (File too old: %s). Remove it.\n"
                % (source_file_path, Age(file_age - max_cache_age)))
            _remove_piggyback_file(source_file_path)
Example #25
def do_backup(tarname):
    console.verbose("Creating backup file '%s'...\n", tarname)
    tar = tarfile.open(tarname, "w:gz")

    for name, path, canonical_name, descr, is_dir in backup_paths():

        absdir = os.path.abspath(path)
        if os.path.exists(path):
            if is_dir:
                subtarname = name + ".tar"
                subfile = StringIO.StringIO()
                subtar = tarfile.open(mode="w",
                                      fileobj=subfile,
                                      dereference=True)
                subtar.add(path, arcname=".")
                subdata = subfile.getvalue()
            else:
                subtarname = canonical_name
                subdata = open(absdir).read()

            info = tarfile.TarInfo(subtarname)
            info.mtime = time.time()
            info.uid = 0
            info.gid = 0
            info.size = len(subdata)
            info.mode = 0o644
            info.type = tarfile.REGTYPE
            info.name = subtarname
            console.verbose("  Added %s (%s) with a size of %s\n", descr,
                            absdir, render.fmt_bytes(info.size))
            tar.addfile(info, StringIO.StringIO(subdata))

    tar.close()
    console.verbose("Successfully created backup.\n")
Example #26
def _get_piggyback_files(piggyback_max_cachefile_age, hostname):
    # type: (int, str) -> List[Tuple[str, str]]
    """Gather a list of piggyback files to read for further processing.

    Please note that there may be multiple parallel calls executing the
    _get_piggyback_files(), store_piggyback_raw_data() or cleanup_piggyback_files()
    functions. Therefore all these functions need to deal with suddenly vanishing or
    updated files/directories.
    """
    files = []  # type: List[Tuple[str, str]]
    host_piggyback_dir = cmk.utils.paths.piggyback_dir / hostname

    # cleanup_piggyback_files() may remove stale piggyback files of one source
    # host and also the directory "hostname" when the last piggyback file for the
    # current host was removed. This may cause the directory listing to fail. We treat
    # this as regular case: No piggyback files for the current host.
    try:
        source_host_names = [e.name for e in host_piggyback_dir.iterdir()]
    except OSError as e:
        if e.errno == errno.ENOENT:
            return files
        else:
            raise

    for source_host in source_host_names:
        if source_host.startswith("."):
            continue

        piggyback_file_path = host_piggyback_dir / source_host

        try:
            file_age = cmk_base.utils.cachefile_age(str(piggyback_file_path))
        except MKGeneralException:
            continue  # File might've been deleted. That's ok.

        # Skip piggyback files that are outdated at all
        if file_age > piggyback_max_cachefile_age:
            console.verbose(
                "Piggyback file %s is outdated (%d seconds too old). Skip processing.\n"
                %
                (piggyback_file_path, file_age - piggyback_max_cachefile_age))
            continue

        status_file_path = _piggyback_source_status_path(source_host)
        if not os.path.exists(status_file_path):
            console.verbose(
                "Piggyback file %s is outdated (Source not sending piggyback). Skip processing.\n"
                % piggyback_file_path)
            continue

        if _is_piggyback_file_outdated(status_file_path,
                                       str(piggyback_file_path)):
            console.verbose(
                "Piggyback file %s is outdated (Not updated by source). Skip processing.\n"
                % piggyback_file_path)
            continue

        files.append((source_host, str(piggyback_file_path)))

    return files
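
A hedged usage sketch; each returned pair is (source host name, piggyback file path), and the host name and cache age are placeholders:

for source_host, piggyback_file_path in _get_piggyback_files(3600, "my-host"):
    console.verbose("Would read %s written by %s\n" % (piggyback_file_path, source_host))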
Example #27
def _save_inventory_tree(hostname, inventory_tree):
    # type: (str, StructuredDataTree) -> Optional[float]
    cmk.utils.store.makedirs(cmk.utils.paths.inventory_output_dir)

    old_time = None
    filepath = cmk.utils.paths.inventory_output_dir + "/" + hostname
    if not inventory_tree.is_empty():
        old_tree = StructuredDataTree().load_from(filepath)
        old_tree.normalize_nodes()
        if old_tree.is_equal(inventory_tree):
            console.verbose("Inventory was unchanged\n")
        else:
            if old_tree.is_empty():
                console.verbose("New inventory tree\n")
            else:
                console.verbose("Inventory tree has changed\n")
                old_time = os.stat(filepath).st_mtime
                arcdir = "%s/%s" % (cmk.utils.paths.inventory_archive_dir,
                                    hostname)
                cmk.utils.store.makedirs(arcdir)
                os.rename(filepath, arcdir + ("/%d" % old_time))
            inventory_tree.save_to(cmk.utils.paths.inventory_output_dir,
                                   hostname)

    else:
        # Remove empty inventory files. Important for host inventory icon
        if os.path.exists(filepath):
            os.remove(filepath)
        if os.path.exists(filepath + ".gz"):
            os.remove(filepath + ".gz")

    return old_time
Example #28
def update_dns_cache():
    updated = 0
    failed = []

    console.verbose("Cleaning up existing DNS cache...\n")
    try:
        os.unlink(_cache_path())
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

    config_cache = config.get_config_cache()

    console.verbose("Updating DNS cache...\n")
    for hostname in config_cache.all_active_hosts():
        host_config = config_cache.get_host_config(hostname)

        # Use intelligent logic. This prevents DNS lookups for hosts
        # with statically configured addresses, etc.
        for family in [4, 6]:
            if (family == 4 and host_config.is_ipv4_host) \
               or (family == 6 and host_config.is_ipv6_host):
                console.verbose("%s (IPv%d)..." % (hostname, family))
                try:
                    if family == 4:
                        ip = lookup_ipv4_address(hostname)
                    else:
                        ip = lookup_ipv6_address(hostname)

                    console.verbose("%s\n" % ip)
                    updated += 1
                except Exception as e:
                    failed.append(hostname)
                    console.verbose("lookup failed: %s\n" % e)
                    if cmk.utils.debug.enabled():
                        raise
                    continue

    return updated, failed
Example #29
def get_piggyback_raw_data(piggybacked_hostname, time_settings):
    # type: (str, Dict[Tuple[Optional[str], str], int]) -> List[PiggybackRawDataInfo]
    """Returns the usable piggyback data for the given host

    A list of PiggybackRawDataInfo objects, one per source file, carrying the
    source host name, the file path, the raw piggyback data and the
    processing status.
    """
    if not piggybacked_hostname:
        return []

    piggyback_file_infos = _get_piggyback_processed_file_infos(
        piggybacked_hostname, time_settings)
    if not piggyback_file_infos:
        console.verbose("No piggyback files for '%s'. Skip processing.\n" %
                        piggybacked_hostname)
        return []

    piggyback_data = []
    for file_info in piggyback_file_infos:
        try:
            raw_data = open(file_info.file_path).read()

        except IOError as e:
            reason = "Cannot read piggyback raw data from source '%s'" % file_info.source_hostname
            piggyback_raw_data = PiggybackRawDataInfo(
                source_hostname=file_info.source_hostname,
                file_path=file_info.file_path,
                successfully_processed=False,
                reason=reason,
                reason_status=0,
                raw_data='')
            console.verbose("Piggyback file '%s': %s, %s\n" %
                            (file_info.file_path, reason, e))

        else:
            piggyback_raw_data = PiggybackRawDataInfo(
                file_info.source_hostname, file_info.file_path,
                file_info.successfully_processed, file_info.reason,
                file_info.reason_status, raw_data)
            if file_info.successfully_processed:
                console.verbose("Piggyback file '%s': %s.\n" %
                                (file_info.file_path, file_info.reason))
            else:
                console.verbose(
                    "Piggyback file '%s' is outdated (%s). Skip processing.\n"
                    % (file_info.file_path, file_info.reason))
        piggyback_data.append(piggyback_raw_data)
    return piggyback_data
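
A sketch of how the returned PiggybackRawDataInfo objects might be consumed, assuming the attribute names used above (source_hostname, successfully_processed, raw_data) and a time_settings mapping as sketched after Example #2:

for file_info in get_piggyback_raw_data("backed-host-1", time_settings):
    if file_info.successfully_processed:
        console.verbose("Using %d bytes from %s\n" % (len(file_info.raw_data), file_info.source_hostname))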
Example #30
def do_inv_for(hostname):
    global g_inv_tree
    g_inv_tree = {}

    node = inv_tree("software.applications.check_mk.cluster.")

    if is_cluster(hostname):
        node["is_cluster"] = True
        do_inv_for_cluster(hostname)
    else:
        node["is_cluster"] = False
        do_inv_for_realhost(hostname)

    # Remove empty paths
    inv_cleanup_tree(g_inv_tree)
    old_timestamp = save_inv_tree(hostname)

    console.verbose(
        "..%s%s%d%s entries" %
        (tty.bold, tty.yellow, count_nodes(g_inv_tree), tty.normal))

    run_inv_export_hooks(hostname, g_inv_tree)
    return g_inv_tree, old_timestamp
Example #31
def _do_snmpwalk_on(snmp_config, options, filename):
    console.verbose("%s:\n" % snmp_config.hostname)

    oids = oids_to_walk(options)

    with open(filename, "w") as out:
        for rows in _execute_walks_for_dump(snmp_config, oids):
            for oid, value in rows:
                out.write("%s %s\n" % (oid, value))
            console.verbose("%d variables.\n" % len(rows))

    console.verbose("Wrote fetched data to %s%s%s.\n" % (tty.bold, filename, tty.normal))
Example #32
    def get(self, snmp_config, oid, context_name=None):
        if oid.endswith(".*"):
            oid_prefix = oid[:-2]
            commandtype = "getnext"
        else:
            oid_prefix = oid
            commandtype = "get"

        protospec = self._snmp_proto_spec(snmp_config)
        ipaddress = snmp_config.ipaddress
        if snmp_config.is_ipv6_primary:
            ipaddress = "[" + ipaddress + "]"
        portspec = self._snmp_port_spec(snmp_config)
        command = self._snmp_base_command(commandtype, snmp_config, context_name) + \
                   [ "-On", "-OQ", "-Oe", "-Ot",
                     "%s%s%s" % (protospec, ipaddress, portspec),
                     oid_prefix ]

        console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

        snmp_process = subprocess.Popen(command,
                                        close_fds=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        exitstatus = snmp_process.wait()
        if exitstatus:
            console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal +
                            "SNMP error\n")
            console.verbose(snmp_process.stderr.read() + "\n")
            return None

        line = snmp_process.stdout.readline().strip()
        if not line:
            console.verbose("Error in response to snmpget.\n")
            return None

        item, value = line.split("=", 1)
        value = value.strip()
        console.vverbose("SNMP answer: ==> [%s]\n" % value)
        if value.startswith('No more variables') or value.startswith('End of MIB') \
           or value.startswith('No Such Object available') or value.startswith('No Such Instance currently exists'):
            value = None

        # In case of .*, check if prefix is the one we are looking for
        if commandtype == "getnext" and not item.startswith(oid_prefix + "."):
            value = None

        # Strip quotes
        if value and value.startswith('"') and value.endswith('"'):
            value = value[1:-1]
        return value
Example #33
def save_inv_tree(hostname):
    if not os.path.exists(inventory_output_dir):
        os.makedirs(inventory_output_dir)

    old_time = None

    if inventory_pprint_output:
        r = pprint.pformat(g_inv_tree)
    else:
        r = repr(g_inv_tree)

    path = inventory_output_dir + "/" + hostname
    if g_inv_tree:
        old_tree = None
        if os.path.exists(path):
            try:
                old_tree = eval(open(path).read())
            except:
                pass

        if old_tree != g_inv_tree:
            if old_tree:
                console.verbose("..changed")
                old_time = os.stat(path).st_mtime
                arcdir = "%s/%s" % (inventory_archive_dir, hostname)
                if not os.path.exists(arcdir):
                    os.makedirs(arcdir)
                os.rename(path, arcdir + ("/%d" % old_time))
            else:
                console.verbose("..new")

            file(path, "w").write(r + "\n")
            gzip.open(path + ".gz", "w").write(r + "\n")
            # Inform Livestatus about the latest inventory update
            file(inventory_output_dir + "/.last", "w")
        else:
            console.verbose("..unchanged")

    else:
        if os.path.exists(path):  # Remove empty inventory files. Important for host inventory icon
            os.remove(path)
        if os.path.exists(path + ".gz"):
            os.remove(path + ".gz")

    return old_time
Example #34
def do_inv(hostnames):
    ensure_directory(inventory_output_dir)
    ensure_directory(inventory_archive_dir)

    # No hosts specified: do all hosts and force caching
    if hostnames is None:
        hostnames = all_active_realhosts()
        set_use_cachefile()

    errors = []
    for hostname in hostnames:
        try:
            console.verbose("Doing HW/SW-Inventory for %s..." % hostname)
            do_inv_for(hostname)
            console.verbose("..OK\n")
        except Exception as e:
            if cmk.debug.enabled():
                raise
            console.verbose("Failed: %s\n" % e)
            errors.append("Failed to inventorize %s: %s" % (hostname, e))
        cleanup_globals()
Example #35
        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        inv_function = plugin["inv_function"]
        if len(inspect.getargspec(inv_function).args) == 2:
            params = get_inv_params(hostname, info_type)
            inv_function(info, params)
        else:
            inv_function(info)

    extend_tree_with_check_mk_inventory_info(hostname)

    # Remove empty paths
    inv_cleanup_tree(g_inv_tree)
    old_timestamp = save_inv_tree(hostname)

    console.verbose("..%s%s%d%s entries" % (tty.bold, tty.yellow, count_nodes(g_inv_tree), tty.normal))

    run_inv_export_hooks(hostname, g_inv_tree)
    return g_inv_tree, old_timestamp


def extend_tree_with_check_mk_inventory_info(hostname):
    persisted_file = cmk.paths.omd_root + "/var/check_mk/persisted/%s" % hostname
    try:
        persisted_data = eval(open(persisted_file).read()).items()

    except IOError as e:
        if e.errno == 2:  # IOError: [Errno 2] No such file or directory
            return
        else:
            raise