Example #1
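# Presumed context for this snippet; the exact import paths vary across
# Checkmk versions and are an assumption here:
#   import cmk.utils.tty as tty
#   from cmk.utils.log import console
#   import cmk.base.config as config
# DiscoveryMode, OnError, _ServiceFilters, _AutodiscoveryQueue, _may_rediscover,
# automation_discovery and schedule_discovery_check come from the surrounding
# discovery module.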
def _discover_marked_host(
    *,
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    autodiscovery_queue: _AutodiscoveryQueue,
    reference_time: float,
    oldest_queued: float,
) -> bool:
    host_name = host_config.hostname
    console.verbose(f"{tty.bold}{host_name}{tty.normal}:\n")

    if host_config.discovery_check_parameters is None:
        console.verbose("  failed: discovery check disabled\n")
        return False
    rediscovery_parameters = host_config.discovery_check_parameters.get(
        "inventory_rediscovery", {})

    reason = _may_rediscover(
        rediscovery_parameters=rediscovery_parameters,
        reference_time=reference_time,
        oldest_queued=oldest_queued,
    )
    if reason:
        console.verbose(f"  skipped: {reason}\n")
        return False

    result = automation_discovery(
        config_cache=config_cache,
        host_config=host_config,
        mode=DiscoveryMode(rediscovery_parameters.get("mode")),
        service_filters=_ServiceFilters.from_settings(rediscovery_parameters),
        on_error=OnError.IGNORE,
        use_cached_snmp_data=True,
        # autodiscovery is run every 5 minutes (see
        # omd/packages/check_mk/skel/etc/cron.d/cmk_discovery)
        # make sure we may use the file the active discovery check left behind:
        max_cachefile_age=config.max_cachefile_age(discovery=600),
    )
    if result.error_text is not None:
        # for offline hosts the error message is empty. This is to remain
        # compatible with the automation code
        console.verbose(
            f"  failed: {result.error_text or 'host is offline'}\n")
        # delete the file even in the error case; otherwise we would run into
        # the same error every time the cron job runs
        autodiscovery_queue.remove(host_name)
        return False

    # Treat the run as a change if services or host labels were added or
    # removed, or if not all existing services were simply kept.
    something_changed = (result.self_new != 0 or result.self_removed != 0
                         or result.self_kept != result.self_total
                         or result.clustered_new != 0
                         or result.clustered_vanished != 0
                         or result.self_new_host_labels != 0)

    if not something_changed:
        console.verbose("  nothing changed.\n")
        activation_required = False
    else:
        console.verbose(
            f"  {result.self_new} new, {result.self_removed} removed, "
            f"{result.self_kept} kept, {result.self_total} total services "
            f"and {result.self_new_host_labels} new host labels. "
            f"clustered new {result.clustered_new}, clustered vanished "
            f"{result.clustered_vanished}\n")

        # Note: Even if the actual mark-for-discovery flag may have been created by a cluster host,
        #       the activation decision is based on the discovery configuration of the node
        activation_required = bool(rediscovery_parameters["activation"])

        # Enforce base code creating a new host config object after this change
        config_cache.invalidate_host_config(host_name)

        # Now ensure that the discovery service is updated right after the changes
        schedule_discovery_check(host_name)

    autodiscovery_queue.remove(host_name)

    return activation_required
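
For context, here is a minimal sketch of what the _AutodiscoveryQueue used above
might look like, assuming it wraps the per-host flag files that Example #2 still
handles directly via os.remove. This is an illustration under that assumption,
not Checkmk's actual class:

from pathlib import Path
from typing import Iterator, Optional


class _AutodiscoveryQueue:
    """Hypothetical queue backed by one flag file per marked host."""

    def __init__(self, directory: Path) -> None:
        self._dir = directory

    def queued_hosts(self) -> Iterator[str]:
        # every regular file in the directory marks one host for rediscovery
        return (f.name for f in self._dir.iterdir() if f.is_file())

    def oldest(self) -> Optional[float]:
        # mtime of the oldest flag file; callers pass this as oldest_queued
        mtimes = [f.stat().st_mtime for f in self._dir.iterdir() if f.is_file()]
        return min(mtimes) if mtimes else None

    def remove(self, host_name: str) -> None:
        # mirror the old behaviour: removing a missing flag is not an error
        try:
            (self._dir / host_name).unlink()
        except OSError:
            pass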
Example #2
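# Presumed context for this older variant; import paths are again an
# assumption and may differ between Checkmk versions:
#   import os
#   import cmk.utils.tty as tty
#   from cmk.utils.log import console
#   import cmk.base.config as config
# DiscoveryMode, _ServiceFilters, _get_autodiscovery_dir, _may_rediscover,
# _get_rediscovery_parameters, discover_on_host and schedule_discovery_check
# come from the surrounding discovery module.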
def _discover_marked_host(config_cache: config.ConfigCache, host_config: config.HostConfig,
                          now_ts: float, oldest_queued: float) -> bool:
    host_name = host_config.hostname
    something_changed = False

    console.verbose(f"{tty.bold}{host_name}{tty.normal}:\n")
    host_flag_path = os.path.join(_get_autodiscovery_dir(), host_name)

    params = host_config.discovery_check_parameters
    if params is None:
        console.verbose("  failed: discovery check disabled\n")
        return False

    reason = _may_rediscover(params, now_ts, oldest_queued)
    if not reason:
        result = discover_on_host(
            config_cache=config_cache,
            host_config=host_config,
            mode=DiscoveryMode(_get_rediscovery_parameters(params).get("mode")),
            service_filters=_ServiceFilters.from_settings(_get_rediscovery_parameters(params)),
            on_error="ignore",
            use_cached_snmp_data=True,
            # autodiscovery is run every 5 minutes (see
            # omd/packages/check_mk/skel/etc/cron.d/cmk_discovery)
            # make sure we may use the file the active discovery check left behind:
            max_cachefile_age=600,
        )
        if result.error_text is not None:
            if result.error_text:
                console.verbose(f"failed: {result.error_text}\n")
            else:
                # for offline hosts the error message is empty. This is to remain
                # compatible with the automation code
                console.verbose("  failed: host is offline\n")
        else:
            if (result.self_new == 0
                    and result.self_removed == 0
                    and result.self_kept == result.self_total
                    and result.clustered_new == 0
                    and result.clustered_vanished == 0
                    and result.self_new_host_labels == 0):
                console.verbose("  nothing changed.\n")
            else:
                console.verbose(f"  {result.self_new} new, {result.self_removed} removed, "
                                f"{result.self_kept} kept, {result.self_total} total services "
                                f"and {result.self_new_host_labels} new host labels. "
                                f"clustered new {result.clustered_new}, clustered vanished "
                                f"{result.clustered_vanished}")

                # Note: Even if the actual mark-for-discovery flag may have been created by a cluster host,
                #       the activation decision is based on the discovery configuration of the node
                if _get_rediscovery_parameters(params)["activation"]:
                    something_changed = True

                # Enforce base code creating a new host config object after this change
                config_cache.invalidate_host_config(host_name)

                # Now ensure that the discovery service is updated right after the changes
                schedule_discovery_check(host_name)

        # delete the file even in the error case; otherwise we would run into
        # the same error every time the cron job runs
        try:
            os.remove(host_flag_path)
        except OSError:
            pass
    else:
        console.verbose(f"  skipped: {reason}\n")

    return something_changed
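
For completeness, a hedged sketch of how this older variant might be driven for
all marked hosts. The loop itself is an assumption for illustration, not the
actual Checkmk scheduler code; config_cache.get_host_config and
_get_autodiscovery_dir are taken from the snippet above:

import os
import time


def _discover_marked_hosts(config_cache: "config.ConfigCache") -> bool:
    autodiscovery_dir = _get_autodiscovery_dir()
    host_names = os.listdir(autodiscovery_dir)
    if not host_names:
        return False

    now_ts = time.time()
    # age of the oldest flag file; _may_rediscover uses it for rate limiting
    oldest_queued = min(
        os.path.getmtime(os.path.join(autodiscovery_dir, h)) for h in host_names
    )

    activation_required = False
    for host_name in host_names:
        host_config = config_cache.get_host_config(host_name)
        if _discover_marked_host(config_cache, host_config, now_ts, oldest_queued):
            activation_required = True
    return activation_required

A True result would then trigger activation of the pending changes, as the
activation comments in both examples suggest.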