def test_active_check_result():
    """Combining subresults keeps the worst state and concatenates the parts.

    Two subresults (OK and CRIT) must merge into a single CRIT result whose
    summaries, details and metrics are the concatenation of the inputs.
    """
    combined = ActiveCheckResult.from_subresults(
        ActiveCheckResult(0, ("Ok",), ("We're good",), ("metric1",)),
        ActiveCheckResult(2, ("Critical",), ("We're doomed",), ("metric2",)),
    )
    assert combined == ActiveCheckResult(
        2,
        ["Ok", "Critical"],
        ["We're good", "We're doomed"],
        ["metric1", "metric2"],
    )
def active_check_inventory(hostname: HostName, options: Dict[str, int]) -> ActiveCheckResult:
    """Run the HW/SW inventory for *hostname* and report it as an active check result.

    The result combines: whether the inventory tree could be saved, the tree
    check (changes/missing data), the per-source states, and parsing errors.
    """
    # Severity settings from the command line options (the former '_inv_' prefix
    # on these locals was dropped, as the old TODO asked).
    hw_changes = options.get("hw-changes", 0)
    sw_changes = options.get("sw-changes", 0)
    sw_missing = options.get("sw-missing", 0)
    fail_status = options.get("inv-fail-status", 1)

    host_config = config.HostConfig.make_host_config(hostname)
    retentions_tracker = RetentionsTracker(host_config.inv_retention_intervals)

    inv_result = _inventorize_host(
        host_config=host_config,
        selected_sections=NO_SELECTION,
        run_plugin_names=EVERYTHING,
        retentions_tracker=retentions_tracker,
    )
    trees = inv_result.trees

    retentions = Retentions(
        retentions_tracker,
        trees.inventory,
        # If no intervals are configured then remove all known retentions
        do_update=bool(host_config.inv_retention_intervals),
    )

    if inv_result.safe_to_write:
        old_tree = _save_inventory_tree(hostname, trees.inventory, retentions)
        update_result = ActiveCheckResult(0, (), (), ())
    else:
        old_tree, sources_state = None, 1
        update_result = ActiveCheckResult(
            sources_state,
            (f"Cannot update tree{state_markers[sources_state]}",),
            (),
            (),
        )

    _run_inventory_export_hooks(host_config, trees.inventory)

    return ActiveCheckResult.from_subresults(
        update_result,
        _check_inventory_tree(trees, old_tree, sw_missing, sw_changes, hw_changes),
        *check_sources(
            source_results=inv_result.source_results,
            mode=Mode.INVENTORY,
            # Do not use source states which would overwrite "State when inventory fails" in the
            # ruleset "Do hardware/software Inventory". These are handled by the "Check_MK" service
            override_non_ok_state=fail_status,
        ),
        check_parsing_errors(
            errors=inv_result.parsing_errors,
            error_state=fail_status,
        ),
    )
def active_check_discovery(
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    *,
    # The next argument *must* remain optional for the DiscoCheckExecutor.
    # See Also: `cmk.base.agent_based.checking.active_check_checking()`.
    fetcher_messages: Sequence[FetcherMessage] = (),
) -> ActiveCheckResult:
    """Execute the "Check_MK Discovery" service check and combine its sub results.

    Checks for undiscovered services and host labels, and (depending on the
    rediscovery parameters) schedules an automatic rediscovery.
    """
    # Note: '--cache' is set in core_cmc, nagios template or even on CL and means:
    # 1. use caches as default:
    #    - Set FileCacheFactory.maybe = True (set max_cachefile_age, else 0)
    #    - Set FileCacheFactory.use_outdated = True
    # 2. Then these settings are used to read cache file or not
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(host_name)

    params = host_config.discovery_check_parameters
    if params is None:
        # NOTE: an empty dict is a valid configuration and must *not* be
        # replaced by the defaults — hence the explicit None check.
        params = host_config.default_discovery_check_parameters()
    rediscovery_parameters = params.get("inventory_rediscovery", {})
    discovery_mode = DiscoveryMode(rediscovery_parameters.get("mode"))

    # In case of keepalive discovery we always have an ipaddress. When called as non keepalive
    # ipaddress is always None
    if ipaddress is None and not host_config.is_cluster:
        ipaddress = config.lookup_ip_address(host_config)

    parsed_sections_broker, source_results = make_broker(
        config_cache=config_cache,
        host_config=host_config,
        ip_address=ipaddress,
        mode=Mode.DISCOVERY,
        fetcher_messages=fetcher_messages,
        selected_sections=NO_SELECTION,
        file_cache_max_age=config.max_cachefile_age(
            discovery=None if cmk.core_helpers.cache.FileCacheFactory.maybe else 0
        ),
        force_snmp_cache_refresh=False,
        on_scan_error=OnError.RAISE,
    )

    host_labels = analyse_host_labels(
        host_config=host_config,
        ipaddress=ipaddress,
        parsed_sections_broker=parsed_sections_broker,
        load_labels=True,
        save_labels=False,
        on_error=OnError.RAISE,
    )
    services = _get_host_services(
        host_config,
        ipaddress,
        parsed_sections_broker,
        on_error=OnError.RAISE,
    )

    services_result, services_need_rediscovery = _check_service_lists(
        host_name=host_name,
        services_by_transition=services,
        params=params,
        service_filters=_ServiceFilters.from_settings(rediscovery_parameters),
        discovery_mode=discovery_mode,
    )
    host_labels_result, host_labels_need_rediscovery = _check_host_labels(
        host_labels,
        int(params.get("severity_new_host_label", 1)),
        discovery_mode,
    )
    parsing_errors_result = check_parsing_errors(parsed_sections_broker.parsing_errors())

    # Only schedule a rediscovery if parsing went through cleanly; otherwise the
    # rediscovery would operate on incomplete data.
    return ActiveCheckResult.from_subresults(
        services_result,
        host_labels_result,
        *check_sources(source_results=source_results, mode=Mode.DISCOVERY),
        parsing_errors_result,
        _schedule_rediscovery(
            host_config=host_config,
            need_rediscovery=(services_need_rediscovery or host_labels_need_rediscovery)
            and parsing_errors_result.state == 0,
        ),
    )
def _execute_checkmk_checks(
    *,
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    fetcher_messages: Sequence[FetcherMessage] = (),
    run_plugin_names: Container[CheckPluginName],
    selected_sections: SectionNameCollection,
    dry_run: bool,
    show_perfdata: bool,
) -> ActiveCheckResult:
    """Fetch, parse and check all services of *hostname*, returning the "Check_MK" result.

    Combines the per-source states, parsing errors, missing-data notes and the
    fetch/processing timings into one active check result.  Submission to the
    core is finalized even on error (``finally``).
    """
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)
    exit_spec = host_config.exit_code_spec()

    # Explicit section selection forces fetching, regular checking may use caches.
    mode = Mode.CHECKING if selected_sections is NO_SELECTION else Mode.FORCE_SECTIONS

    try:
        license_usage.try_history_update()

        # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
        # address is unknown). When called as non keepalive ipaddress may be None or
        # is already an address (2nd argument)
        if ipaddress is None and not host_config.is_cluster:
            ipaddress = config.lookup_ip_address(host_config)

        services = config.resolve_service_dependencies(
            host_name=hostname,
            services=sorted(
                check_table.get_check_table(hostname).values(),
                key=lambda service: service.description,
            ),
        )

        with CPUTracker() as tracker:
            broker, source_results = make_broker(
                config_cache=config_cache,
                host_config=host_config,
                ip_address=ipaddress,
                mode=mode,
                selected_sections=selected_sections,
                file_cache_max_age=host_config.max_cachefile_age,
                fetcher_messages=fetcher_messages,
                force_snmp_cache_refresh=False,
                on_scan_error=OnError.RAISE,
            )
            num_success, plugins_missing_data = check_host_services(
                config_cache=config_cache,
                host_config=host_config,
                ipaddress=ipaddress,
                parsed_sections_broker=broker,
                services=services,
                run_plugin_names=run_plugin_names,
                dry_run=dry_run,
                show_perfdata=show_perfdata,
            )
            # The inventory piggybacks on a full checking cycle only.
            if run_plugin_names is EVERYTHING:
                inventory.do_inventory_actions_during_checking_for(
                    config_cache,
                    host_config,
                    ipaddress,
                    parsed_sections_broker=broker,
                )
            timed_results = [
                *check_sources(
                    source_results=source_results,
                    mode=mode,
                    include_ok_results=True,
                ),
                *check_parsing_errors(errors=broker.parsing_errors()),
                *_check_plugins_missing_data(
                    plugins_missing_data,
                    exit_spec,
                    bool(num_success),
                ),
            ]
        # Timing is appended after the tracker's context has closed.
        return ActiveCheckResult.from_subresults(
            *timed_results,
            _timing_results(tracker, fetcher_messages),
        )
    finally:
        _submit_to_core.finalize()
# NOTE(review): this redefines `_execute_checkmk_checks` from above with a
# different signature (pre-fetched data instead of raw fetcher messages) —
# presumably two file versions were concatenated; verify against the repository.
def _execute_checkmk_checks(
    *,
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    fetched: Sequence[Tuple[Source, FetcherMessage]],
    run_plugin_names: Container[CheckPluginName],
    selected_sections: SectionNameCollection,
    dry_run: bool,
    show_perfdata: bool,
) -> ActiveCheckResult:
    """Check all services of *hostname* from already-fetched data.

    Combines per-source states, parsing errors, missing-data notes and the
    processing timings into the "Check_MK" active check result.  Submission to
    the core is finalized even on error (``finally``).
    """
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)
    exit_spec = host_config.exit_code_spec()

    try:
        license_usage.try_history_update()

        services = config.resolve_service_dependencies(
            host_name=hostname,
            services=sorted(
                check_table.get_check_table(hostname).values(),
                key=lambda service: service.description,
            ),
        )

        # The broker is built from pre-fetched data, so only the *processing*
        # (below) is wrapped in the CPU tracker — not the fetching.
        broker, source_results = make_broker(
            fetched=fetched,
            selected_sections=selected_sections,
            file_cache_max_age=host_config.max_cachefile_age,
        )

        with CPUTracker() as tracker:
            num_success, plugins_missing_data = check_host_services(
                config_cache=config_cache,
                host_config=host_config,
                ipaddress=ipaddress,
                parsed_sections_broker=broker,
                services=services,
                run_plugin_names=run_plugin_names,
                dry_run=dry_run,
                show_perfdata=show_perfdata,
            )
            # The inventory piggybacks on a full checking cycle only.
            if run_plugin_names is EVERYTHING:
                inventory.do_inventory_actions_during_checking_for(
                    config_cache,
                    host_config,
                    parsed_sections_broker=broker,
                )
            timed_results = [
                *check_sources(
                    source_results=source_results,
                    include_ok_results=True,
                ),
                *check_parsing_errors(errors=broker.parsing_errors()),
                *_check_plugins_missing_data(
                    plugins_missing_data,
                    exit_spec,
                    bool(num_success),
                ),
            ]
        # Timing is appended after the tracker's context has closed.
        return ActiveCheckResult.from_subresults(
            *timed_results,
            _timing_results(tracker.duration, [message for _source, message in fetched]),
        )
    finally:
        _submit_to_core.finalize()