def user_sync_valuespec(cls, site_id):
    return CascadingDropdown(
        title=_("Sync with LDAP connections"),
        orientation="horizontal",
        choices=[
            (None, _("Disable automatic user synchronization (use master site users)")),
            ("all", _("Sync users with all connections")),
            (
                "list",
                _("Sync with the following LDAP connections"),
                ListChoice(
                    choices=userdb_utils.connection_choices,
                    allow_empty=False,
                ),
            ),
        ],
        default_value="all" if site_is_local(site_id) else None,
        help=_(
            "By default the users are synchronized automatically in the interval configured "
            "in the connection. For example the LDAP connector synchronizes the users every "
            "five minutes by default. The interval can be changed for each connection "
            'individually in the <a href="wato.py?mode=ldap_config">connection settings</a>. '
            "Please note that the synchronization is only performed on the master site in "
            "distributed setups by default.<br>"
            "The remote sites don't perform automatic user synchronizations with the "
            "configured connections. But you can configure each site to either "
            "synchronize the users with all configured connections or a specific list of "
            "connections."),
    )

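# The dropdown produces one of three values, matching the UserSyncConfig type
# used by user_sync_default_config below (the tuple form is an assumption based
# on how CascadingDropdown serializes choices that carry a nested valuespec):
#
#   None             - automatic user synchronization disabled on this site
#   "all"            - sync users with every configured LDAP connection
#   ("list", [...])  - sync only with the listed connection IDs
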
def check_mk_automation(siteid: SiteId,
                        command: str,
                        args: Optional[Sequence[str]] = None,
                        indata: Any = "",
                        stdin_data: Optional[str] = None,
                        timeout: Optional[int] = None,
                        sync: bool = True,
                        non_blocking_http: bool = False) -> Any:
    if args is None:
        args = []

    if not siteid or site_is_local(siteid):
        return check_mk_local_automation(command, args, indata, stdin_data, timeout)

    return check_mk_remote_automation(
        site_id=siteid,
        command=command,
        args=args,
        indata=indata,
        stdin_data=stdin_data,
        timeout=timeout,
        sync=sync,
        non_blocking_http=non_blocking_http,
    )

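# Usage sketch (hypothetical site ID and automation arguments): callers never
# choose between the local and remote transport themselves, the dispatch above
# does it based on the site ID.
#
#   service_info = check_mk_automation(
#       SiteId("remote1"),
#       "analyse-service",
#       args=["myhost", "CPU load"],
#   )
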
def _get_diagnostics_dump_file(self, site: str, tarfile_name: str) -> bytes:
    if site_is_local(site):
        # Resolves to the module level helper of the same name, not to this
        # method (bare names inside a method are looked up at module scope).
        return _get_diagnostics_dump_file(tarfile_name)

    return do_remote_automation(get_site_config(site), "diagnostics-dump-get-file", [
        ("tarfile_name", tarfile_name),
    ])

def _get_agent_output_file(self) -> bytes:
    if site_is_local(self._request.host.site_id()):
        return get_fetch_agent_output_file(self._request)

    return watolib.do_remote_automation(
        get_site_config(self._request.host.site_id()), "fetch-agent-output-get-file", [
            ("request", repr(self._request.serialize())),
        ])

def _get_job_status(self) -> Dict:
    if site_is_local(self._request.host.site_id()):
        return get_fetch_agent_job_status(self._request)

    return watolib.do_remote_automation(
        get_site_config(self._request.host.site_id()), "fetch-agent-output-get-status", [
            ("request", repr(self._request.serialize())),
        ])

def site_globals_editable(site_id, site) -> bool:
    # The site is a remote site of another site. Allow editing the site
    # specific globals that may have been pushed to it when remote WATO is
    # enabled.
    if is_wato_slave_site():
        return True

    # Local site: Don't enable site specific globals when no remote sites are
    # configured.
    if not has_wato_slave_sites():
        return False

    return site["replication"] or site_is_local(site_id)

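# Decision summary, derived from the checks above:
#
#   remote (WATO slave) site                  -> True  (pushed globals may be edited)
#   central site without remote sites         -> False (site specific globals unused)
#   central site with remote sites configured -> True for sites that replicate
#                                                and for the local site itself
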
def user_sync_default_config(site_name: SiteId) -> UserSyncConfig:
    global_user_sync = _transform_userdb_automatic_sync(config.userdb_automatic_sync)
    if global_user_sync == "master":
        if site_is_local(site_name) and not is_wato_slave_site():
            user_sync_default: UserSyncConfig = "all"
        else:
            user_sync_default = None
    else:
        user_sync_default = global_user_sync

    return user_sync_default

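# Resolution examples (assuming the legacy "master" value for the global
# userdb_automatic_sync setting):
#
#   central site (local, not a WATO slave) -> "all" (sync with every connection)
#   any remote site                        -> None  (no automatic sync)
#
# Every other global value is passed through unchanged for all sites.
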
def _start_fetch(self) -> None:
    """Start the job on the site the host is monitored by"""
    if site_is_local(self._request.host.site_id()):
        start_fetch_agent_job(self._request)
        return

    watolib.do_remote_automation(
        get_site_config(self._request.host.site_id()), "fetch-agent-output-start", [
            ("request", repr(self._request.serialize())),
        ])

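# Fetching agent output is a three step protocol with the same local/remote
# dispatch in every step: "fetch-agent-output-start" starts the background job
# (above), "fetch-agent-output-get-status" polls it (_get_job_status) and
# "fetch-agent-output-get-file" downloads the result (_get_agent_output_file).
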
def _add_change_to_site(
    self,
    site_id: SiteId,
    change_id: str,
    action_name: str,
    text: LogMessage,
    object_ref: Optional[ObjectRef],
    add_user: bool,
    need_sync: Optional[bool],
    need_restart: Optional[bool],
    domains: List[Type[ABCConfigDomain]],
    domain_settings: Optional[DomainSettings],
) -> None:
    # Individual changes may override the domain restart default value
    if need_restart is None:
        need_restart = any(d.needs_activation for d in domains)

    if need_sync is None:
        need_sync = any(d.needs_sync for d in domains)

    # Escaping here is against our regular rule of escaping at the last
    # possible time: when rendering. But this is the last place where we can
    # distinguish between HTML() encapsulated (already escaped / allowed) HTML
    # and strings that still need to be escaped.
    text = escaping.escape_text(text)

    # If the local site doesn't need a restart, there is no reason to add a
    # change for that site. Otherwise the activation page would show a change,
    # but the site would not be selected for activation.
    if site_is_local(site_id) and need_restart is False:
        return None

    SiteChanges(SiteChanges.make_path(site_id)).append({
        "id": change_id,
        "action_name": action_name,
        "text": "%s" % text,
        "object": object_ref,
        "user_id": user.id if add_user else None,
        "domains": [d.ident() for d in domains],
        "time": time.time(),
        "need_sync": need_sync,
        "need_restart": need_restart,
        "domain_settings": domain_settings or {},
    })

def _perform_tests_for_site(
        self, site_id: SiteId,
        result_queue: "multiprocessing.Queue[Tuple[SiteId, str]]") -> None:
    self._logger.debug("[%s] Starting" % site_id)
    try:
        # Would be better to clean all open fds that are not needed, but we don't
        # know the FDs of the result_queue pipe. Can we find it out somehow?
        # Cleanup resources of the apache
        # for x in range(3, 256):
        #     try:
        #         os.close(x)
        #     except OSError, e:
        #         if e.errno == errno.EBADF:
        #             pass
        #         else:
        #             raise

        # Reinitialize logging targets
        log.init_logging()  # NOTE: We run in a subprocess!

        if site_is_local(site_id):
            automation = AutomationCheckAnalyzeConfig()
            results_data = automation.execute(automation.get_request())
        else:
            results_data = watolib.do_remote_automation(
                get_site_config(site_id),
                "check-analyze-config",
                [],
                timeout=request.request_timeout - 10,
            )

        self._logger.debug("[%s] Finished" % site_id)

        result = {
            "state": 0,
            "response": results_data,
        }

    except Exception:
        self._logger.exception("[%s] Failed" % site_id)
        result = {
            "state": 1,
            "response": "Traceback:<br>%s" %
                        (traceback.format_exc().replace("\n", "<br>\n")),
        }
    finally:
        result_queue.put((site_id, repr(result)))
        result_queue.close()
        # Note: multiprocessing.Queue has no join(); the original trailing
        # result_queue.join() call would raise AttributeError and was dropped.
        result_queue.join_thread()

def get_check_table(discovery_request: StartDiscoveryRequest) -> DiscoveryResult:
    """Gathers the check table using a background job

    Handles local and remote sites via an automation call. In both cases the
    ServiceDiscoveryBackgroundJob is executed to collect the check table
    asynchronously. In case of a remote site the chain is:

    Starting from central site:

    _get_check_table()
           |
           v
    automation service-discovery-job-discover
           |
           v
    to remote site
           |
           v
    AutomationServiceDiscoveryJob().execute()
           |
           v
    _get_check_table()
    """
    if discovery_request.options.action == DiscoveryAction.TABULA_RASA:
        watolib.add_service_change(
            discovery_request.host,
            "refresh-autochecks",
            _("Refreshed check configuration of host '%s'") % discovery_request.host.name(),
        )

    if site_is_local(discovery_request.host.site_id()):
        return execute_discovery_job(discovery_request)

    sync_changes_before_remote_automation(discovery_request.host.site_id())

    return DiscoveryResult.deserialize(
        watolib.do_remote_automation(
            get_site_config(discovery_request.host.site_id()),
            "service-discovery-job",
            [
                ("host_name", discovery_request.host.name()),
                ("options", json.dumps(discovery_request.options._asdict())),
            ],
        ))

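# Usage sketch (hypothetical host and options; StartDiscoveryRequest is assumed
# to be the request tuple carrying host, folder and discovery options):
#
#   result = get_check_table(
#       StartDiscoveryRequest(host=host, folder=host.folder(), options=options))
#   for entry in result.check_table:
#       ...
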
def _automation_serialized(
    command: str,
    *,
    siteid: Optional[SiteId] = None,
    args: Optional[Sequence[str]] = None,
    indata: Any = "",
    stdin_data: Optional[str] = None,
    timeout: Optional[int] = None,
    sync: bool = True,
    non_blocking_http: bool = False,
) -> AutomationResponse:
    if args is None:
        args = []

    if not siteid or site_is_local(siteid):
        cmdline, serialized_result = check_mk_local_automation_serialized(
            command=command,
            args=args,
            indata=indata,
            stdin_data=stdin_data,
            timeout=timeout,
        )
        return AutomationResponse(
            command=command,
            serialized_result=serialized_result,
            local=True,
            cmdline=cmdline,
        )

    return AutomationResponse(
        command=command,
        serialized_result=check_mk_remote_automation_serialized(
            site_id=siteid,
            command=command,
            args=args,
            indata=indata,
            stdin_data=stdin_data,
            timeout=timeout,
            sync=sync,
            non_blocking_http=non_blocking_http,
        ),
        local=False,
        cmdline=[],
    )

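# Usage sketch (hypothetical command and site ID): the response keeps the raw
# serialized result next to the metadata needed for deserialization and error
# reporting.
#
#   response = _automation_serialized("get-configuration", siteid=SiteId("mysite"))
#   if response.local:
#       ...  # response.cmdline holds the locally executed command line
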
def _update_distributed_wato_file(sites):
    # Note: we cannot access config.sites here, since we
    # are currently in the process of saving the new
    # site configuration.
    distributed = False
    for siteid, site in sites.items():
        if site.get("replication"):
            distributed = True
        if site_is_local(siteid):
            cmk.gui.watolib.activate_changes.create_distributed_wato_files(
                base_dir=Path(cmk.utils.paths.omd_root),
                site_id=siteid,
                is_remote=False,
            )

    # Remove the distributed wato file
    # a) If there is no distributed WATO setup
    # b) If the local site could not be gathered
    if not distributed:  # or not found_local:
        _delete_distributed_wato_file()

def get_check_table(discovery_request: StartDiscoveryRequest) -> DiscoveryResult:
    """Gathers the check table using a background job

    Handles local and remote sites via an automation call. In both cases the
    ServiceDiscoveryBackgroundJob is executed to collect the check table
    asynchronously. In case of a remote site the chain is:

    Starting from central site:

    _get_check_table()
           |
           v
    automation service-discovery-job-discover
           |
           v
    to remote site
           |
           v
    AutomationServiceDiscoveryJob().execute()
           |
           v
    _get_check_table()
    """
    if discovery_request.options.action == DiscoveryAction.TABULA_RASA:
        watolib.add_service_change(
            discovery_request.host,
            "refresh-autochecks",
            _("Refreshed check configuration of host '%s'") % discovery_request.host.name(),
        )

    if site_is_local(discovery_request.host.site_id()):
        return execute_discovery_job(discovery_request)

    discovery_result = _get_check_table_from_remote(discovery_request)
    discovery_result = _add_missing_discovery_result_fields(discovery_result)
    return discovery_result

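# Note: this is an alternative revision of get_check_table. Instead of syncing
# pending changes and deserializing the remote answer directly, it delegates to
# _get_check_table_from_remote and then fills in fields the remote site may not
# have sent (presumably for compatibility with older remote versions).
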
def execute_network_scan_job() -> None:
    """Executed by the multisite cron job once a minute. Is only executed in
    the central site. Finds the next folder to scan and starts it via WATO
    automation. The result is written to the folder in the master site."""
    init_wato_datastructures(with_wato_lock=True)

    if is_wato_slave_site():
        return  # Don't execute this job on slaves.

    folder = _find_folder_to_scan()
    if not folder:
        return  # Nothing to do.

    run_as = folder.attribute("network_scan")["run_as"]
    if not userdb.user_exists(run_as):
        raise MKGeneralException(
            _("The user %s used by the network "
              "scan of the folder %s does not exist.") % (run_as, folder.title()))

    with UserContext(run_as):
        result: NetworkScanResult = {
            "start": time.time(),
            "end": True,  # means currently running
            "state": None,
            "output": "The scan is currently running.",
        }

        # Mark the scan as being in progress: This is important in case the
        # request takes longer than the interval of the cron job (1 minute).
        # Otherwise the scan might be started a second time before the first
        # one has finished.
        _save_network_scan_result(folder, result)

        try:
            if site_is_local(folder.site_id()):
                found = _do_network_scan(folder)
            else:
                found = do_remote_automation(get_site_config(folder.site_id()), "network-scan",
                                             [("folder", folder.path())])

            if not isinstance(found, list):
                raise MKGeneralException(_("Received an invalid network scan result: %r") % found)

            _add_scanned_hosts_to_folder(folder, found)

            result.update({
                "state": True,
                "output": _("The network scan found %d new hosts.") % len(found),
            })
        except Exception as e:
            result.update({
                "state": False,
                "output": _("An exception occurred: %s") % e,
            })
            logger.error("Exception in network scan:\n%s", traceback.format_exc())

        result["end"] = time.time()
        _save_network_scan_result(folder, result)

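# Lifecycle of the persisted NetworkScanResult, as written above:
#
#   scan running -> {"end": True,        "state": None,  "output": "The scan is currently running."}
#   scan done    -> {"end": <timestamp>, "state": True,  "output": "The network scan found N new hosts."}
#   scan failed  -> {"end": <timestamp>, "state": False, "output": "An exception occurred: ..."}
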