def remove_tls_registration(hosts_by_site: Mapping[SiteId, Sequence[HostName]]) -> None:
    """Remove the agent TLS registration of the given hosts on their sites.

    Hosts on the local site are handled directly; hosts on remote sites are
    handled via the "remove-tls-registration" automation call.

    BUGFIX: the original body used `return` inside the loop, which aborted
    after the first processed site (and returned the remote-automation result
    from a function annotated `-> None`), silently skipping every remaining
    site in `hosts_by_site`. All sites must be processed.
    """
    for site_id, host_names in hosts_by_site.items():
        # Nothing to do for sites without affected hosts
        if not host_names:
            continue

        if site_is_local(site_id):
            _remove_tls_registration(host_names)
            continue  # was `return`: keep iterating over the remaining sites

        do_remote_automation(
            get_site_config(site_id),
            "remove-tls-registration",
            [("host_names", json.dumps(host_names))],
        )
def _execute_site_sync(site_id: SiteId, site_spec: SiteConfiguration,
                       site_request: SiteRequest) -> SiteResult:
    """Executes the sync with a site. Is executed in a dedicated subprocess (One per site)"""
    try:
        logger.debug(_("[%s] Starting sync for site"), site_id)

        # timeout=100: Use a value smaller than the default apache request timeout
        raw_response = do_remote_automation(
            site_spec,
            "discovered-host-label-sync",
            [("request", repr(site_request.serialize()))],
            timeout=100,
        )
        response = DiscoveredHostLabelSyncResponse(**raw_response)

        logger.debug(_("[%s] Finished sync for site"), site_id)

        return SiteResult(
            site_id=site_id,
            success=True,
            error="",
            updated_host_labels=response.updated_host_labels,
        )
    except Exception as exc:
        # Boundary handler: the failure is reported back as a SiteResult
        # instead of propagating out of the per-site subprocess.
        logger.error('Exception (%s, discovered_host_label_sync)', site_id, exc_info=True)
        return SiteResult(
            site_id=site_id,
            success=False,
            error=str(exc),
            updated_host_labels=[],
        )
def push_user_profiles_to_site(site, user_profiles):
    """Push user profiles to a remote site via the "push-profiles" automation."""

    def _serialize(profiles):
        """Do not synchronize user session information"""
        stripped = {}
        for user_id, profile in profiles.items():
            stripped[user_id] = {
                key: value for key, value in profile.items() if key != "session_info"
            }
        return stripped

    return do_remote_automation(
        site,
        "push-profiles",
        [("profiles", repr(_serialize(user_profiles)))],
        timeout=60,
    )
def _perform_tests_for_site(
        self, site_id: SiteId,
        result_queue: "multiprocessing.Queue[Tuple[SiteId, str]]") -> None:
    """Run the analyze-config checks for one site.

    Executed in a dedicated subprocess per site; the outcome dict is
    serialized with repr() and handed back to the parent through
    ``result_queue``.

    BUGFIX: the original ``finally`` block called ``result_queue.join()``.
    ``multiprocessing.Queue`` has no ``join()`` method (only
    ``JoinableQueue`` does), so that call raised AttributeError on every
    invocation, after the result had already been put on the queue.
    Also removed a block of dead, commented-out Python-2 fd-closing code.
    """
    self._logger.debug("[%s] Starting" % site_id)

    result = None
    try:
        # Reinitialize logging targets
        log.init_logging()  # NOTE: We run in a subprocess!

        if site_is_local(site_id):
            automation = AutomationCheckAnalyzeConfig()
            results_data = automation.execute(automation.get_request())
        else:
            results_data = do_remote_automation(
                get_site_config(site_id),
                "check-analyze-config",
                [],
                # Stay below the apache request timeout of the parent request
                timeout=request.request_timeout - 10,
            )

        self._logger.debug("[%s] Finished" % site_id)

        result = {
            "state": 0,
            "response": results_data,
        }

    except Exception:
        self._logger.exception("[%s] Failed" % site_id)
        result = {
            "state": 1,
            "response": "Traceback:<br>%s" %
                        (traceback.format_exc().replace("\n", "<br>\n")),
        }
    finally:
        result_queue.put((site_id, repr(result)))
        result_queue.close()
        result_queue.join_thread()
def _get_diagnostics_dump_file(self, site: SiteId, tarfile_name: str) -> bytes:
    """Fetch the diagnostics dump tarfile, either locally or from a remote site.

    NOTE: this method intentionally shares its name with the module-level
    helper it delegates to for the local case.
    """
    if not site_is_local(site):
        return do_remote_automation(
            get_site_config(site),
            "diagnostics-dump-get-file",
            [("tarfile_name", tarfile_name)],
        )
    return _get_diagnostics_dump_file(tarfile_name)
def get_check_table(discovery_request: StartDiscoveryRequest) -> DiscoveryResult:
    """Gathers the check table using a background job

    Cares about handling local / remote sites using an automation call.
    In both cases the ServiceDiscoveryBackgroundJob is executed to care about collecting the
    check table asynchronously. In case of a remote site the chain is:

    Starting from central site:

    _get_check_table()
          |
          v
    automation service-discovery-job-discover
          |
          v
    to remote site
          |
          v
    AutomationServiceDiscoveryJob().execute()
          |
          v
    _get_check_table()
    """
    host = discovery_request.host
    site_id = host.site_id()

    # A tabula-rasa discovery is recorded as a pending WATO change first
    if discovery_request.options.action == DiscoveryAction.TABULA_RASA:
        _changes.add_service_change(
            "refresh-autochecks",
            _("Refreshed check configuration of host '%s'") % host.name(),
            host.object_ref(),
            site_id,
        )

    if site_is_local(site_id):
        return execute_discovery_job(discovery_request)

    # Remote site: pending changes must be synced before the automation runs
    sync_changes_before_remote_automation(site_id)

    serialized_result = do_remote_automation(
        get_site_config(site_id),
        "service-discovery-job",
        [
            ("host_name", host.name()),
            ("options", json.dumps(discovery_request.options._asdict())),
        ],
    )
    return DiscoveryResult.deserialize(serialized_result)
def push_user_profiles_to_site(site, user_profiles):
    """Push user profiles to a remote site via the "push-profiles" automation.

    CONSISTENCY FIX: the sibling implementation of this function explicitly
    strips ``session_info`` before pushing ("Do not synchronize user session
    information") — sessions are site-local state and must not be replicated
    to remote sites. This variant pushed the profiles verbatim, including
    session information; it now applies the same filtering.
    """
    serialized = {
        user_id: {k: v for k, v in profile.items() if k != "session_info"}
        for user_id, profile in user_profiles.items()
    }
    return do_remote_automation(
        site,
        "push-profiles",
        [("profiles", repr(serialized))],
        timeout=60,
    )
def execute_network_scan_job() -> None:
    """Executed by the multisite cron job once a minute. Is only executed in the central site.

    Finds the next folder to scan and starts it via WATO automation. The
    result is written to the folder in the master site.
    """
    init_wato_datastructures(with_wato_lock=True)

    if is_wato_slave_site():
        return  # Don't execute this job on slaves.

    folder = _find_folder_to_scan()
    if not folder:
        return  # Nothing to do.

    run_as = folder.attribute("network_scan")["run_as"]
    if not userdb.user_exists(run_as):
        raise MKGeneralException(
            _("The user %s used by the network "
              "scan of the folder %s does not exist.") % (run_as, folder.title()))

    with UserContext(run_as):
        result: NetworkScanResult = {
            "start": time.time(),
            "end": True,  # means currently running
            "state": None,
            "output": "The scan is currently running.",
        }

        # Mark the scan in progress: Is important in case the request takes longer than
        # the interval of the cron job (1 minute). Otherwise the scan might be started
        # a second time before the first one finished.
        _save_network_scan_result(folder, result)

        try:
            site_id = folder.site_id()
            if site_is_local(site_id):
                discovered = _do_network_scan(folder)
            else:
                discovered = do_remote_automation(
                    get_site_config(site_id),
                    "network-scan",
                    [("folder", folder.path())],
                )

            if not isinstance(discovered, list):
                raise MKGeneralException(
                    _("Received an invalid network scan result: %r") % discovered)

            _add_scanned_hosts_to_folder(folder, discovered)

            result["state"] = True
            result["output"] = _("The network scan found %d new hosts.") % len(discovered)
        except Exception as e:
            result["state"] = False
            result["output"] = _("An exception occured: %s") % e
            logger.error("Exception in network scan:\n%s", traceback.format_exc())

        result["end"] = time.time()
        _save_network_scan_result(folder, result)