def _save_services(self, checks, need_sync):
    """Persist the edited autochecks of this host and log the change.

    Records a pending change ("set-autochecks") for the activation workflow,
    then pushes the new check table to the host's site via automation.
    """
    host_name = self._host.name()
    change_message = _("Saved check configuration of host '%s' with %d services") % (
        host_name, len(checks))
    watolib.add_service_change(self._host, "set-autochecks", change_message,
                               need_sync=need_sync)
    check_mk_automation(self._host.site_id(), "set-autochecks", [host_name], checks)
def _get_check_table_from_remote(api_request):
    """Gathers the check table from a remote site

    Cares about pre 1.6 sites that do not support the new service-discovery-job API call.
    Falling back to the previously existing try-inventory and inventory automation calls.
    """
    try:
        # Push pending config changes first so the remote discovery sees current data.
        sync_changes_before_remote_automation(api_request.host.site_id())
        return _deserialize_remote_result(
            watolib.do_remote_automation(
                get_site_config(api_request.host.site_id()), "service-discovery-job", [
                    ("host_name", api_request.host.name()),
                    ("options", json.dumps(api_request.options._asdict())),
                ]))
    except watolib.MKAutomationException as e:
        # Only swallow the "unknown command" error of old remote sites; everything
        # else is a real failure and must propagate.
        if "Invalid automation command: service-discovery-job" not in "%s" % e:
            raise

        # Compatibility for pre 1.6 remote sites.
        # TODO: Replace with helpful exception in 1.7.
        if api_request.options.action == DiscoveryAction.TABULA_RASA:
            # Result intentionally discarded: this call only refreshes the data.
            _counts, _failed_hosts = check_mk_automation(
                api_request.host.site_id(), "inventory",
                ["@scan", "refresh", api_request.host.name()])

        if api_request.options.action == DiscoveryAction.REFRESH:
            options = ["@scan"]
        else:
            options = ["@noscan"]

        if not api_request.options.ignore_errors:
            options.append("@raiseerrors")

        options.append(api_request.host.name())
        check_table = check_mk_automation(api_request.host.site_id(), "try-inventory", options)

        # Old sites provide no job/label information; synthesize an inactive job
        # status and empty label sets so callers get a uniform DiscoveryResult.
        return DiscoveryResult(
            job_status={
                "is_active": False,
                "state": JobStatusStates.INITIALIZED,
            },
            check_table=check_table,
            check_table_created=int(time.time()),
            host_labels={},
            new_labels={},
            vanished_labels={},
            changed_labels={},
        )
def _save_services(self, old_autochecks: SetAutochecksTable, checks: SetAutochecksTable,
                   need_sync: bool) -> None:
    """Persist the edited autochecks, logging an audit entry with a before/after diff."""
    host_name = self._host.name()
    audit_diff = watolib.make_diff_text(
        _make_host_audit_log_object(old_autochecks),
        _make_host_audit_log_object(checks),
    )
    watolib.add_service_change(
        host=self._host,
        action_name="set-autochecks",
        text=_("Saved check configuration of host '%s' with %d services") % (host_name,
                                                                             len(checks)),
        need_sync=need_sync,
        diff_text=audit_diff,
    )
    check_mk_automation(self._host.site_id(), "set-autochecks", [host_name], checks)
def do_execute(self, diagnostics_parameters: DiagnosticsParameters,
               job_interface: BackgroundProcessInterface) -> None:
    """Create a diagnostics dump on the chosen site and offer it for download.

    Runs the "create-diagnostics-dump" automation (non-blocking HTTP, with a
    timeout slightly below the request timeout) and reports progress and the
    final result through the background job interface.
    """
    job_interface.send_progress_update(_("Diagnostics dump started..."))
    # Serialize once; the original called serialize_wato_parameters() twice.
    serialized_parameters = serialize_wato_parameters(diagnostics_parameters)
    job_interface.send_progress_update(repr(diagnostics_parameters))
    job_interface.send_progress_update(repr(serialized_parameters))

    site = diagnostics_parameters["site"]
    # Stay below the HTTP request timeout so the automation can finish cleanly.
    timeout = html.request.request_timeout - 2
    result = check_mk_automation(site,
                                 "create-diagnostics-dump",
                                 args=serialized_parameters,
                                 timeout=timeout,
                                 non_blocking_http=True)

    job_interface.send_progress_update(result["output"])

    if result["tarfile_created"]:
        tarfile_path = result['tarfile_path']
        # Pass only the file name to the download page: the previous
        # str(Path(tarfile_path)) was a no-op that leaked the absolute
        # server-side path into the URL.
        download_url = html.makeuri_contextless(
            [("site", site), ("tarfile_name", str(Path(tarfile_path).name))],
            "download_diagnostics_dump.py")
        button = html.render_icon_button(download_url, _("Download"), "diagnostics_dump_file")

        job_interface.send_progress_update(_("Dump file: %s") % tarfile_path)
        job_interface.send_result_message(_("%s Creating dump file successfully") % button)
    else:
        job_interface.send_result_message(_("Creating dump file failed"))
def get_result(self, request):
    # type: (StartDiscoveryRequest) -> DiscoveryResult
    """Executed from the outer world to report about the job state"""
    job_status = self.get_status()
    job_status["is_active"] = self.is_active()

    # TODO: Use the correct time. This is difficult because cmk.base does not have a single
    # time for all data of a host. The data sources should be able to provide this information
    # somehow.
    check_table_created = int(time.time())
    result = check_mk_automation(request.host.site_id(), "try-inventory",
                                 ["@noscan", request.host.name()])

    if not job_status['is_active'] and job_status['state'] == JobStatusStates.EXCEPTION:
        # There might be an exception when calling above 'check_mk_automation'. For example
        # this may happen if a hostname is not resolvable. Then if the error is fixed, ie.
        # configuring an IP address of this host, and the discovery is started again, we put
        # the cached/last job exception into the current job progress update instead of displaying
        # the error in a CRIT message box again.
        job_status['state'] = JobStatusStates.FINISHED
        # Fold the old exception lines into the progress log (clearly labeled) and
        # clear the exception list so the UI does not render the error box again.
        job_status['loginfo'] = {
            'JobProgressUpdate': ['%s:' % _('Last progress update')] +
                                 job_status['loginfo']['JobProgressUpdate'] +
                                 ["%s:" % _('Last exception')] +
                                 job_status['loginfo']['JobException'],
            'JobException': [],
            'JobResult': job_status['loginfo']['JobResult'],
        }

    return DiscoveryResult(
        job_status=job_status,
        check_table_created=check_table_created,
        check_table=result["check_table"],
        # Older automation results may lack host_labels; default to empty.
        host_labels=result.get("host_labels", {}),
    )
def _perform_service_scan(self, api_request):
    """The try-inventory automation refreshes the Check_MK internal cache and makes
    the new information available to the next try-inventory call made by get_result()."""
    scan_options = self._get_automation_options(api_request)
    scan_result = check_mk_automation(api_request.host.site_id(), "try-inventory",
                                      scan_options)
    sys.stdout.write(scan_result["output"])
def _save_services(self, old_autochecks: SetAutochecksTable, checks: SetAutochecksTable,
                   need_sync: bool) -> None:
    """Persist the edited autochecks with an audit diff, handling old remote sites.

    Pre 1.7 remote sites expect a reduced per-service tuple, so the table is
    stripped down before being sent to such sites.
    """
    host_name = self._host.name()
    watolib.add_service_change(
        host=self._host,
        action_name="set-autochecks",
        text=_("Saved check configuration of host '%s' with %d services") % (host_name,
                                                                             len(checks)),
        need_sync=need_sync,
        diff_text=watolib.make_diff_text(
            _make_host_audit_log_object(old_autochecks),
            _make_host_audit_log_object(checks)),
    )

    site_id = self._host.site_id()
    site_status = states().get(site_id, SiteStatus({}))
    if is_pre_17_remote_site(site_status):
        # Old sites only understand elements 1:3 of each service entry.
        payload = {service: entry[1:3] for service, entry in checks.items()}
    else:
        payload = checks
    check_mk_automation(site_id, "set-autochecks", [host_name], payload)
def _get_try_discovery(api_request: StartDiscoveryRequest) -> Tuple[int, Dict]:
    """Run a cache-only try-inventory and return (creation timestamp, result dict)."""
    # TODO: Use the correct time. This is difficult because cmk.base does not have a single
    # time for all data of a host. The data sources should be able to provide this information
    # somehow.
    created_at = int(time.time())
    discovery_data = check_mk_automation(
        api_request.host.site_id(),
        "try-inventory",
        ["@noscan", api_request.host.name()],
    )
    return created_at, discovery_data
def _execute_discovery(self, task, mode, do_scan, error_handling):
    """Run the bulk "inventory" automation for one site's batch of hosts.

    Flags are prepended before the mode and host names: "@scan" when a full
    scan is requested, "@raiseerrors" when error handling is disabled.
    """
    arguments = [mode] + task.host_names
    if do_scan:
        arguments.insert(0, "@scan")
    if not error_handling:
        arguments.insert(0, "@raiseerrors")

    # Stay below the HTTP request timeout so the automation can finish cleanly.
    timeout = html.request.request_timeout - 2
    counts, failed_hosts = check_mk_automation(task.site_id,
                                               "inventory",
                                               arguments,
                                               timeout=timeout,
                                               non_blocking_http=True)
    return counts, failed_hosts
def get_result(self, request):
    # type: (StartDiscoveryRequest) -> DiscoveryResult
    """Executed from the outer world to report about the job state"""
    status = self.get_status()
    status["is_active"] = self.is_active()

    # TODO: Use the correct time. This is difficult because cmk.base does not have a single
    # time for all data of a host. The data sources should be able to provide this information
    # somehow.
    created_at = int(time.time())
    automation_result = check_mk_automation(request.host.site_id(), "try-inventory",
                                            ["@noscan", request.host.name()])

    return DiscoveryResult(
        job_status=status,
        check_table_created=created_at,
        check_table=automation_result["check_table"],
        # Older automation results may lack host_labels; default to empty.
        host_labels=automation_result.get("host_labels", {}),
    )
def _rename_hosts_in_check_mk(
        renamings: List[_Tuple[CREFolder, HostName, HostName]]) -> Dict[str, int]:
    """Apply host renamings per site via the "rename-hosts" automation.

    Returns accumulated per-action counts reported by all involved sites.
    """
    action_counts: Dict[str, int] = {}
    for site_id, name_pairs in _group_renamings_by_site(renamings).items():
        pair_texts = [_("%s into %s") % pair for pair in name_pairs]
        message = _("Renamed host %s") % ", ".join(pair_texts)

        # Restart is done by remote automation (below), so don't do it during rename/sync
        # The sync is automatically done by the remote automation call
        add_change("renamed-hosts", message, sites=[site_id], need_restart=False)

        new_counts = check_mk_automation(site_id, "rename-hosts", [], name_pairs)
        _merge_action_counts(action_counts, new_counts)
    return action_counts
def do_execute(self, diagnostics_params, job_interface):
    # type: (DiagnosticsParams, BackgroundProcessInterface) -> None
    """Create a diagnostics dump on the chosen site and report the result file path."""
    job_interface.send_progress_update(_("Diagnostics dump started..."))

    site = diagnostics_params["site"]
    # Stay below the HTTP request timeout so the automation can finish cleanly.
    timeout = html.request.request_timeout - 2
    result = check_mk_automation(site,
                                 "create-diagnostics-dump",
                                 timeout=timeout,
                                 non_blocking_http=True)
    job_interface.send_progress_update(result["output"])

    tarfile_path = result['tarfile_path']
    #TODO next
    #download_url = html.makeuri_contextless([("site", site)], filename=tarfile_path)
    #button = html.render_icon_button(download_url, _("Download"), "diagnostics_dump_file")
    #job_interface.send_result_message(
    #    _("Diagnostics dump file: %s %s") % (tarfile_path, button))
    job_interface.send_progress_update(_("Dump file: %s") % tarfile_path)
    job_interface.send_result_message(_("Creating dump file successfully"))
def _perform_automatic_refresh(self, request):
    """Refresh the host's discovery data via the "inventory" automation.

    The returned counts are intentionally discarded; only the side effect of
    refreshing the cached data is wanted here.
    """
    refresh_arguments = ["@scan", "refresh", request.host.name()]
    _counts, _failed_hosts = check_mk_automation(request.host.site_id(), "inventory",
                                                 refresh_arguments)