def load_web_plugins(forwhat, globalvars):
    """Load all web plugin files of the given plugin family into globalvars.

    Scans both the builtin (``web_dir``) and the site-local
    (``local_web_dir``) plugin directory for the family ``forwhat`` and
    executes each plugin file in the given ``globalvars`` namespace.
    Failures of single plugins are logged and collected in
    ``_failed_plugins[forwhat]`` instead of aborting the whole load.

    NOTE(review): This is Python 2 only code (``execfile``, ``file()`` and the
    ``exec … in …`` statement do not exist on Python 3).
    """
    _failed_plugins[forwhat] = []
    for plugins_path in [
            cmk.utils.paths.web_dir + "/plugins/" + forwhat,
            cmk.utils.paths.local_web_dir + "/plugins/" + forwhat
    ]:
        if not os.path.exists(plugins_path):
            continue
        for fn in sorted(os.listdir(plugins_path)):
            file_path = plugins_path + "/" + fn
            try:
                # Prefer the compiled .pyc when it exists next to the .py
                if fn.endswith(".py") and not os.path.exists(file_path + "c"):
                    execfile(file_path, globalvars)
                elif fn.endswith(".pyc"):
                    # Skip the 8 byte .pyc header (magic + timestamp on
                    # Python 2) before unmarshalling the code object
                    code_bytes = file(file_path).read()[8:]
                    code = marshal.loads(code_bytes)
                    exec code in globalvars
            except Exception as e:
                logger.error("Failed to load plugin %s: %s", file_path, e, exc_info=True)
                _failed_plugins[forwhat].append((file_path, e))
def _execute_sync_action(
        self, job_interface: background_job.BackgroundProcessInterface,
        add_to_changelog: bool, enforce_sync: bool, load_users_func: Callable,
        save_users_func: Callable) -> bool:
    """Sync all active user connections, reporting progress to the background job.

    Connections whose sync is not needed are skipped unless ``enforce_sync``
    is set. A failure of one connection is reported to the job interface and
    logged, but does not abort the remaining connections. Always returns True.
    """
    for connection_id, connection in active_connections():
        try:
            if not enforce_sync and not connection.sync_is_needed():
                continue

            job_interface.send_progress_update(
                _("[%s] Starting sync for connection") % connection_id)
            # BUGFIX: the callables handed in by the caller were previously
            # ignored in favor of the module-level load_users/save_users.
            connection.do_sync(add_to_changelog=add_to_changelog,
                               only_username=False,
                               load_users_func=load_users_func,
                               save_users_func=save_users_func)
            job_interface.send_progress_update(
                _("[%s] Finished sync for connection") % connection_id)
        except Exception as e:
            # Best effort: report and continue with the next connection
            job_interface.send_exception(_("[%s] Exception: %s") % (connection_id, e))
            logger.error('Exception (%s, userdb_job): %s', connection_id,
                         traceback.format_exc())

    job_interface.send_progress_update(_("Finalizing synchronization"))
    general_userdb_job()
    return True
def _execute_site_sync(site_id: SiteId, site_spec: SiteConfiguration,
                       site_request: SiteRequest) -> SiteResult:
    """Executes the sync with a site. Is executed in a dedicated subprocess (One per site)"""
    try:
        logger.debug(_("[%s] Starting sync for site"), site_id)

        serialized_request = repr(site_request.serialize())
        # timeout=100: Use a value smaller than the default apache request timeout
        raw_response = do_remote_automation(
            site_spec,
            "discovered-host-label-sync",
            [("request", serialized_request)],
            timeout=100,
        )
        sync_response = DiscoveredHostLabelSyncResponse(**raw_response)

        logger.debug(_("[%s] Finished sync for site"), site_id)

        return SiteResult(
            site_id=site_id,
            success=True,
            error="",
            updated_host_labels=sync_response.updated_host_labels,
        )
    except Exception as exc:
        # Report the failure as a result object instead of propagating it
        logger.error('Exception (%s, discovered_host_label_sync)', site_id, exc_info=True)
        return SiteResult(
            site_id=site_id,
            success=False,
            error=str(exc),
            updated_host_labels=[],
        )
def sync_changes_before_remote_automation(site_id):
    """Push pending WATO changes to the given remote site before an automation call.

    Starts a sync-only activation for the site and waits up to 30 seconds
    for it to finish; a failed sync is only logged, not raised.
    """
    # TODO: Cleanup this local import
    import cmk.gui.watolib.activate_changes  # pylint: disable=redefined-outer-name
    manager = cmk.gui.watolib.activate_changes.ActivateChangesManager()
    manager.load()

    if not manager.is_sync_needed(site_id):
        return

    logger.info("Syncing %s", site_id)

    manager.start([site_id], activate_foreign=True, prevent_activate=True)

    # Wait maximum 30 seconds (60 polls of 0.5s each) for sync to finish
    for _poll in range(60):
        if not manager.is_running():
            break
        time.sleep(0.5)

    state = manager.get_site_state(site_id)
    if state and state["_state"] != "success":
        logger.error(
            _("Remote automation tried to sync pending changes but failed: %s"),
            state.get("_status_details"),
        )
def _process_request(environ, start_response) -> Response:  # pylint: disable=too-many-branches
    """Run the page handler for the current request and map exceptions to HTTP responses.

    Every known exception type raised by a page is translated into a
    suitable response object; anything unexpected is handled by
    handle_unhandled_exception().
    """
    try:
        html.init_modes()

        # Make sure all plugins are available as early as possible. At least
        # we need the plugins (i.e. the permissions declared in these) at the
        # time before the first login for generating auth.php.
        load_all_plugins()

        page_handler = get_and_wrap_page(html.myfile)
        response = page_handler()
        # If page_handler didn't raise we assume everything is OK.
        response.status_code = http_client.OK
    except HTTPRedirect as e:
        # This can't be a new Response as it can have already cookies set/deleted by the pages.
        # We can't return the response because the Exception has been raised instead.
        # TODO: Remove all HTTPRedirect exceptions from all pages. Making the Exception a subclass
        # of Response may also work as it can then be directly returned from here.
        response = html.response
        response.status_code = e.status
        response.headers["Location"] = e.url

    except FinalizeRequest as e:
        # TODO: Remove all FinalizeRequest exceptions from all pages and replace it with a `return`.
        # It may be necessary to rewire the control-flow a bit as this exception could have
        # been used to short-circuit some code and jump directly to the response. This
        # needs to be changed as well.
        response = html.response
        response.status_code = e.status

    except livestatus.MKLivestatusNotFoundError as e:
        response = _render_exception(e, title=_("Data not found"))

    except MKUserError as e:
        response = _render_exception(e, title=_("Invalid user Input"))

    except MKAuthException as e:
        response = _render_exception(e, title=_("Permission denied"))

    except livestatus.MKLivestatusException as e:
        response = _render_exception(e, title=_("Livestatus problem"))
        response.status_code = http_client.BAD_GATEWAY

    except MKUnauthenticatedException as e:
        response = _render_exception(e, title=_("Not authenticated"))
        response.status_code = http_client.UNAUTHORIZED

    except MKConfigError as e:
        response = _render_exception(e, title=_("Configuration error"))
        logger.error("MKConfigError: %s", e)

    except MKGeneralException as e:
        response = _render_exception(e, title=_("General error"))
        logger.error("MKGeneralException: %s", e)

    except Exception:
        # Last resort: unknown failure, delegated to the crash report machinery
        response = handle_unhandled_exception()

    return response(environ, start_response)
def _answer_graph_image_request() -> None:
    """Answer an ajax_graph_images.py request with base64 encoded graph PNGs as JSON.

    Reads host/service/site from the HTTP request, renders up to
    "num_graphs" 25h graphs for notifications and writes a JSON list of
    base64-encoded PNG images into the response. Errors are logged and, in
    debug mode, re-raised.
    """
    try:
        host_name = request.var("host")
        if not host_name:
            raise MKGeneralException(_("Missing mandatory \"host\" parameter"))

        service_description = request.var("service", "_HOST_")

        site = request.var("site")
        # FIXME: We should really enforce site here. But it seems that the notification context
        # has no idea about the site of the host. This could be optimized later.
        #if not site:
        #    raise MKGeneralException("Missing mandatory \"site\" parameter")
        try:
            row = get_graph_data_from_livestatus(site, host_name, service_description)
        except livestatus.MKLivestatusNotFoundError:
            if config.debug:
                raise
            raise Exception(
                _("Cannot render graph: host %s, service %s not found.") %
                (host_name, service_description))

        site = row["site"]

        # Always use 25h graph in notifications
        end_time = time.time()
        start_time = end_time - (25 * 3600)

        graph_render_options = graph_image_render_options()

        graph_identification = (
            "template",
            {
                "site": site,
                "host_name": host_name,
                "service_description": service_description,
                "graph_index": None,  # all graphs
            })

        graph_data_range = graph_image_data_range(graph_render_options, start_time, end_time)
        graph_recipes = graph_identification_types.create_graph_recipes(
            graph_identification, destination=html_render.GraphDestinations.notification)
        # Render all recipes unless the request limits the count
        num_graphs = request.get_integer_input("num_graphs") or len(graph_recipes)

        graphs = []
        for graph_recipe in graph_recipes[:num_graphs]:
            graph_artwork = artwork.compute_graph_artwork(graph_recipe, graph_data_range,
                                                          graph_render_options)
            graph_png = render_graph_image(graph_artwork, graph_data_range, graph_render_options)

            graphs.append(base64.b64encode(graph_png).decode("ascii"))

        response.set_data(json.dumps(graphs))

    except Exception as e:
        logger.error("Call to ajax_graph_images.py failed: %s\n%s", e, traceback.format_exc())
        if config.debug:
            raise
def compute_config_hash(entity):
    """Return a stable MD5 hex digest for a JSON-serializable entity.

    The entity is serialized with sorted keys so that logically equal
    structures produce identical hashes. If serialization or hashing fails,
    the error is logged and the sentinel string "0" is returned so callers
    always get a comparable value.
    """
    try:
        entity_encoded = json.dumps(entity, sort_keys=True)
        # BUGFIX: md5() requires bytes. Passing the unencoded str raises a
        # TypeError on Python 3 which made every hash silently become "0".
        entity_hash = md5(entity_encoded.encode()).hexdigest()
    except Exception as e:
        # Lazy logging args instead of eager %-formatting
        logger.error("Error %s", e)
        entity_hash = "0"
    return entity_hash
def _process_request(
    environ: WSGIEnvironment,
    start_response: StartResponse,
    debug: bool = False,
) -> WSGIResponse:  # pylint: disable=too-many-branches
    """Run the page handler for the requested file and map exceptions to HTTP responses.

    Known exception types are translated into response objects with
    appropriate status codes; unexpected exceptions go through
    handle_unhandled_exception() and are re-raised when debug is set.
    """
    resp: Response
    try:
        page_handler = get_and_wrap_page(requested_file_name(request))
        resp = page_handler()
    except HTTPRedirect as e:
        # This can't be a new Response as it can have already cookies set/deleted by the pages.
        # We can't return the response because the Exception has been raised instead.
        # TODO: Remove all HTTPRedirect exceptions from all pages. Making the Exception a subclass
        # of Response may also work as it can then be directly returned from here.
        resp = response
        resp.status_code = e.status
        resp.headers["Location"] = e.url

    except FinalizeRequest as e:
        # TODO: Remove all FinalizeRequest exceptions from all pages and replace it with a `return`.
        # It may be necessary to rewire the control-flow a bit as this exception could have
        # been used to short-circuit some code and jump directly to the response. This
        # needs to be changed as well.
        resp = response
        resp.status_code = e.status

    except livestatus.MKLivestatusNotFoundError as e:
        resp = _render_exception(e, title=_("Data not found"))

    except MKUserError as e:
        resp = _render_exception(e, title=_("Invalid user input"))

    except MKAuthException as e:
        resp = _render_exception(e, title=_("Permission denied"))

    except livestatus.MKLivestatusException as e:
        resp = _render_exception(e, title=_("Livestatus problem"))
        resp.status_code = http_client.BAD_GATEWAY

    except MKUnauthenticatedException as e:
        resp = _render_exception(e, title=_("Not authenticated"))
        resp.status_code = http_client.UNAUTHORIZED

    except MKConfigError as e:
        resp = _render_exception(e, title=_("Configuration error"))
        logger.error("MKConfigError: %s", e)

    except (MKGeneralException, cmk.utils.store.MKConfigLockTimeout) as e:
        resp = _render_exception(e, title=_("General error"))
        logger.error("%s: %s", e.__class__.__name__, e)

    except Exception:
        resp = handle_unhandled_exception()
        if debug:
            raise

    return resp(environ, start_response)
def _process_request(self, request, response):  # pylint: disable=too-many-branches
    """Run the page handler for the given request and translate exceptions to responses.

    Rendering of error pages happens through _render_exception() which
    writes into the global response; unexpected exceptions produce a
    stored crash report shown to the user.
    """
    try:
        config.initialize()
        html.init_modes()

        # Make sure all plugins are available as early as possible. At least
        # we need the plugins (i.e. the permissions declared in these) at the
        # time before the first login for generating auth.php.
        _load_all_plugins()

        page_handler = get_and_wrap_page(request, html.myfile)
        page_handler()
        # If page_handler didn't raise we assume everything is OK.
        response.status_code = httplib.OK
    except HTTPRedirect as e:
        response.status_code = e.status
        response.headers["Location"] = e.url
    except FinalizeRequest as e:
        response.status_code = e.status
    except livestatus.MKLivestatusNotFoundError as e:
        _render_exception(e, title=_("Data not found"))
    except MKUserError as e:
        _render_exception(e, title=_("Invalid user Input"))
    except MKAuthException as e:
        _render_exception(e, title=_("Permission denied"))
    except livestatus.MKLivestatusException as e:
        _render_exception(e, title=_("Livestatus problem"))
        response.status_code = httplib.BAD_GATEWAY
    except MKUnauthenticatedException as e:
        _render_exception(e, title=_("Not authenticated"))
        response.status_code = httplib.UNAUTHORIZED
    except MKConfigError as e:
        _render_exception(e, title=_("Configuration error"))
        logger.error("MKConfigError: %s", e)
    except MKGeneralException as e:
        _render_exception(e, title=_("General error"))
        logger.error("MKGeneralException: %s", e)
    except Exception:
        # Last resort: persist a crash report and present it to the user
        crash = crash_reporting.GUICrashReport.from_exception()
        crash_reporting.CrashReportStore().save(crash)
        logger.exception("Unhandled exception (Crash-ID: %s)", crash.ident_to_text())
        crash_reporting.show_crash_dump_message(crash, _plain_error(), _fail_silently())
def compute_config_hash(entity: Mapping) -> str:
    """Return the MD5 hex digest of the sorted-key JSON serialization of *entity*.

    On any serialization or hashing error the problem is logged and the
    sentinel "0" is returned so callers always get a comparable string.
    """
    try:
        serialized = json.dumps(entity, sort_keys=True)
        digest = md5(  # pylint: disable=unexpected-keyword-arg
            serialized.encode(),
            usedforsecurity=False,
        )
        return digest.hexdigest()
    except Exception as e:
        logger.error("Error %s", e)
        return "0"
def ajax_snapin():
    """Renders and returns the contents of the requested sidebar snapin(s) in JSON format"""
    html.set_output_format("json")
    # Update online state of the user (if enabled)
    if config.user.id is None:
        raise Exception("no user ID")
    userdb.update_user_access_time(config.user.id)

    user_config = UserSidebarConfig(config.user, config.sidebar)

    # Either a single snapin via "name" or a comma separated list via "names"
    snapin_id = html.request.var("name")
    snapin_ids = [snapin_id] if snapin_id else html.request.get_str_input_mandatory(
        "names", "").split(",")

    snapin_code: List[str] = []
    for snapin_id in snapin_ids:
        try:
            snapin_instance = user_config.get_snapin(snapin_id).snapin_type()
        except KeyError:
            continue  # Skip not existing snapins

        if not config.user.may(snapin_instance.permission_name()):
            continue

        # When restart snapins are about to be refreshed, only render
        # them, when the core has been restarted after their initial
        # rendering
        if not snapin_instance.refresh_regularly() and snapin_instance.refresh_on_restart():
            since = html.request.get_float_input_mandatory('since', 0)
            newest = since
            # Find the most recent program start among all sites
            for site in sites.states().values():
                prog_start = site.get("program_start", 0)
                if prog_start > newest:
                    newest = prog_start
            if newest <= since:  # no restart
                snapin_code.append(u'')
                continue

        # Render into a plugged buffer so exceptions of one snapin only
        # affect its own output
        with html.plugged():
            try:
                snapin_instance.show()
            except Exception as e:
                write_snapin_exception(e)
                e_message = _("Exception during snapin refresh (snapin \'%s\')"
                             ) % snapin_instance.type_name()
                logger.error("%s %s: %s", html.request.requested_url, e_message,
                             traceback.format_exc())
            finally:
                snapin_code.append(html.drain())

    html.write(json.dumps(snapin_code))
def ajax_graph():
    """Serve a freshly rendered graph as JSON for the ajax_graph.py call."""
    html.set_output_format("json")
    try:
        raw_context = html.request.get_str_input_mandatory("context")
        graph_context = json.loads(raw_context)
        html.write(json.dumps(render_ajax_graph(graph_context)))
    except Exception as e:
        # Log the failure; in debug mode re-raise, otherwise answer with a
        # plain error string
        logger.error("Ajax call ajax_graph.py failed: %s\n%s", e, traceback.format_exc())
        if config.debug:
            raise
        html.write("ERROR: %s" % e)
def ajax_graph():
    """Serve a freshly rendered graph as JSON for the ajax_graph.py call."""
    response.set_content_type("application/json")
    try:
        raw_context = request.get_str_input_mandatory("context")
        graph_context = json.loads(raw_context)
        response.set_data(json.dumps(render_ajax_graph(graph_context)))
    except Exception as e:
        # Log the failure; in debug mode re-raise, otherwise answer with a
        # plain error string
        logger.error("Ajax call ajax_graph.py failed: %s\n%s", e, traceback.format_exc())
        if config.debug:
            raise
        response.set_data("ERROR: %s" % e)
def ajax_graph_hover():
    """Serve the hover popup content of a graph as JSON for ajax_graph_hover.py."""
    response.set_content_type("application/json")
    try:
        graph_context = json.loads(html.request.get_str_input_mandatory("context"))
        hover_time = html.request.get_integer_input_mandatory("hover_time")
        html.write(json.dumps(render_ajax_graph_hover(graph_context, hover_time)))
    except Exception as e:
        # Log the failure; in debug mode re-raise, otherwise answer with a
        # plain error string
        logger.error("Ajax call ajax_graph_hover.py failed: %s\n%s", e, traceback.format_exc())
        if config.debug:
            raise
        html.write("ERROR: %s" % e)
def ajax_snapin():
    """Renders and returns the contents of the requested sidebar snapin(s) in JSON format"""
    response.set_content_type("application/json")
    user_config = UserSidebarConfig(user, config.sidebar)

    # Either a single snapin via "name" or a comma separated list via "names"
    snapin_id = request.var("name")
    snapin_ids = ([snapin_id]
                  if snapin_id else request.get_str_input_mandatory("names", "").split(","))

    snapin_code: List[str] = []
    for snapin_id in snapin_ids:
        try:
            snapin_instance = user_config.get_snapin(snapin_id).snapin_type()
        except KeyError:
            continue  # Skip not existing snapins

        if not snapin_instance.may_see():
            continue

        # When restart snapins are about to be refreshed, only render
        # them, when the core has been restarted after their initial
        # rendering
        if not snapin_instance.refresh_regularly() and snapin_instance.refresh_on_restart():
            since = request.get_float_input_mandatory("since", 0)
            newest = since
            # Find the most recent program start among all sites
            for site in sites.states().values():
                prog_start = site.get("program_start", 0)
                if prog_start > newest:
                    newest = prog_start
            if newest <= since:  # no restart
                snapin_code.append("")
                continue

        # Render into a plugged buffer so an exception of one snapin only
        # affects its own output
        with output_funnel.plugged():
            try:
                snapin_instance.show()
            except Exception as e:
                write_snapin_exception(e)
                e_message = (_("Exception during element refresh (element '%s')") %
                             snapin_instance.type_name())
                logger.error("%s %s: %s", request.requested_url, e_message,
                             traceback.format_exc())
            finally:
                snapin_code.append(output_funnel.drain())

    response.set_data(json.dumps(snapin_code))
def _show_details_page(self, job_id: str) -> Optional[GUIBackgroundStatusSnapshot]:
    """Render the details of a single background job and return its status snapshot.

    Returns None (after showing a user message) when the job does not exist
    or its snapshot cannot be obtained.
    """
    job = gui_background_job.GUIBackgroundJob(job_id)
    if not job.exists():
        html.show_message(_("Background job info is not available"))
        return None

    try:
        snapshot = job.get_status_snapshot()
    except Exception:
        # The job may vanish between exists() and snapshot creation
        html.show_message(_("Background job info is not available"))
        logger.error(traceback.format_exc())
        return None

    gui_background_job.GUIBackgroundJobManager().show_job_details_from_snapshot(snapshot)
    return snapshot
def _import_main_module_plugins(main_modules: List[ModuleType]) -> None:
    """Import the plugin packages belonging to each of the given GUI main modules.

    Import errors of individual plugins are logged and recorded via
    utils.add_failed_plugin instead of aborting the whole import.
    """
    logger.debug("Importing main module plugins")

    for main_module in main_modules:
        module_name = main_module.__name__.split(".")[-1]

        for package_name in _plugin_package_names(module_name):
            if not _is_plugin_namespace(package_name):
                logger.debug(" Skip loading plugins from %s", package_name)
                continue

            logger.debug(" Importing plugins from %s", package_name)
            for plugin_name, exc in load_plugins_with_exceptions(package_name):
                logger.error(" Error in %s plugin '%s'\n", module_name, plugin_name,
                             exc_info=exc)
                utils.add_failed_plugin(module_name, plugin_name, exc)

    logger.debug("Main module plugins imported")
def page(self):
    """Render the details page of one background job, auto-refreshing while it runs."""
    job_id = html.request.var("job_id")

    job = gui_background_job.GUIBackgroundJob(job_id)
    if not job.exists():
        html.message(_("Background job info is not available"))
        return

    try:
        # Race condition, the job might get deleted during snapshot generation
        job_snapshot = job.get_status_snapshot()
    except Exception:
        html.message(_("Background job info is not available"))
        logger.error(traceback.format_exc())
        return

    job_manager = gui_background_job.GUIBackgroundJobManager()
    job_manager.show_job_details_from_snapshot(job_snapshot)
    if job_snapshot.is_running():
        # Reload the page after one second to show live progress
        html.immediate_browser_redirect(1, "")
def _bulk_discovery_start(self, request):
    """Start the bulk discovery background job for the hosts given in the request.

    Raises MKUserError when a job is already running or when starting the
    job fails; on success returns {"started": True}.
    """
    job = BulkDiscoveryBackgroundJob()
    if job.is_running():
        raise MKUserError(
            None,
            # Typo fixed: "curent" -> "current"
            _("A bulk discovery job is already running. Please use the "
              "\"bulk_discovery_status\" call to get the current status."))

    mode, use_cache, do_scan, bulk_size, error_handling = self._get_parameters_from_request(
        request)
    tasks = get_tasks(self._get_hosts_from_request(request), bulk_size)

    try:
        job.set_function(job.do_execute, mode, use_cache, do_scan, error_handling, tasks)
        job.start()
        return {
            "started": True,
        }
    except Exception as e:
        logger.error("Failed to start bulk discovery", exc_info=True)
        raise MKUserError(None, _("Failed to start discovery: %s") % e)
def _create_sample_config():
    """Create a very basic sample configuration

    But only if none of the files that we will create already exists. That is
    e.g. the case after an update from an older version where no sample config
    had been created.
    """
    if not _need_to_create_sample_config():
        return

    logger.debug("Start creating the sample config")
    for generator in sample_config_generator_registry.get_generators():
        try:
            # Lazy %-args instead of eager string formatting in logging calls
            logger.debug("Starting [%s]", generator.ident())
            generator.generate()
            logger.debug("Finished [%s]", generator.ident())
        except Exception:
            # Best effort: a broken generator must not prevent the others
            logger.error("Exception in sample config generator [%s]", generator.ident(),
                         exc_info=True)
    logger.debug("Finished creating the sample config")
def execute_network_scan_job() -> None:
    """Executed by the multisite cron job once a minute. Is only executed in the
    central site. Finds the next folder to scan and starts it via WATO
    automation. The result is written to the folder in the master site."""
    init_wato_datastructures(with_wato_lock=True)

    if is_wato_slave_site():
        return  # Don't execute this job on slaves.

    folder = _find_folder_to_scan()
    if not folder:
        return  # Nothing to do.

    run_as = folder.attribute("network_scan")["run_as"]
    if not userdb.user_exists(run_as):
        raise MKGeneralException(
            _("The user %s used by the network "
              "scan of the folder %s does not exist.") % (run_as, folder.title()))

    # Run the whole scan with the permissions of the configured "run_as" user
    with UserContext(run_as):
        result: NetworkScanResult = {
            "start": time.time(),
            "end": True,  # means currently running
            "state": None,
            "output": "The scan is currently running.",
        }

        # Mark the scan in progress: Is important in case the request takes longer than
        # the interval of the cron job (1 minute). Otherwise the scan might be started
        # a second time before the first one finished.
        _save_network_scan_result(folder, result)

        try:
            if site_is_local(folder.site_id()):
                found = _do_network_scan(folder)
            else:
                found = do_remote_automation(get_site_config(folder.site_id()), "network-scan",
                                             [("folder", folder.path())])

            if not isinstance(found, list):
                raise MKGeneralException(_("Received an invalid network scan result: %r") % found)

            _add_scanned_hosts_to_folder(folder, found)

            result.update({
                "state": True,
                "output": _("The network scan found %d new hosts.") % len(found),
            })
        except Exception as e:
            result.update({
                "state": False,
                "output": _("An exception occured: %s") % e,
            })
            logger.error("Exception in network scan:\n%s", traceback.format_exc())

        result["end"] = time.time()

        _save_network_scan_result(folder, result)
def execute_network_scan_job() -> None:
    """Run one network scan cycle: find the next folder to scan and scan it.

    Executed by the multisite cron job; does nothing on slave sites. The
    scan result (state/output/timestamps) is persisted to the folder.
    """
    init_wato_datastructures(with_wato_lock=True)

    if watolib.is_wato_slave_site():
        return  # Don't execute this job on slaves.

    folder = find_folder_to_scan()
    if not folder:
        return  # Nothing to do.

    # We need to have the context of the user. The jobs are executed when
    # config.set_user_by_id() has not been executed yet. So there is no user context
    # available. Use the run_as attribute from the job config and revert
    # the previous state after completion.
    old_user = config.user.id
    run_as = folder.attribute("network_scan")["run_as"]
    if not userdb.user_exists(run_as):
        raise MKGeneralException(
            _("The user %s used by the network "
              "scan of the folder %s does not exist.") % (run_as, folder.title()))
    config.set_user_by_id(folder.attribute("network_scan")["run_as"])

    result: NetworkScanResult = {
        "start": time.time(),
        "end": True,  # means currently running
        "state": None,
        "output": "The scan is currently running.",
    }

    # Mark the scan in progress: Is important in case the request takes longer than
    # the interval of the cron job (1 minute). Otherwise the scan might be started
    # a second time before the first one finished.
    save_network_scan_result(folder, result)

    try:
        if config.site_is_local(folder.site_id()):
            found = cmk.gui.watolib.network_scan.do_network_scan(folder)
        else:
            found = watolib.do_remote_automation(config.site(folder.site_id()), "network-scan",
                                                 [("folder", folder.path())])

        if not isinstance(found, list):
            raise MKGeneralException(_("Received an invalid network scan result: %r") % found)

        add_scanned_hosts_to_folder(folder, found)

        result.update({
            "state": True,
            "output": _("The network scan found %d new hosts.") % len(found),
        })
    except Exception as e:
        result.update({
            "state": False,
            "output": _("An exception occured: %s") % e,
        })
        logger.error("Exception in network scan:\n%s", traceback.format_exc())

    result["end"] = time.time()

    save_network_scan_result(folder, result)

    # NOTE(review): the user context is only restored when old_user is truthy;
    # if it was None before the scan, the run_as user remains active — confirm
    # whether this is intended.
    if old_user:
        config.set_user_by_id(old_user)