def page(self):
    """AJAX endpoint: run a single host diagnose test and return its result.

    Reads the target host and test name from the web API request, builds the
    positional argument vector for the "diag-host" automation call and returns
    a dict with a fresh transaction id, the automation status code and its
    (text) output.

    Raises:
        MKAuthException: missing permission or invalid transaction.
        MKGeneralException: missing/unknown host, cluster host, or bad test.
    """
    watolib.init_wato_datastructures(with_wato_lock=True)

    if not config.user.may('wato.diag_host'):
        raise MKAuthException(_('You are not permitted to perform this action.'))

    if not html.check_transaction():
        raise MKAuthException(_("Invalid transaction"))

    request = self.webapi_request()

    hostname = request.get("host")
    if not hostname:
        raise MKGeneralException(_('The hostname is missing.'))

    host = watolib.Host.host(hostname)

    if not host:
        raise MKGeneralException(_('The given host does not exist.'))
    if host.is_cluster():
        # Diagnose only makes sense for real (monitored) hosts
        raise MKGeneralException(_('This view does not support cluster hosts.'))

    host.need_permission("read")

    _test = request.get('_test')
    if not _test:
        raise MKGeneralException(_('The test is missing.'))

    # Execute a specific test
    if _test not in dict(ModeDiagHost.diag_host_tests()).keys():
        raise MKGeneralException(_('Invalid test.'))

    # The automation expects a fixed-size positional argument vector of 13
    # entries; unset slots stay "".
    # TODO: Use ModeDiagHost._vs_rules() for processing/validation?
    args: List[str] = [u""] * 13
    for idx, what in enumerate([
            'ipaddress',
            'snmp_community',
            'agent_port',
            'snmp_timeout',
            'snmp_retries',
            'tcp_connect_timeout',
    ]):
        args[idx] = request.get(what, u"")

    # Slot 6 (datasource program) is only honored for users that may define
    # executables.
    if config.user.may('wato.add_or_modify_executables'):
        args[6] = request.get("datasource_program", "")

    if request.get("snmpv3_use"):
        # Map the numeric dropdown value to the SNMPv3 security level name
        snmpv3_use = {
            u"0": u"noAuthNoPriv",
            u"1": u"authNoPriv",
            u"2": u"authPriv",
        }.get(request.get("snmpv3_use", u""), u"")
        args[7] = snmpv3_use
        if snmpv3_use != u"noAuthNoPriv":
            snmpv3_auth_proto = {
                str(DropdownChoice.option_id("md5")): u"md5",
                str(DropdownChoice.option_id("sha")): u"sha"
            }.get(request.get("snmpv3_auth_proto", u""), u"")
            args[8] = snmpv3_auth_proto
            args[9] = request.get("snmpv3_security_name", u"")
            args[10] = request.get("snmpv3_security_password", u"")
            if snmpv3_use == "authPriv":
                # Privacy settings are only relevant for the highest level
                snmpv3_privacy_proto = {
                    str(DropdownChoice.option_id("DES")): u"DES",
                    str(DropdownChoice.option_id("AES")): u"AES"
                }.get(request.get("snmpv3_privacy_proto", u""), u"")
                args[11] = snmpv3_privacy_proto
                args[12] = request.get("snmpv3_privacy_password", u"")
        else:
            # noAuthNoPriv still carries the security name
            args[9] = request.get("snmpv3_security_name", u"")

    result = watolib.check_mk_automation(host.site_id(), "diag-host",
                                         [hostname, _test] + args)
    return {
        "next_transid": html.transaction_manager.fresh_transid(),
        "status_code": result[0],
        # Replace undecodable bytes instead of failing on broken agent output
        "output": ensure_str(result[1], errors="replace"),
    }
def __init__(self):
    """Construct the mode; refuses construction without the rename permission."""
    super().__init__()
    # Fail early: this whole mode is useless without the permission
    if not user.may("wato.rename_hosts"):
        raise MKGeneralException(_("You don't have the right to rename hosts"))
def __init__(self, perfometer: Perfometer, translated_metrics: TranslatedMetrics) -> None:
    """Validate and store the perfometer definition.

    Raises:
        MKGeneralException: if the logarithmic perfometer definition lacks
            the mandatory "metric" key.
    """
    super(MetricometerRendererLogarithmic, self).__init__(perfometer, translated_metrics)

    # A logarithmic perfometer is rendered from exactly one metric, so the
    # definition must name it.
    if self._perfometer is not None and "metric" not in self._perfometer:
        raise MKGeneralException(
            _("Missing key \"metric\" in logarithmic perfometer: %r") % self._perfometer)
def _wato_page_handler(current_mode: str, mode_permissions: List[PermissionName],
                       mode_class: Type[WatoMode]) -> None:
    """Dispatch a WATO page request: run the mode's action, then render the page.

    Instantiates the given mode class, executes its action phase when the
    request is a transaction (converting MKUserError/MKAuthException into
    user-visible error messages), then renders header, body and footer.
    """
    try:
        init_wato_datastructures(with_wato_lock=not html.is_transaction())
    except Exception:
        # Snapshot must work in any case
        if current_mode == 'snapshot':
            pass
        else:
            raise

    # Check general permission for this mode
    if mode_permissions is not None and not config.user.may("wato.seeall"):
        _ensure_mode_permissions(mode_permissions)

    mode = mode_class()

    # Do actions (might switch mode)
    if html.is_transaction():
        try:
            config.user.need_permission("wato.edit")

            # Even if the user has seen this mode because auf "seeall",
            # he needs an explicit access permission for doing changes:
            if config.user.may("wato.seeall"):
                if mode_permissions:
                    _ensure_mode_permissions(mode_permissions)

            if cmk.gui.watolib.read_only.is_enabled() and \
                    not cmk.gui.watolib.read_only.may_override():
                raise MKUserError(None, cmk.gui.watolib.read_only.message())

            result = mode.action()
            if isinstance(result, (tuple, str, bool)):
                raise MKGeneralException(
                    f"WatoMode \"{current_mode}\" returns unsupported return value: {result!r}")

            # We assume something has been modified and increase the config generation ID by one.
            update_config_generation()

            # Handle two cases:
            # a) Don't render the page content after action
            #    (a confirm dialog is displayed by the action, or a non-HTML content was sent)
            # b) Redirect to another page
            if isinstance(result, FinalizeRequest):
                raise result

        except MKUserError as e:
            # Remember the error; it is rendered further below on this page
            html.add_user_error(e.varname, str(e))

        except MKAuthException as e:
            reason = e.args[0]
            html.add_user_error(None, reason)

    breadcrumb = make_main_menu_breadcrumb(mode.main_menu()) + mode.breadcrumb()
    page_menu = mode.page_menu(breadcrumb)
    wato_html_head(title=mode.title(),
                   breadcrumb=breadcrumb,
                   page_menu=page_menu,
                   show_body_start=display_options.enabled(display_options.H),
                   show_top_heading=display_options.enabled(display_options.T))

    # NOTE(review): this condition triggers the read-only warning on every
    # non-transaction request, regardless of whether read-only mode is
    # enabled — verify the is_enabled() check should not guard the whole
    # expression.
    if not html.is_transaction() or (cmk.gui.watolib.read_only.is_enabled() and
                                     cmk.gui.watolib.read_only.may_override()):
        _show_read_only_warning()

    # Show outcome of failed action on this page
    if html.has_user_errors():
        html.show_user_errors()

    # Show outcome of previous page (that redirected to this one)
    for message in get_flashed_messages():
        html.show_message(message)

    # Show content
    mode.handle_page()

    if is_sidebar_reload_needed():
        html.reload_whole_page()

    if config.wato_use_git and html.is_transaction():
        do_git_commit()

    wato_html_footer(show_body_end=display_options.enabled(display_options.H))
def hex_color_to_rgb_color(color: str) -> Tuple[int, int, int]:
    """Convert a '#RRGGBB' hex string to an (r, g, b) integer triple.

    Example: '#112233' -> (17, 34, 51).

    Raises:
        MKGeneralException: when the string is not a parseable hex color.
    """
    try:
        red, green, blue = (int(color[pos:pos + 2], 16) for pos in (1, 3, 5))
        return red, green, blue
    except Exception:
        raise MKGeneralException(_("Invalid color specification '%s'") % color)
def declare_host_attribute(a,
                           show_in_table=True,
                           show_in_folder=True,
                           show_in_host_search=True,
                           topic=None,
                           sort_index=None,
                           show_in_form=True,
                           depends_on_tags=None,
                           depends_on_roles=None,
                           editable=True,
                           show_inherited_value=True,
                           may_edit=None,
                           from_config=False):
    """Register a legacy host attribute class with the host attribute registry.

    Builds a concrete subclass of ``a`` at runtime whose accessor methods
    return the values passed as function arguments, then registers it.

    Raises:
        MKGeneralException: if ``a`` is not an ABCHostAttribute subclass.
        NotImplementedError: if ``topic`` is neither None/str nor a
            HostAttributeTopic subclass.
    """
    if not issubclass(a, ABCHostAttribute):
        raise MKGeneralException(
            _("Failed to load legacy host attribute from local plugins: %r") % a)

    # Collected class attributes for the dynamically created subclass.
    # Each "_x" value is exposed via a corresponding accessor method.
    attrs = {}
    if depends_on_tags is not None:
        attrs["_depends_on_tags"] = depends_on_tags
        attrs["depends_on_tags"] = lambda self: self._depends_on_tags

    if depends_on_roles is not None:
        attrs["_depends_on_roles"] = depends_on_roles
        attrs["depends_on_roles"] = lambda self: self._depends_on_roles

    if topic is None or isinstance(topic, str):
        # Legacy API: a plain topic title is turned into a topic object;
        # its ident is derived from the title.
        ident = str(topic).replace(" ", "_").lower() if topic else None
        attrs["_topic"] = _declare_host_attribute_topic(ident, topic)
    elif issubclass(topic, HostAttributeTopic):
        attrs["_topic"] = topic
    else:
        raise NotImplementedError()
    attrs["topic"] = lambda self: self._topic

    if may_edit is not None:
        # Stored in a 1-tuple so the function is not bound as a method
        attrs["_may_edit_func"] = (may_edit, )
        attrs["may_edit"] = lambda self: self._may_edit_func[0]()

    if sort_index is not None:
        attrs["_sort_index"] = sort_index
        attrs["sort_index"] = classmethod(lambda c: c._sort_index)

    attrs.update({
        "_show_in_table": show_in_table,
        "show_in_table": lambda self: self._show_in_table,
        "_show_in_folder": show_in_folder,
        "show_in_folder": lambda self: self._show_in_folder,
        "_show_in_host_search": show_in_host_search,
        "show_in_host_search": lambda self: self._show_in_host_search,
        "_show_in_form": show_in_form,
        "show_in_form": lambda self: self._show_in_form,
        "_show_inherited_value": show_inherited_value,
        "show_inherited_value": lambda self: self._show_inherited_value,
        "_editable": editable,
        "editable": lambda self: self._editable,
        "_from_config": from_config,
        "from_config": lambda self: self._from_config,
    })

    # Apply the left over missing attributes that we get from the function arguments
    # by creating the final concrete class of this attribute
    final_class = type("%sConcrete" % a.__name__, (a, ), attrs)
    host_attribute_registry.register(final_class)
def get_snapshot_status(snapshot, validate_checksums=False, check_correct_core=True):
    """Inspect a WATO snapshot and return a status dict describing it.

    ``snapshot`` is either a file name below ``snapshot_dir`` or a tuple
    ``(name, file_stream)``. The returned dict always contains the default
    keys below; on any validation error "broken" is set to True and
    "broken_text" carries the reason (full traceback in debug mode).
    """
    if isinstance(snapshot, tuple):
        name, file_stream = snapshot
    else:
        name = snapshot
        file_stream = None

    # Defaults of available keys
    status: Dict[str, Any] = {
        "name": "",
        "total_size": 0,
        "type": None,
        "files": {},
        "comment": "",
        "created_by": "",
        "broken": False,
        "progress_status": "",
    }

    def access_snapshot(handler):
        # Invoke handler on the stream (rewound) or on the on-disk path
        if file_stream:
            file_stream.seek(0)
            return handler(file_stream)
        return handler(snapshot_dir + name)

    def check_size():
        # Anything below 256 bytes cannot be a valid tar snapshot
        if file_stream:
            file_stream.seek(0, os.SEEK_END)
            size = file_stream.tell()
        else:
            statinfo = os.stat(snapshot_dir + name)
            size = statinfo.st_size
        if size < 256:
            raise MKGeneralException(_("Invalid snapshot (too small)"))
        status["total_size"] = size

    def check_extension():
        # Check snapshot extension: tar or tar.gz
        if name.endswith(".tar.gz"):
            status["type"] = "legacy"
            status["comment"] = _("Snapshot created with old version")
        elif not name.endswith(".tar"):
            raise MKGeneralException(_("Invalid snapshot (incorrect file extension)"))

    def check_content():
        status["files"] = access_snapshot(_list_tar_content)

        if status.get("type") == "legacy":
            # Legacy snapshots may only contain the default backup domains
            allowed_files = ["%s.tar" % x[1] for x in _get_default_backup_domains()]
            for tarname in status["files"]:
                if tarname not in allowed_files:
                    raise MKGeneralException(
                        _("Invalid snapshot (contains invalid tarfile %s)") % tarname)
        else:  # new snapshots
            # New snapshots must carry these metadata members
            for entry in ["comment", "created_by", "type"]:
                if entry in status["files"]:

                    # entry=entry binds the loop variable at definition time
                    def handler(x, entry=entry):
                        return _get_file_content(x, entry).decode("utf-8")

                    status[entry] = access_snapshot(handler)
                else:
                    raise MKGeneralException(_("Invalid snapshot (missing file: %s)") % entry)

    def check_core():
        # Refuse restoring a snapshot whose core type (CMC vs. Nagios)
        # differs from the currently configured one.
        if "check_mk.tar.gz" not in status["files"]:
            return

        cmk_tar = io.BytesIO(access_snapshot(lambda x: _get_file_content(x, "check_mk.tar.gz")))
        files = _list_tar_content(cmk_tar)
        using_cmc = (cmk.utils.paths.omd_root / "etc/check_mk/conf.d/microcore.mk").exists()
        snapshot_cmc = "conf.d/microcore.mk" in files
        if using_cmc and not snapshot_cmc:
            raise MKGeneralException(
                _("You are currently using the Check_MK Micro Core, but this snapshot does not use the "
                  "Check_MK Micro Core. If you need to migrate your data, you could consider changing "
                  "the core, restoring the snapshot and changing the core back again."))
        if not using_cmc and snapshot_cmc:
            raise MKGeneralException(
                _("You are currently not using the Check_MK Micro Core, but this snapshot uses the "
                  "Check_MK Micro Core. If you need to migrate your data, you could consider changing "
                  "the core, restoring the snapshot and changing the core back again."))

    def check_checksums():
        for f in status["files"].values():
            f["checksum"] = None

        # checksums field might contain three states:
        # a) None - This is a legacy snapshot, no checksum file available
        # b) False - No or invalid checksums
        # c) True - Checksums successfully validated
        if status["type"] == "legacy":
            status["checksums"] = None
            return

        if "checksums" not in status["files"]:
            status["checksums"] = False
            return

        # Extract all available checksums from the snapshot
        checksums_raw = access_snapshot(lambda x: _get_file_content(x, "checksums"))
        checksums = {}
        for l in checksums_raw.split("\n"):
            line = l.strip()
            if " " in line:
                parts = line.split(" ")
                if len(parts) == 3:
                    # filename -> (hash, signed hash)
                    checksums[parts[0]] = (parts[1], parts[2])

        # now loop all known backup domains and check wheter or not they request
        # checksum validation, there is one available and it is valid
        status["checksums"] = True
        for domain_id, domain in backup_domains.items():
            filename = domain_id + ".tar.gz"
            if not domain.get("checksum", True) or filename not in status["files"]:
                continue

            if filename not in checksums:
                continue

            checksum, signed = checksums[filename]

            # Get hashes of file in question
            # filename=filename binds the loop variable at definition time
            def handler(x, filename=filename):
                return _get_file_content(x, filename)

            subtar = access_snapshot(handler)
            subtar_hash = sha256(subtar).hexdigest()
            # The signed hash also mixes in the site-local snapshot secret
            subtar_signed = sha256(subtar_hash.encode() + _snapshot_secret()).hexdigest()

            status["files"][filename]["checksum"] = (checksum == subtar_hash and
                                                     signed == subtar_signed)
            status["checksums"] &= status["files"][filename]["checksum"]

    try:
        if len(name) > 35:
            # Derive a human readable "date time" label from the file name
            # (assumes the fixed wato-snapshot-YYYY-MM-DD-HH-MM-SS naming)
            status["name"] = "%s %s" % (name[14:24], name[25:33].replace("-", ":"))
        else:
            status["name"] = name

        if not file_stream:
            # Check if the snapshot build is still in progress...
            path_status = "%s/workdir/%s/%s.status" % (snapshot_dir, name, name)
            path_pid = "%s/workdir/%s/%s.pid" % (snapshot_dir, name, name)

            # Check if this process is still running
            if os.path.exists(path_pid):
                with Path(path_pid).open(encoding="utf-8") as f:
                    pid = int(f.read())

                if not os.path.exists("/proc/%d" % pid):
                    status["progress_status"] = _("ERROR: Snapshot progress no longer running!")
                    raise MKGeneralException(
                        _("Error: The process responsible for creating the snapshot is no longer running!"
                         ))
                status["progress_status"] = _("Snapshot build currently in progress")

            # Read snapshot status file (regularly updated by snapshot process)
            if os.path.exists(path_status):
                with Path(path_status).open(encoding="utf-8") as f:
                    lines = f.readlines()

                status["comment"] = lines[0].split(":", 1)[1]
                file_info = {}
                for filename in lines[1:]:
                    # Each line is "<name>:<text>:<size>\n"
                    name, info = filename.split(":", 1)
                    text, size = info[:-1].split(":", 1)
                    file_info[name] = {"size": int(size), "text": text}
                status["files"] = file_info
                # In-progress snapshot: skip the content checks below
                return status

        # Snapshot exists and is finished - do some basic checks
        check_size()
        check_extension()
        check_content()
        if check_correct_core:
            check_core()

        if validate_checksums:
            check_checksums()

    except Exception as e:
        if config.debug:
            status["broken_text"] = traceback.format_exc()
            status["broken"] = True
        else:
            status["broken_text"] = "%s" % e
            status["broken"] = True
    return status
def _perform_tests(self):
    """Run configuration analyze tests on all relevant sites in parallel.

    Spawns one subprocess per site, collects the serialized test results via
    a joinable queue and returns them grouped first by category, then by
    test id.
    """
    test_sites = self._analyze_sites()

    self._logger.debug("Executing tests for %d sites" % len(test_sites))
    results_by_site = {}

    # Results are fetched simultaneously from the remote sites
    result_queue = multiprocessing.JoinableQueue()

    processes = []
    for site_id in test_sites:
        process = multiprocessing.Process(target=self._perform_tests_for_site,
                                          args=(site_id, result_queue))
        process.start()
        processes.append((site_id, process))

    # Now collect the results from the queue until all processes are finished
    # NOTE(review): entries still sitting in the queue when the last process
    # exits appear to be dropped, since the loop condition only checks
    # liveness — verify a final drain is not needed.
    while any([p.is_alive() for site_id, p in processes]):
        try:
            site_id, results_data = result_queue.get_nowait()
            result_queue.task_done()
            # Child processes send a repr()-serialized payload
            result = ast.literal_eval(results_data)

            if result["state"] == 1:
                raise MKGeneralException(result["response"])

            elif result["state"] == 0:
                test_results = []
                for result_data in result["response"]:
                    result = ACResult.from_repr(result_data)
                    test_results.append(result)
                results_by_site[site_id] = test_results

            else:
                raise NotImplementedError()

        except Queue.Empty:
            time.sleep(0.5)  # wait some time to prevent CPU hogs

        except Exception as e:
            logger.exception("error analyzing configuration for site %s", site_id)
            html.show_error("%s: %s" % (site_id, e))

    self._logger.debug("Got test results")

    # Group results by category in first instance and then then by test
    results_by_category = {}
    for site_id, results in results_by_site.items():
        for result in results:
            category_results = results_by_category.setdefault(result.category, {})
            test_results_by_site = category_results.setdefault(
                result.test_id, {
                    "site_results": {},
                    "test": {
                        "title": result.title,
                        "help": result.help,
                    }
                })

            test_results_by_site["site_results"][result.site_id] = result

    return results_by_category
def host_service_graph_dashlet_cmk(
    graph_identification: GraphIdentifier,
    custom_graph_render_options,
):
    """Render a Checkmk graph into a dashlet, sized from the HTTP request.

    Reads "width"/"height" (in ex units) and "timerange" (JSON: either a
    [start, end] pair or a duration in seconds) from the request, resolves
    the graph recipe and writes the rendered HTML to the response.
    """
    graph_render_options = {**default_dashlet_graph_render_options}
    graph_render_options = artwork.add_default_render_options(graph_render_options)
    graph_render_options.update(custom_graph_render_options)

    # Convert the requested dashlet size from ex units into graph units and
    # subtract title and margins so the canvas fits the dashlet exactly.
    width_var = request.get_float_input_mandatory("width", 0.0)
    width = int((width_var / html_size_per_ex))

    height_var = request.get_float_input_mandatory("height", 0.0)
    height = int((height_var / html_size_per_ex))

    bounds = _graph_margin_ex(graph_render_options)
    height -= _graph_title_height_ex(graph_render_options)
    height -= bounds.top + bounds.bottom
    width -= bounds.left + bounds.right

    graph_render_options["size"] = (width, height)

    # A list is an absolute [start, end] range; a scalar is a duration
    # relative to "now".
    timerange = json.loads(request.get_str_input_mandatory("timerange"))
    if isinstance(timerange, list):
        end_time = timerange[1]
        start_time = timerange[0]
    else:
        end_time = time.time()
        start_time = end_time - float(timerange)

    graph_data_range = {
        "time_range": (start_time, end_time),
    }

    graph_data_range["step"] = estimate_graph_step_for_html(graph_data_range["time_range"],
                                                            graph_render_options)

    graph_recipes = resolve_graph_recipe_with_error_handling(
        graph_identification,
        destination=GraphDestinations.dashlet,
    )
    if not isinstance(graph_recipes, list):
        return graph_recipes  # This is to html.write the exception
    if graph_recipes:
        graph_recipe = graph_recipes[0]
    else:
        raise MKGeneralException(_("Failed to calculate a graph recipe."))

    # When the legend is enabled, we need to reduce the height by the height of the legend to
    # make the graph fit into the dashlet area.
    if graph_render_options["show_legend"]:
        # TODO FIXME: This graph artwork is calulated twice. Once here and once in
        # render_graphs_from_specification_html()
        graph_artwork = artwork.compute_graph_artwork(graph_recipe, graph_data_range,
                                                      graph_render_options)
        if graph_artwork["curves"]:
            legend_height = graph_legend_height_ex(graph_render_options, graph_artwork)
            graph_render_options["size"] = (width, height - legend_height)

    html_code = render_graphs_from_definitions([graph_recipe], graph_data_range,
                                               graph_render_options,
                                               render_async=False)
    html.write_html(html_code)
def evaluate_time_series_expression(expression, rrd_data):
    """Recursively evaluate a metric expression tree against RRD data.

    ``expression`` is a tagged tuple whose first element selects the node
    type: "operator", "transformation", "rrd", "constant" or "combined"
    (the latter only outside the raw edition). Missing RRD series and
    constants are expanded to the common series length.

    Raises:
        MKGeneralException: forecast transformation in the raw edition.
        NotImplementedError: unknown expression type.
    """
    # All series in rrd_data share one length; constants/missing series are
    # padded to it (1 if there is no data at all).
    if rrd_data:
        num_points = len(next(iter(rrd_data.values())))
    else:
        num_points = 1

    if expression[0] == "operator":
        operator_id, operands = expression[1:]
        operands_evaluated = [evaluate_time_series_expression(a, rrd_data) for a in operands]
        return time_series_math(operator_id, operands_evaluated)

    if expression[0] == "transformation":
        (transform, conf), operands = expression[1:]
        operands_evaluated = evaluate_time_series_expression(operands[0], rrd_data)

        if transform == 'percentile':
            return time_series_operator_perc(operands_evaluated, conf)

        if transform == 'filter_top':
            # A single TimeSeries cannot be filtered; pass it through
            if isinstance(operands_evaluated, TimeSeries):
                return operands_evaluated
            return operands_evaluated[:conf["amount"]]

        if transform == 'value_sort':
            # A single TimeSeries cannot be sorted; pass it through
            if isinstance(operands_evaluated, TimeSeries):
                return operands_evaluated

            aggr_func = {
                "min": lambda x: min(x or [0]),
                "max": lambda x: max(x or [0]),
                "average": lambda x: sum(x) / float(len(x) or 1),
            }[conf['aggregation']]

            # metric[3] holds the data points of each metric entry
            orderlist = sorted(operands_evaluated,
                               key=lambda metric: aggr_func(clean_time_series_point(metric[3])),
                               reverse=conf["reverse"])

            # fix multi-line stack line styling
            if orderlist[0][0] == 'stack':
                line_types = ['area'] + ['stack'] * (len(orderlist) - 1)
                orderlist = [(lt, ) + metric[1:] for lt, metric in zip(line_types, orderlist)]
            return orderlist

        if transform == 'forecast':
            if cmk_version.is_raw_edition():
                raise MKGeneralException(
                    _("Forecast calculations are only available with the "
                      "Checkmk Enterprise Editions"))
            # Suppression is needed to silence pylint in CRE environment
            from cmk.gui.cee.plugins.metrics.forecasts import time_series_transform_forecast  # pylint: disable=no-name-in-module
            return time_series_transform_forecast(
                TimeSeries(operands_evaluated, rrd_data['__range']), conf)

    if expression[0] == "rrd":
        key = tuple(expression[1:])
        if key in rrd_data:
            return rrd_data[key]
        # Unknown metric: yield a gap of the common length
        return [None] * num_points

    if expression[0] == "constant":
        return [expression[1]] * num_points

    if expression[0] == "combined" and not cmk_version.is_raw_edition():
        # Suppression is needed to silence pylint in CRE environment
        from cmk.gui.cee.plugins.metrics.graphs import resolve_combined_single_metric_spec  # pylint: disable=no-name-in-module
        metrics = resolve_combined_single_metric_spec(expression[1])
        return [(m["line_type"], m["color"], m['title'],
                 evaluate_time_series_expression(m['expression'], rrd_data)) for m in metrics]

    raise NotImplementedError()
def mk_eval(s: Union[bytes, str]) -> Any:
    """Decode a base64-encoded Python literal and return its value.

    The payload is parsed with ast.literal_eval, so only Python literals
    (no arbitrary expressions or calls) are accepted.

    Raises:
        MKGeneralException: when decoding or parsing fails.
    """
    try:
        decoded = base64.b64decode(s)
        return ast.literal_eval(ensure_str(decoded))
    except Exception:
        raise MKGeneralException(
            _('Unable to parse provided data: %s') % html.render_text(repr(s)))
def _verify_host_type(cls, host): if not host.is_cluster(): raise MKGeneralException( _("Can not clone a regular host as cluster host"))
def activate(self):
    """Default implementation: this config domain cannot be activated.

    Subclasses supporting activation must override this method.

    Raises:
        MKGeneralException: always.
    """
    raise MKGeneralException(_('The domain "%s" does not support activation.') % self.ident())
def evaluate_timeseries_transformation(transform, conf, operands_evaluated):
    """Raw-edition stub: time series transformations are an enterprise feature.

    Raises:
        MKGeneralException: always (feature unavailable in this edition).
    """
    raise MKGeneralException(
        _("Metric transformations and combinations like Forecasts calculations, "
          "aggregations and filtering are only available with the "
          "Checkmk Enterprise Editions"))
def registration_hook(self, plugin_class):
    """Validate an info plugin's ident before it gets registered.

    Raises:
        MKGeneralException: when the ident contains an underscore
            (except for the grandfathered "aggr_group").
    """
    ident = plugin_class().ident

    # TODO: Allow this broken thing for the moment
    if ident == "aggr_group":
        return

    if "_" in ident:
        raise MKGeneralException("Underscores must not be used in info names: %s" % ident)
def page_graph():
    """Render the prediction graph page for one host/service/metric.

    Reads host, service and datasource name from the request, loads the
    stored prediction for the selected time group and draws reference
    curve, warn/crit areas and — if the current time falls into the
    displayed range — the live RRD data and current value.

    Raises:
        MKGeneralException: when no prediction data is stored.
    """
    host_name = HostName(request.get_str_input_mandatory("host"))
    service = request.get_str_input_mandatory("service")
    dsname = request.get_str_input_mandatory("dsname")

    breadcrumb = make_service_breadcrumb(host_name, service)
    html.header(_("Prediction for %s - %s - %s") % (host_name, service, dsname), breadcrumb)

    # Get current value from perf_data via Livestatus
    current_value = get_current_perfdata(host_name, service, dsname)

    prediction_store = prediction.PredictionStore(host_name, service, dsname)

    timegroup, choices = _load_prediction_information(
        tg_name=request.var("timegroup"),
        prediction_store=prediction_store,
    )

    # Time group selector; submits the form on change
    html.begin_form("prediction")
    html.write_text(_("Show prediction for "))
    html.dropdown("timegroup",
                  choices,
                  deflt=timegroup.name,
                  onchange="document.prediction.submit();")
    html.hidden_fields()
    html.end_form()

    # Get prediction data
    tg_data = prediction_store.get_data(timegroup.name)
    if tg_data is None:
        raise MKGeneralException(_("Missing prediction data."))

    swapped = swap_and_compute_levels(tg_data, timegroup.params)
    vertical_range = compute_vertical_range(swapped)
    legend = [
        ("#000000", _("Reference")),
        ("#ffffff", _("OK area")),
        ("#ffff00", _("Warning area")),
        ("#ff0000", _("Critical area")),
    ]
    if current_value is not None:
        legend.append(("#0000ff", _("Current value: %.2f") % current_value))

    create_graph(timegroup.name, graph_size, timegroup.range, vertical_range, legend)

    # Warn/crit areas are painted first so the curves drawn later stay on top
    if "levels_upper" in timegroup.params:
        render_dual_area(swapped["upper_warn"], swapped["upper_crit"], "#fff000", 0.4)
        render_area_reverse(swapped["upper_crit"], "#ff0000", 0.1)

    if "levels_lower" in timegroup.params:
        render_dual_area(swapped["lower_crit"], swapped["lower_warn"], "#fff000", 0.4)
        render_area(swapped["lower_crit"], "#ff0000", 0.1)

    vscala_low = vertical_range[0]
    vscala_high = vertical_range[1]
    vert_scala = compute_vertical_scala(vscala_low, vscala_high)
    # X axis: one tick every two hours over a 24h slice
    time_scala = [[timegroup.range[0] + i * 3600, "%02d:00" % i] for i in range(0, 25, 2)]
    render_coordinates(vert_scala, time_scala)

    if "levels_lower" in timegroup.params:
        render_dual_area(swapped["average"], swapped["lower_warn"], "#ffffff", 0.5)
        render_curve(swapped["lower_warn"], "#e0e000", square=True)
        render_curve(swapped["lower_crit"], "#f0b0a0", square=True)

    if "levels_upper" in timegroup.params:
        render_dual_area(swapped["upper_warn"], swapped["average"], "#ffffff", 0.5)
        render_curve(swapped["upper_warn"], "#e0e000", square=True)
        render_curve(swapped["upper_crit"], "#f0b0b0", square=True)
    render_curve(swapped["average"], "#000000")
    render_curve(swapped["average"], "#000000")  # repetition makes line bolder

    # Try to get current RRD data and render it also
    from_time, until_time = timegroup.range
    now = time.time()
    if from_time <= now <= until_time:
        timeseries = prediction.get_rrd_data(host_name, service, dsname, "MAX", from_time,
                                             until_time)
        rrd_data = timeseries.values

        render_curve(rrd_data, "#0000ff", 2)
        if current_value is not None:
            rel_time = (now - prediction.timezone_at(now)) % timegroup.slice
            render_point(timegroup.range[0] + rel_time, current_value, "#0000ff")

    html.footer()
def page(self):
    """AJAX endpoint: run a single host diagnose test and return its result.

    Reads host and test name from the web API request, assembles the
    positional argument vector for the diag_host automation and returns a
    dict with a fresh transaction id, return code and output.

    Raises:
        MKAuthException: missing permission or invalid transaction.
        MKGeneralException: missing/unknown host, cluster host, or bad test.
    """
    check_csrf_token()

    if not user.may("wato.diag_host"):
        raise MKAuthException(_("You are not permitted to perform this action."))

    if not transactions.check_transaction():
        raise MKAuthException(_("Invalid transaction"))

    api_request = self.webapi_request()

    hostname = api_request.get("host")
    if not hostname:
        raise MKGeneralException(_("The hostname is missing."))

    host = Host.host(hostname)

    if not host:
        raise MKGeneralException(_("The given host does not exist."))
    if host.is_cluster():
        # Diagnose only makes sense for real (monitored) hosts
        raise MKGeneralException(_("This view does not support cluster hosts."))

    host.need_permission("read")

    _test = api_request.get("_test")
    if not _test:
        raise MKGeneralException(_("The test is missing."))

    # Execute a specific test
    if _test not in dict(ModeDiagHost.diag_host_tests()):
        raise MKGeneralException(_("Invalid test."))

    # Fixed-size positional argument vector for diag_host; unset slots stay "".
    # NOTE(review): args[6] is never assigned here, while an older variant of
    # this handler filled it with "datasource_program" — confirm this is
    # intentional for the diag_host signature.
    # TODO: Use ModeDiagHost._vs_rules() for processing/validation?
    args: List[str] = [""] * 13
    for idx, what in enumerate(
        [
            "ipaddress",
            "snmp_community",
            "agent_port",
            "snmp_timeout",
            "snmp_retries",
            "tcp_connect_timeout",
        ]
    ):
        args[idx] = api_request.get(what, "")

    if api_request.get("snmpv3_use"):
        # Map the numeric dropdown value to the SNMPv3 security level name
        snmpv3_use = {
            "0": "noAuthNoPriv",
            "1": "authNoPriv",
            "2": "authPriv",
        }.get(api_request.get("snmpv3_use", ""), "")

        args[7] = snmpv3_use
        if snmpv3_use != "noAuthNoPriv":
            snmpv3_auth_proto = {
                str(DropdownChoice.option_id("md5")): "md5",
                str(DropdownChoice.option_id("sha")): "sha",
            }.get(api_request.get("snmpv3_auth_proto", ""), "")

            args[8] = snmpv3_auth_proto
            args[9] = api_request.get("snmpv3_security_name", "")
            args[10] = api_request.get("snmpv3_security_password", "")

            if snmpv3_use == "authPriv":
                # Privacy settings are only relevant for the highest level
                snmpv3_privacy_proto = {
                    str(DropdownChoice.option_id("DES")): "DES",
                    str(DropdownChoice.option_id("AES")): "AES",
                }.get(api_request.get("snmpv3_privacy_proto", ""), "")

                args[11] = snmpv3_privacy_proto
                args[12] = api_request.get("snmpv3_privacy_password", "")
        else:
            # noAuthNoPriv still carries the security name
            args[9] = api_request.get("snmpv3_security_name", "")

    result = diag_host(
        host.site_id(),
        hostname,
        _test,
        *args,
    )
    return {
        "next_transid": transactions.fresh_transid(),
        "status_code": result.return_code,
        "output": result.response,
    }
def parse_file(site, host_name, file_name, hidecontext=False):
    """Parse a logwatch log file into a list of chunks.

    Each chunk corresponds to one ``<<<date time level>>>`` header and holds
    the header's severity as integer 'level' (CRIT=2, WARN=1, otherwise 0),
    a 'datetime' object and the classified data 'lines'. Context lines are
    skipped when ``hidecontext`` is set. Returns None when the log file
    cannot be read.

    Raises:
        MKGeneralException: when parsing fails (re-raises in debug mode).
    """
    # Severity mapping for chunk headers and first-character markers of
    # data lines ('u' = unacknowledged WARN).
    header_levels = {'CRIT': 2, 'WARN': 1, 'OK': 0}
    line_classes = {'W': (1, 'WARN'), 'u': (1, 'WARN'), 'C': (2, 'CRIT'), 'O': (0, 'OK')}

    log_chunks: List[Dict[str, Any]] = []
    try:
        lines = get_logfile_lines(site, host_name, file_name)
        if lines is None:
            return None

        # skip hash line. this doesn't exist in older files
        while lines and lines[0].startswith('#'):
            lines = lines[1:]

        chunk: Optional[Dict[str, Any]] = None
        log_lines: List[Dict[str, Any]] = []
        for raw_line in lines:
            stripped = raw_line.strip()
            if not stripped:
                continue

            if stripped[:3] == '<<<':
                # New header line opens a fresh chunk
                log_lines = []
                chunk = {'lines': log_lines}
                log_chunks.append(chunk)

                date, logtime, level = stripped[3:-3].split(' ')

                # Save level as integer to make it better comparable
                chunk['level'] = header_levels.get(level, 0)
                chunk['datetime'] = datetime.datetime(
                    *time.strptime(date + ' ' + logtime, "%Y-%m-%d %H:%M:%S")[0:5])

            elif chunk:  # data line inside a chunk (lines before any header are ignored)
                # Classify the line for styling by its first character
                marker = stripped[0]
                if marker in line_classes:
                    line_level, line_class = line_classes[marker]
                elif not hidecontext:
                    line_level, line_class = 0, 'context'
                else:
                    continue  # ignore this line

                log_lines.append({
                    'level': line_level,
                    'class': line_class,
                    'line': stripped[2:],
                })
    except Exception as e:
        if config.debug:
            raise
        raise MKGeneralException(
            html.render_text(_("Cannot parse log file %s: %s") % (file_name, e)))

    return log_chunks
def host_service_graph_dashlet_cmk(graph_identification, custom_graph_render_options):
    """Render a Checkmk graph into a dashlet (PNP-style timerange variant).

    Reads "width"/"height" (ex units) and the PNP timerange index ("0".."4")
    from the request, resolves the graph recipe and writes the rendered
    HTML to the response.
    """
    graph_render_options = {**default_dashlet_graph_render_options}
    graph_render_options = artwork.add_default_render_options(graph_render_options)
    graph_render_options.update(custom_graph_render_options)

    # Convert the requested dashlet size from ex units into graph units and
    # subtract title and margins so the canvas fits the dashlet exactly.
    width_var = html.request.get_float_input_mandatory("width", 0.0)
    width = int((width_var / html_size_per_ex))

    height_var = html.request.get_float_input_mandatory("height", 0.0)
    height = int((height_var / html_size_per_ex))

    bounds = _graph_margin_ex(graph_render_options)
    height -= _graph_title_height_ex(graph_render_options)
    height -= bounds.top + bounds.bottom
    width -= bounds.left + bounds.right

    graph_render_options["size"] = (width, height)

    # The timerange is specified in PNP like manner.
    range_secs = {
        "0": 4 * 3600,
        "1": 25 * 3600,
        "2": 7 * 86400,
        "3": 31 * 86400,
        "4": 366 * 86400,
    }

    secs_var = html.request.var("timerange")
    if secs_var not in range_secs:
        secs = 4 * 3600  # fall back to the 4h view
    else:
        secs = range_secs[secs_var]
    end_time = time.time()
    start_time = end_time - secs
    graph_data_range = {
        "time_range": (start_time, end_time),
    }

    graph_data_range["step"] = estimate_graph_step_for_html(graph_data_range["time_range"],
                                                            graph_render_options)

    graph_recipes = resolve_graph_recipe(graph_identification,
                                         destination=GraphDestinations.dashlet)
    if not isinstance(graph_recipes, list):
        return graph_recipes  # This is to html.write the exception
    if graph_recipes:
        graph_recipe = graph_recipes[0]
    else:
        raise MKGeneralException(_("Failed to calculate a graph recipe."))

    # When the legend is enabled, we need to reduce the height by the height of the legend to
    # make the graph fit into the dashlet area.
    if graph_render_options["show_legend"]:
        # TODO FIXME: This graph artwork is calulated twice. Once here and once in
        # render_graphs_from_specification_html()
        graph_artwork = artwork.compute_graph_artwork(graph_recipe, graph_data_range,
                                                      graph_render_options)
        if graph_artwork["curves"]:
            legend_height = graph_legend_height_ex(graph_render_options, graph_artwork)
            graph_render_options["size"] = (width, height - legend_height)

    html_code = render_graphs_from_definitions([graph_recipe], graph_data_range,
                                               graph_render_options,
                                               render_async=False)
    html.write(html_code)
def _perform_tests(self):
    """Run the configuration tests for all relevant sites in parallel.

    One worker process per site is spawned; each worker pushes its repr()'d
    results onto a shared queue. The parent polls the queue until all workers
    have exited, then groups the results first by category and then by test.

    Returns:
        Dict mapping category -> test_id -> {"site_results": {site_id: result},
        "test": {"title": ..., "help": ...}}.
    """
    test_sites = self._analyze_sites()

    self._logger.debug("Executing tests for %d sites" % len(test_sites))
    results_by_site = {}

    # Results are fetched simultaneously from the remote sites
    result_queue = multiprocessing.JoinableQueue(
    )  # type: multiprocessing.Queue[Tuple[SiteId, str]]

    processes = []
    # Pre-bind site_id so the generic except-handler below can attribute an
    # error even if it fires before/outside the queue read.
    site_id = SiteId("unknown_site")
    for site_id in test_sites:
        process = multiprocessing.Process(target=self._perform_tests_for_site,
                                          args=(site_id, result_queue))
        process.start()
        processes.append((site_id, process))

    # Now collect the results from the queue until all processes are finished
    while any(p.is_alive() for site_id, p in processes):
        try:
            site_id, results_data = result_queue.get_nowait()
            result_queue.task_done()
            # Workers serialize their payload with repr(); parse it back.
            result = ast.literal_eval(results_data)

            if result["state"] == 1:
                # Worker reported a hard failure; its response is the message.
                raise MKGeneralException(result["response"])

            if result["state"] == 0:
                test_results = []
                for result_data in result["response"]:
                    result = ACResult.from_repr(result_data)
                    test_results.append(result)

                # Add general connectivity result
                result = ACResultOK(_("No connectivity problems"))
                result.from_test(ACTestConnectivity())
                result.site_id = site_id
                test_results.append(result)

                results_by_site[site_id] = test_results

            else:
                raise NotImplementedError()

        except six.moves.queue.Empty:
            time.sleep(0.5)  # wait some time to prevent CPU hogs

        except Exception as e:
            # Any failure while handling a site's payload is turned into a
            # CRIT connectivity result for that site instead of aborting.
            result = ACResultCRIT("%s" % e)
            result.from_test(ACTestConnectivity())
            result.site_id = site_id
            results_by_site[site_id] = [result]

            logger.exception("error analyzing configuration for site %s", site_id)

    self._logger.debug("Got test results")

    # Group results by category in first instance and then by test
    results_by_category = {}  # type: Dict[str, Dict[str, Dict[str, Any]]]
    for site_id, results in results_by_site.items():
        for result in results:
            category_results = results_by_category.setdefault(result.category, {})
            test_results_by_site = category_results.setdefault(
                result.test_id, {
                    "site_results": {},
                    "test": {
                        "title": result.title,
                        "help": result.help,
                    }
                })
            test_results_by_site["site_results"][result.site_id] = result

    return results_by_category
def _do_create_snapshot(data):
    """Create a WATO snapshot tar file from the default backup domains.

    The snapshot is assembled in a per-snapshot work directory and only moved
    to its final location when complete. Each domain is packed into its own
    tar.gz subtar; a "checksums" member records a SHA-256 hash plus a
    secret-keyed signature per subtar so restores can verify integrity.

    Args:
        data: Dict with at least "snapshot_name", "comment", "created_by"
            and "type" keys.

    Raises:
        MKGeneralException: If packing a domain fails or appending a subtar
            to the snapshot fails.
    """
    snapshot_name = data["snapshot_name"]
    work_dir = snapshot_dir.rstrip("/") + "/workdir/%s" % snapshot_name

    try:
        if not os.path.exists(work_dir):
            os.makedirs(work_dir)

        # Open / initialize files
        filename_target = "%s/%s" % (snapshot_dir, snapshot_name)
        filename_work = "%s/%s.work" % (work_dir, snapshot_name)

        # Touch the target so it exists (empty) while the snapshot is built.
        with open(filename_target, "wb"):
            pass

        def get_basic_tarinfo(name):
            # Build a TarInfo for a regular, root-owned 0644 member.
            tarinfo = tarfile.TarInfo(name)
            tarinfo.mtime = int(time.time())
            tarinfo.uid = 0
            tarinfo.gid = 0
            tarinfo.mode = 0o644
            tarinfo.type = tarfile.REGTYPE
            return tarinfo

        # Initialize the snapshot tar file and populate with initial information
        tar_in_progress = tarfile.open(filename_work, "w")  # pylint:disable=consider-using-with

        for key in ["comment", "created_by", "type"]:
            tarinfo = get_basic_tarinfo(key)
            encoded_value = data[key].encode("utf-8")
            tarinfo.size = len(encoded_value)
            tar_in_progress.addfile(tarinfo, io.BytesIO(encoded_value))

        tar_in_progress.close()

        # Process domains (sorted)
        subtar_info = {}

        for name, info in sorted(_get_default_backup_domains().items()):
            prefix = info.get("prefix", "")
            filename_subtar = "%s.tar.gz" % name
            path_subtar = "%s/%s" % (work_dir, filename_subtar)

            # An empty path entry means "the prefix directory itself".
            paths = ["." if x[1] == "" else x[1] for x in info.get("paths", [])]
            command = [
                "tar",
                "czf",
                path_subtar,
                "--ignore-failed-read",
                "--force-local",
                "-C",
                prefix,
            ] + paths

            proc_create = subprocess.Popen(  # pylint:disable=consider-using-with
                command,
                stdin=None,
                close_fds=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=prefix,
                encoding="utf-8",
            )
            _stdout, stderr = proc_create.communicate()
            exit_code = proc_create.wait()

            # Allow exit codes 0 and 1 (files changed during backup)
            if exit_code not in [0, 1]:
                raise MKGeneralException(
                    "Error while creating backup of %s (Exit Code %d) - %s.\n%s" %
                    (name, exit_code, stderr, command))

            # Hash the subtar and sign the hash with the site-local secret.
            with open(path_subtar, "rb") as subtar:
                subtar_hash = sha256(subtar.read()).hexdigest()

            subtar_signed = sha256(subtar_hash.encode() + _snapshot_secret()).hexdigest()
            subtar_info[filename_subtar] = (subtar_hash, subtar_signed)

            # Append tar.gz subtar to snapshot
            command = ["tar", "--append", "--file=" + filename_work, filename_subtar]
            proc_append = subprocess.Popen(  # pylint:disable=consider-using-with
                command,
                cwd=work_dir,
                close_fds=True,
            )
            proc_append.communicate()
            exit_code = proc_append.wait()

            # NOTE(review): filename_subtar is relative; exists()/unlink() resolve
            # against the parent's cwd, not work_dir where the subtar lives —
            # presumably path_subtar was intended. TODO confirm.
            if os.path.exists(filename_subtar):
                os.unlink(filename_subtar)

            if exit_code != 0:
                raise MKGeneralException("Error on adding backup domain %s to tarfile" % name)

        # Now add the info file which contains hashes and signed hashes for
        # each of the subtars
        info = "".join(["%s %s %s\n" % (k, v[0], v[1]) for k, v in subtar_info.items()]) + "\n"

        tar_in_progress = tarfile.open(filename_work, "a")  # pylint:disable=consider-using-with
        tarinfo = get_basic_tarinfo("checksums")
        tarinfo.size = len(info)
        tar_in_progress.addfile(tarinfo, io.BytesIO(info.encode()))
        tar_in_progress.close()

        # Atomically publish the finished snapshot.
        shutil.move(filename_work, filename_target)

    finally:
        shutil.rmtree(work_dir)
def page(self): # growth_root_nodes: a list of mandatory hostnames # mesh_depth: number of hops from growth root # growth_forbidden: block further traversal at the given nodes # growth_continue_nodes: expand these nodes, event if the depth has been reached try: topology_config = json.loads(html.request.var("topology_config")) except (TypeError, ValueError): raise MKGeneralException( _("Invalid topology_config %r") % html.request.var("topology_config")) topology = self._get_topology_instance(topology_config) meshes = topology.compute() topology_info = {"topology_meshes": {}} topology_info = { "topology_chunks": {}, } topology_info["headline"] = topology.title() topology_info["errors"] = topology.errors() topology_info["max_nodes"] = topology.max_nodes topology_info["mesh_depth"] = topology.mesh_depth for mesh in meshes: if not mesh: continue # Pick root host growth_roots = sorted( mesh.intersection(set(topology_config["growth_root_nodes"]))) if growth_roots: mesh_root = growth_roots[0] else: mesh_root = list(mesh)[0] mesh_info = topology.get_info_for_host(mesh_root, mesh) mesh.remove(mesh_root) mesh = sorted(list(mesh)) mesh.insert(0, mesh_root) if mesh: mesh_info["children"] = [] mesh_info["children"].extend( [topology.get_info_for_host(x, mesh) for x in mesh[1:]]) mesh_links = set() # Incoming connections for idx, hostname in enumerate(mesh): for child in topology.get_host_incoming(hostname): if child in mesh: mesh_links.add((mesh.index(child), idx)) # Outgoing connections for idx, hostname in enumerate(mesh): for parent in topology.get_host_outgoing(hostname): if parent in mesh: mesh_links.add((idx, mesh.index(parent))) topology_info["topology_chunks"][mesh_root] = { "layout": { "config": { "line_config": { "style": "straight", "dashed": True, } } }, "hierarchy": mesh_info, "links": list(mesh_links) } return topology_info
def get_graph_template(template_id): if template_id.startswith("METRIC_"): return generic_graph_template(template_id[7:]) if template_id in graph_info: return graph_info[template_id] raise MKGeneralException(_("There is no graph template with the id '%s'") % template_id)
def _transform_builtin_dashboards() -> None:
    """Migrate builtin dashboards (in place) to the current context format.

    Runs at most once per request (guarded via the application globals `g`).
    Dashboards that already have a 'context' key are assumed to be in the new
    format and are left untouched.

    Raises:
        MKGeneralException: If a dashlet cannot be converted automatically.
    """
    if 'builtin_dashboards_transformed' in g:
        return  # Only do this once
    for name, dashboard in builtin_dashboards.items():
        # Do not transform dashboards which are already in the new format
        if 'context' in dashboard:
            continue

        # Transform the dashlets
        for nr, dashlet in enumerate(dashboard['dashlets']):
            dashlet.setdefault('show_title', True)

            if dashlet.get('url', '').startswith('dashlet_hoststats') or \
               dashlet.get('url', '').startswith('dashlet_servicestats'):

                # hoststats and servicestats: derive the dashlet type from the
                # URL ("dashlet_<type>...." with the "dashlet_" prefix stripped).
                dashlet['type'] = dashlet['url'][8:].split('.', 1)[0]

                if '?' in dashlet['url']:
                    # Transform old parameters:
                    # wato_folder
                    # host_contact_group
                    # service_contact_group
                    paramstr = dashlet['url'].split('?', 1)[1]
                    dashlet['context'] = {}
                    for key, val in [p.split('=', 1) for p in paramstr.split('&')]:
                        if key == 'host_contact_group':
                            dashlet['context']['opthost_contactgroup'] = {
                                'neg_opthost_contact_group': '',
                                'opthost_contact_group': val,
                            }
                        elif key == 'service_contact_group':
                            dashlet['context']['optservice_contactgroup'] = {
                                'neg_optservice_contact_group': '',
                                'optservice_contact_group': val,
                            }
                        elif key == 'wato_folder':
                            dashlet['context']['wato_folder'] = {
                                'wato_folder': val,
                            }

                del dashlet['url']

            elif dashlet.get('urlfunc') and not isinstance(dashlet['urlfunc'], str):
                # A callable urlfunc cannot be serialized/migrated automatically.
                raise MKGeneralException(
                    _('Unable to transform dashlet %d of dashboard %s: '
                      'the dashlet is using "urlfunc" which can not be '
                      'converted automatically.') % (nr, name))

            elif dashlet.get('url', '') != '' or dashlet.get('urlfunc') or dashlet.get('iframe'):
                # Normal URL based dashlet
                dashlet['type'] = 'url'

                if dashlet.get('iframe'):
                    dashlet['url'] = dashlet['iframe']
                    del dashlet['iframe']

            elif dashlet.get('view', '') != '':
                # Transform views
                # There might be more than the name in the view definition
                view_name = dashlet['view'].split('&')[0]

                # Copy the view definition into the dashlet
                copy_view_into_dashlet(dashlet, nr, view_name, load_from_all_views=True)
                del dashlet['view']

            else:
                raise MKGeneralException(
                    _('Unable to transform dashlet %d of dashboard %s. '
                      'You will need to migrate it on your own. Definition: %r') %
                    (nr, name, escaping.escape_attribute(dashlet)))

            dashlet.setdefault('context', {})
            dashlet.setdefault('single_infos', [])

        # the modification time of builtin dashboards can not be checked as on user specific
        # dashboards. Set it to 0 to disable the modification check.
        dashboard.setdefault('mtime', 0)

        dashboard.setdefault('show_title', True)
        if dashboard['title'] is None:
            dashboard['title'] = _('No title')
            dashboard['show_title'] = False

        dashboard.setdefault('single_infos', [])
        dashboard.setdefault('context', {})
        dashboard.setdefault('topic', _('Overview'))
        dashboard.setdefault('description', dashboard.get('title', ''))

    # Mark the migration as done for this request.
    g.builtin_dashboards_transformed = True
def activate(self): raise MKGeneralException( _("The domain \"%s\" does not support activation.") % self.ident())
def results(self) -> List[ACResult]: if not self._executed: raise MKGeneralException(_("The test has not been executed yet")) return self._results
def _verify_not_using_threaded_mpm(self) -> None: if self.is_multithread: raise MKGeneralException( _("You are trying to Checkmk together with a threaded Apache multiprocessing module (MPM). " "Check_MK is only working with the prefork module. Please change the MPM module to make " "Check_MK work."))
def execute_network_scan_job() -> None: init_wato_datastructures(with_wato_lock=True) if watolib.is_wato_slave_site(): return # Don't execute this job on slaves. folder = find_folder_to_scan() if not folder: return # Nothing to do. # We need to have the context of the user. The jobs are executed when # config.set_user_by_id() has not been executed yet. So there is no user context # available. Use the run_as attribute from the job config and revert # the previous state after completion. old_user = config.user.id run_as = folder.attribute("network_scan")["run_as"] if not userdb.user_exists(run_as): raise MKGeneralException( _("The user %s used by the network " "scan of the folder %s does not exist.") % (run_as, folder.title())) config.set_user_by_id(folder.attribute("network_scan")["run_as"]) result: NetworkScanResult = { "start": time.time(), "end": True, # means currently running "state": None, "output": "The scan is currently running.", } # Mark the scan in progress: Is important in case the request takes longer than # the interval of the cron job (1 minute). Otherwise the scan might be started # a second time before the first one finished. save_network_scan_result(folder, result) try: if config.site_is_local(folder.site_id()): found = cmk.gui.watolib.network_scan.do_network_scan(folder) else: found = watolib.do_remote_automation(config.site(folder.site_id()), "network-scan", [("folder", folder.path())]) if not isinstance(found, list): raise MKGeneralException( _("Received an invalid network scan result: %r") % found) add_scanned_hosts_to_folder(folder, found) result.update({ "state": True, "output": _("The network scan found %d new hosts.") % len(found), }) except Exception as e: result.update({ "state": False, "output": _("An exception occured: %s") % e, }) logger.error("Exception in network scan:\n%s", traceback.format_exc()) result["end"] = time.time() save_network_scan_result(folder, result) if old_user: config.set_user_by_id(old_user)
def extract_domains(tar, domains):
    # type: (tarfile.TarFile, Dict[str, DomainSpec]) -> None
    """Restore the given backup domains from a snapshot tar file.

    Runs five phases over every domain subtar found in the snapshot:
    Permissions check, Pre-Restore hook, Cleanup of existing files, Extract,
    and Post-Restore hook. The first two phases abort the whole restore on
    error; later phases collect errors and continue.

    Args:
        tar: The opened snapshot tar file containing one <name>.tar.gz
            member per domain.
        domains: Mapping of domain name to its DomainSpec; only domains
            present in both the snapshot and this mapping are restored.

    Raises:
        MKGeneralException: On errors in an abort-on-error phase, or at the
            end if any later phase collected errors.
    """
    # Index the per-domain subtars by their domain name (strip ".tar.gz").
    tar_domains = {}
    for member in tar.getmembers():
        try:
            if member.name.endswith(".tar.gz"):
                tar_domains[member.name[:-7]] = member
        except Exception:
            pass

    # We are using the var_dir, because tmp_dir might not have enough space
    restore_dir = cmk.utils.paths.var_dir + "/wato/snapshots/restore_snapshot"
    if not os.path.exists(restore_dir):
        os.makedirs(restore_dir)

    def check_domain(domain, tar_member):
        # type: (DomainSpec, tarfile.TarInfo) -> List[Text]
        # Phase 1: verify every target path is (or can be made) writable.
        errors = []

        prefix = domain["prefix"]

        def check_exists_or_writable(path_tokens):
            # type: (List[str]) -> bool
            # Walk up the path until an existing ancestor is found; that
            # ancestor must be writable for the restore to succeed.
            if not path_tokens:
                return False
            if os.path.exists("/".join(path_tokens)):
                if os.access("/".join(path_tokens), os.W_OK):
                    return True  # exists and writable

                errors.append(_("Permission problem: Path not writable %s") % "/".join(path_tokens))
                return False  # not writable

            return check_exists_or_writable(path_tokens[:-1])

        # The complete tar file never fits in stringIO buffer..
        tar.extract(tar_member, restore_dir)

        # Older versions of python tarfile handle empty subtar archives :(
        # This won't work: subtar = tarfile.open("%s/%s" % (restore_dir, tar_member.name))
        p = subprocess.Popen(
            ["tar", "tzf", "%s/%s" % (restore_dir, tar_member.name)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
        )
        stdout, stderr = p.communicate()
        if stderr:
            errors.append(_("Contains corrupt file %s") % tar_member.name)
            return errors

        # NOTE(review): stdout is a str, so this iterates single CHARACTERS,
        # not lines — presumably stdout.splitlines() was intended. TODO confirm.
        for line in stdout:
            full_path = prefix + "/" + line
            path_tokens = full_path.split("/")
            check_exists_or_writable(path_tokens)

        # Cleanup
        os.unlink("%s/%s" % (restore_dir, tar_member.name))

        return errors

    def cleanup_domain(domain):
        # type: (DomainSpec) -> List[Text]
        # Phase 3: remove existing files/dirs that the snapshot will replace.
        # Some domains, e.g. authorization, do not get a cleanup
        if domain.get("cleanup") is False:
            return []

        def path_valid(prefix, path):
            # type: (str, str) -> bool
            # Reject absolute paths and parent-dir escapes.
            if path.startswith("/") or path.startswith(".."):
                return False
            return True

        # Remove old stuff
        # NOTE(review): "paths" entries appear to be (what, path) pairs (a list
        # of tuples elsewhere); the {} default would unpack dict KEYS — verify
        # against the DomainSpec definition.
        for what, path in domain.get("paths", {}):
            if not path_valid(domain["prefix"], path):
                continue
            full_path = "%s/%s" % (domain["prefix"], path)
            if os.path.exists(full_path):
                if what == "dir":
                    # Keep files matching the domain's exclude patterns.
                    exclude_files = []
                    for pattern in domain.get("exclude", []):
                        if "*" in pattern:
                            exclude_files.extend(
                                glob.glob("%s/%s" % (domain["prefix"], pattern)))
                        else:
                            exclude_files.append("%s/%s" % (domain["prefix"], pattern))
                    cleanup_dir(full_path, exclude_files)
                else:
                    os.remove(full_path)
        return []

    def extract_domain(domain, tar_member):
        # type: (DomainSpec, tarfile.TarInfo) -> List[Text]
        # Phase 4: unpack the domain subtar into its target prefix.
        try:
            target_dir = domain.get("prefix")
            if not target_dir:
                return []
            # The complete tar.gz file never fits in stringIO buffer..
            tar.extract(tar_member, restore_dir)

            command = ["tar", "xzf", "%s/%s" % (restore_dir, tar_member.name), "-C", target_dir]
            p = subprocess.Popen(
                command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                encoding="utf-8",
            )
            _stdout, stderr = p.communicate()
            exit_code = p.wait()
            if exit_code:
                return ["%s - %s" % (domain["title"], stderr)]
        except Exception as e:
            return ["%s - %s" % (domain["title"], str(e))]

        return []

    def execute_restore(domain, is_pre_restore=True):
        # type: (DomainSpec, bool) -> List[Text]
        # Phases 2 and 5: run the optional pre/post restore hooks.
        if is_pre_restore:
            if "pre_restore" in domain:
                return domain["pre_restore"]()
        else:
            if "post_restore" in domain:
                return domain["post_restore"]()
        return []

    total_errors = []
    logger.info("Restoring snapshot: %s", tar.name)
    logger.info("Domains: %s", ", ".join(tar_domains.keys()))
    # Phase table: (phase name, abort-on-error?, handler(domain, tar_member)).
    for what, abort_on_error, handler in [
        ("Permissions", True, check_domain),
        ("Pre-Restore", True,
         lambda domain, tar_member: execute_restore(domain, is_pre_restore=True)),
        ("Cleanup", False, lambda domain, tar_member: cleanup_domain(domain)),
        ("Extract", False, extract_domain),
        ("Post-Restore", False,
         lambda domain, tar_member: execute_restore(domain, is_pre_restore=False))
    ]:
        errors = []  # type: List[Text]
        for name, tar_member in tar_domains.items():
            if name in domains:
                try:
                    dom_errors = handler(domains[name], tar_member)
                    errors.extend(dom_errors or [])
                except Exception:
                    # This should NEVER happen
                    err_info = "Restore-Phase: %s, Domain: %s\nError: %s" % (
                        what, name, traceback.format_exc())
                    errors.append(err_info)
                    logger.critical(err_info)
                    if not abort_on_error:
                        # At this state, the restored data is broken.
                        # We still try to apply the rest of the snapshot
                        # Hopefully the log entry helps in identifying the problem..
                        logger.critical(
                            "Snapshot restore FAILED! (possible loss of snapshot data)")
                        continue
                    break

        if errors:
            if what == "Permissions":
                # Duplicate permission errors are common; report each once.
                errors = list(set(errors))
                errors.append(
                    _("<br>If there are permission problems, please ensure the site user has write permissions."
                     ))
            if abort_on_error:
                raise MKGeneralException(
                    _("%s - Unable to restore snapshot:<br>%s") % (what, "<br>".join(errors)))
            total_errors.extend(errors)

    # Cleanup
    wipe_directory(restore_dir)

    if total_errors:
        raise MKGeneralException(
            _("Errors on restoring snapshot:<br>%s") % "<br>".join(total_errors))
def _show_configuration_variables(self, groups):
    """Render the global settings page: one form section per matching variable.

    Variables are grouped; a group header is only painted when at least one of
    its variables survives the filters (enabled domain, known to this site,
    shown in global settings, "only modified" toggle, search term).

    Args:
        groups: Iterable of config variable group instances to render.

    Raises:
        MKGeneralException: In debug mode, for a core variable unknown to
            this installation.
    """
    search = self._search

    at_least_one_painted = False
    html.open_div(class_="globalvars")
    for group in sorted(groups, key=lambda g: g.sort_index()):
        header_is_painted = False  # needed for omitting empty groups

        for config_variable_class in group.config_variables():
            config_variable = config_variable_class()
            varname = config_variable.ident()
            valuespec = config_variable.valuespec()

            if not config_variable.domain().enabled():
                continue

            # Core variables without a default here are unknown to this
            # installation: fail loudly in debug mode, skip otherwise.
            if config_variable.domain(
            ) == watolib.ConfigDomainCore and varname not in self._default_values:
                if config.debug:
                    raise MKGeneralException(
                        "The configuration variable <tt>%s</tt> is unknown to "
                        "your local Check_MK installation" % varname)
                continue

            if not config_variable.in_global_settings():
                continue

            if self._show_only_modified and varname not in self._current_settings:
                continue

            help_text = valuespec.help() or ''
            title_text = valuespec.title()

            # The search term matches against group title, domain ident,
            # variable name, help text and title.
            if search and search not in group.title().lower() \
                and search not in config_variable.domain().ident.lower() \
                and search not in varname \
                and search not in help_text.lower() \
                and search not in title_text.lower():
                continue  # skip variable when search is performed and nothing matches
            at_least_one_painted = True

            if not header_is_painted:
                # always open headers when searching
                forms.header(group.title(), isopen=search or self._show_only_modified)
                header_is_painted = True

            default_value = self._default_values[varname]

            edit_url = watolib.folder_preserving_link([("mode", self._edit_mode()),
                                                       ("varname", varname),
                                                       ("site", html.request.var("site", ""))])
            title = html.render_a(
                title_text,
                href=edit_url,
                class_="modified" if varname in self._current_settings else None,
                title=escaping.strip_tags(help_text))

            # Effective value precedence: site-local > global > default.
            if varname in self._current_settings:
                value = self._current_settings[varname]
            elif varname in self._global_settings:
                value = self._global_settings[varname]
            else:
                value = default_value

            try:
                to_text = valuespec.value_to_text(value)
            except Exception:
                # Never let a broken valuespec take down the whole page.
                logger.exception("error converting %r to text", value)
                to_text = html.render_error(_("Failed to render value: %r") % value)

            # Is this a simple (single) value or not? change styling in these cases...
            simple = True
            if '\n' in to_text or '<td>' in to_text:
                simple = False
            forms.section(title, simple=simple)

            # CSS class and tooltip signal where the value was modified.
            if varname in self._current_settings:
                modified_cls: Optional[str] = "modified"
                value_title: Optional[str] = _("This option has been modified.")
            elif varname in self._global_settings:
                modified_cls = "modified globally"
                value_title = _("This option has been modified in global settings.")
            else:
                modified_cls = None
                value_title = None

            if is_a_checkbox(valuespec):
                # Boolean settings get an inline toggle switch.
                html.open_div(class_=["toggle_switch_container", modified_cls])
                html.toggle_switch(
                    enabled=value,
                    help_txt=_("Immediately toggle this setting"),
                    href=html.makeactionuri([("_action", "toggle"), ("_varname", varname)]),
                    class_=modified_cls,
                    title=value_title,
                )
                html.close_div()
            else:
                html.a(HTML(to_text), href=edit_url, class_=modified_cls, title=value_title)

        if header_is_painted:
            forms.end()
    if not at_least_one_painted and search:
        html.show_message(_('Did not find any global setting matching your search.'))
    html.close_div()