Example #1
def ajax_sync() -> None:
    try:
        job = UserSyncBackgroundJob()
        job.set_function(job.do_sync,
                         add_to_changelog=False,
                         enforce_sync=True,
                         load_users_func=load_users,
                         save_users_func=save_users)
        try:
            job.start()
        except background_job.BackgroundJobAlreadyRunning as e:
            raise MKUserError(None, _("Another user synchronization is already running: %s") % e)
        html.write('OK Started synchronization\n')
    except Exception as e:
        logger.exception("error synchronizing user DB")
        if config.debug:
            raise
        html.write('ERROR %s\n' % e)
Example #2
    def action(self) -> ActionResult:
        try:
            html.check_transaction()
            config.user.save_file("parentscan", dict(self._settings._asdict()))

            self._job.set_function(self._job.do_execute, self._settings,
                                   self._get_tasks())
            self._job.start()
        except Exception as e:
            if config.debug:
                raise
            logger.exception("Failed to start parent scan")
            raise MKUserError(
                None,
                _("Failed to start parent scan: %s") %
                ("%s" % e).replace("\n", "\n<br>"))

        raise HTTPRedirect(self._job.detail_url())
Example #3
def ajax_render_graph_content():
    html.set_output_format("json")
    try:
        request = html.get_request()
        response = {
            "result_code": 0,
            "result": render_graph_content_html(request["graph_recipe"],
                                                request["graph_data_range"],
                                                request["graph_render_options"]),
        }
    except Exception:
        logger.exception("could not render graph")
        response = {
            "result_code": 1,
            "result": _("Unhandled exception: %s") % traceback.format_exc(),
        }

    html.write(json.dumps(response))
Example #4
    @classmethod
    def from_config(
        cls,
        folder: CREFolder,
        ruleset: Ruleset,
        rule_config: Any,
    ) -> Rule:
        try:
            if isinstance(rule_config, dict):
                return cls._parse_dict_rule(
                    folder,
                    ruleset,
                    rule_config,
                )
            raise NotImplementedError()
        except Exception:
            logger.exception("error parsing rule")
            raise MKGeneralException(
                _("Invalid rule <tt>%s</tt>") % (rule_config, ))
Example #5
    def activate(
        self,
        settings: Optional[SerializedSettings] = None
    ) -> ConfigurationWarnings:
        try:
            warnings = self._update_trusted_cas(
                active_config.trusted_certificate_authorities)
            stunnel_pid = pid_from_file(cmk.utils.paths.omd_root / "tmp" /
                                        "run" / "stunnel-server.pid")
            if stunnel_pid:
                send_signal(stunnel_pid, signal.SIGHUP)
            return warnings
        except Exception:
            logger.exception("error updating trusted CAs")
            return [
                "Failed to create trusted CA file '%s': %s" %
                (self.trusted_cas_file, traceback.format_exc())
            ]
Example #6
    def action(self):
        if html.request.var('_delete'):
            delid = html.get_unicode_input("_delete")
            c = wato_confirm(
                _("Confirm deletion of user %s") % delid,
                _("Do you really want to delete the user %s?") % delid)
            if c:
                delete_users([delid])
            elif c is False:
                return ""

        elif html.request.var('_sync') and html.check_transaction():
            try:

                job = userdb.UserSyncBackgroundJob()
                job.set_function(job.do_sync,
                                 add_to_changelog=True,
                                 enforce_sync=True)

                try:
                    job.start()
                except background_job.BackgroundJobAlreadyRunning as e:
                    raise MKUserError(
                        None,
                        _("Another synchronization job is already running: %s")
                        % e)

                self._job_snapshot = job.get_status_snapshot()
            except Exception:
                logger.exception("error syncing users")
                raise MKUserError(
                    None,
                    traceback.format_exc().replace('\n', '<br>\n'))

        elif html.request.var("_bulk_delete_users"):
            return self._bulk_delete_users_after_confirm()

        elif html.check_transaction():
            action_handler = gui_background_job.ActionHandler()
            action_handler.handle_actions()
            if action_handler.did_acknowledge_job():
                self._job_snapshot = userdb.UserSyncBackgroundJob(
                ).get_status_snapshot()
                return None, _("Synchronization job acknowledged")
Example #7
    def _handle_exc(self, method) -> None:
        # FIXME: cyclical link between crash_reporting.py and pages.py
        from cmk.gui.crash_reporting import handle_exception_as_gui_crash_report
        try:
            # FIXME: These methods write to the response themselves. This needs to be refactored.
            method()
        except MKException as e:
            response.status_code = http_client.BAD_REQUEST
            html.write_text(str(e))
        except Exception as e:
            response.status_code = http_client.INTERNAL_SERVER_ERROR
            if config.debug:
                raise
            logger.exception("error calling AJAX page handler")
            handle_exception_as_gui_crash_report(
                plain_error=True,
                show_crash_link=getattr(g, "may_see_crash_reports", False),
            )
            html.write_text(str(e))
Example #8
    def action(self):
        config.user.need_permission("wato.services")

        tasks = get_tasks(self._get_hosts_to_discover(), self._bulk_size)

        try:
            html.check_transaction()
            self._job.set_function(self._job.do_execute, self._mode, self._use_cache, self._do_scan,
                                   self._error_handling, tasks)
            self._job.start()
        except Exception as e:
            if config.debug:
                raise
            logger.exception("Failed to start bulk discovery")
            raise MKUserError(
                None,
                _("Failed to start discovery: %s") % ("%s" % e).replace("\n", "\n<br>"))

        raise HTTPRedirect(self._job.detail_url())
Example #9
def parse_perf_data(perf_data_string, check_command=None):
    """ Convert perf_data_string into perf_data, extract check_command

This methods must not return None or anything else. It must strictly
return a tuple of perf_data list and the check_command. In case of
errors during parsing it returns an empty list for the perf_data.
"""
    # Strip away arguments like in "check_http!-H checkmk.com"
    if hasattr(check_command, 'split'):
        check_command = check_command.split("!")[0]

    # Split the perf data string into parts. Preserve quoted strings!
    parts = _split_perf_data(perf_data_string)

    if not parts:
        return [], check_command

    # Try if check command is appended to performance data
    # in a PNP like style
    if parts[-1].startswith("[") and parts[-1].endswith("]"):
        check_command = parts[-1][1:-1]
        del parts[-1]

    # Parse performance data, at least try
    perf_data = []

    for part in parts:
        try:
            varname, value_text, value_parts = parse_perf_values(part)

            value, unit_name = split_unit(value_text)
            if value is None:
                continue  # ignore useless empty variable

            perf_data_tuple = (varname, value, unit_name) + tuple(
                map(_float_or_int, value_parts))
            perf_data.append(perf_data_tuple)
        except Exception as exc:
            logger.exception("Failed to parse perfdata '%s'", perf_data_string)
            if config.debug:
                raise exc

    return perf_data, check_command
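
For context, parse_perf_data() consumes Nagios-style performance data strings of the form 'name=value;warn;crit;min;max', optionally followed by the check command in square brackets (PNP style). The following is a minimal, hypothetical illustration of such an input and the tuple shape one would expect back; the concrete values and shape are assumptions for illustration, not taken from the source above.

# Hypothetical illustration only -- not part of the Checkmk source shown above.
perf_data_string = "load1=2.5;4;8;0;8 load5=1.0;4;8;0;8 [check_mk-cpu.loads]"

# Assumed result shape: a list of (varname, value, unit, warn, crit, min, max)
# tuples plus the check command extracted from the trailing "[...]" block.
expected = (
    [("load1", 2.5, "", 4, 8, 0, 8),
     ("load5", 1.0, "", 4, 8, 0, 8)],
    "check_mk-cpu.loads",
)
print(expected)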
Example #10
def page_view() -> None:
    view_name = html.request.var("view_name")
    if not view_name:
        return page_index()

    view_spec = views.get_permitted_views().get(view_name)
    if not view_spec:
        raise MKUserError("view_name",
                          "No view defined with the name '%s'." % view_name)

    datasource = data_source_registry[view_spec["datasource"]]()
    context = visuals.get_merged_context(
        visuals.get_context_from_uri_vars(
            datasource.infos, single_infos=view_spec["single_infos"]),
        view_spec["context"],
    )

    view = views.View(view_name, view_spec, context)
    view.row_limit = views.get_limit()
    view.only_sites = visuals.get_only_sites_from_context(context)
    view.user_sorters = views.get_user_sorters()
    view.want_checkboxes = views.get_want_checkboxes()

    title = views.view_title(view.spec, view.context)
    mobile_html_head(title)

    # Need to be loaded before processing the painter_options below.
    # TODO: Make this dependency explicit
    display_options.load_from_html(html)

    painter_options = PainterOptions.get_instance()
    painter_options.load(view_name)

    try:
        views.process_view(MobileViewRenderer(view))
    except Exception as e:
        logger.exception("error showing mobile view")
        if config.debug:
            raise
        html.write("ERROR showing view: %s" %
                   escaping.escape_attribute(str(e)))

    mobile_html_foot()
Example #11
def page_api():
    try:
        pretty_print = False
        if not html.request.has_var("output_format"):
            html.set_output_format("json")
        if html.output_format not in _FORMATTERS:
            html.set_output_format("python")
            raise MKUserError(
                None, "Only %s are supported as output formats" %
                " and ".join('"%s"' % f for f in _FORMATTERS))

        # TODO: Add some kind of helper for boolean-valued variables?
        pretty_print_var = html.request.var("pretty_print", "no").lower()
        if pretty_print_var not in ("yes", "no"):
            raise MKUserError(None, 'pretty_print must be "yes" or "no"')
        pretty_print = pretty_print_var == "yes"

        api_call = _get_api_call()
        _check_permissions(api_call)
        watolib.init_wato_datastructures()  # Initialize host and site attributes
        request_object = _get_request(api_call)
        _check_formats(api_call, request_object)
        _check_request_keys(api_call, request_object)
        response = _execute_action(api_call, request_object)

    except MKAuthException as e:
        response = {
            "result_code": 1,
            "result": _("Authorization Error. Insufficent permissions for '%s'") % e
        }
    except MKException as e:
        response = {"result_code": 1, "result": _("Check_MK exception: %s") % e}
    except Exception as e:
        if config.debug:
            raise
        logger.exception("error handling web API request")
        response = {
            "result_code": 1,
            "result": _("Unhandled exception: %s") % traceback.format_exc(),
        }

    html.write(_FORMATTERS[html.output_format][1 if pretty_print else 0](response))
Example #12
def ajax_render_graph_content():
    response.set_content_type("application/json")
    try:
        api_request = request.get_request()
        resp = {
            "result_code":
            0,
            "result":
            render_graph_content_html(api_request["graph_recipe"],
                                      api_request["graph_data_range"],
                                      api_request["graph_render_options"]),
        }
    except Exception:
        logger.exception("could not render graph")
        resp = {
            "result_code": 1,
            "result": _("Unhandled exception: %s") % traceback.format_exc(),
        }

    response.set_data(json.dumps(resp))
Example #13
def _create_sample_config():
    """Create a very basic sample configuration

    But only if none of the files that we will create already exists. That is
    e.g. the case after an update from an older version where no sample config
    had been created.
    """
    if not _need_to_create_sample_config():
        return

    logger.debug("Start creating the sample config")
    for generator in sample_config_generator_registry.get_generators():
        try:
            logger.debug("Starting [%s]", generator.ident())
            generator.generate()
            logger.debug("Finished [%s]", generator.ident())
        except Exception:
            logger.exception("Exception in sample config generator [%s]", generator.ident())

    logger.debug("Finished creating the sample config")
Example #14
    def wrapper(*args, **kw):
        user = kw.get('user')
        token_info = kw.get('token_info')

        try:
            with verify_user(user, token_info):
                try:
                    return func(*args, **kw)
                except MKException:
                    crash = crash_reporting.GUICrashReport.from_exception()
                    crash_reporting.CrashReportStore().save(crash)
                    logger.exception("Unhandled exception (Crash-ID: %s)",
                                     crash.ident_to_text())
                    raise
        except MKException as exc:
            return problem(
                status=MK_STATUS.get(type(exc), 500),
                title=str(exc),
                detail="",
            )
Example #15
    def _bulk_discovery_start(self, request):
        job = BulkDiscoveryBackgroundJob()
        if job.is_active():
            raise MKUserError(
                None,
                _("A bulk discovery job is already running. Please use the "
                  "\"bulk_discovery_status\" call to get the curent status."))

        mode, do_scan, bulk_size, error_handling = self._get_parameters_from_request(request)
        tasks = get_tasks(self._get_hosts_from_request(request), bulk_size)

        try:
            job.set_function(job.do_execute, mode, do_scan, error_handling, tasks)
            job.start()
            return {
                "started": True,
            }
        except Exception as e:
            logger.exception("Failed to start bulk discovery")
            raise MKUserError(None, _("Failed to start discovery: %s") % e)
Example #16
def parse_perf_data(perf_data_string, check_command=None):
    # type: (str, Optional[str]) -> Tuple[List, str]
    """ Convert perf_data_string into perf_data, extract check_command"""
    # Strip away arguments like in "check_http!-H checkmk.com"
    if check_command is None:
        check_command = ""
    elif hasattr(check_command, 'split'):
        check_command = check_command.split("!")[0]

    # Split the perf data string into parts. Preserve quoted strings!
    parts = _split_perf_data(perf_data_string)

    if not parts:
        return [], check_command

    # Try if check command is appended to performance data
    # in a PNP like style
    if parts[-1].startswith("[") and parts[-1].endswith("]"):
        check_command = parts[-1][1:-1]
        del parts[-1]

    # Parse performance data, at least try
    perf_data = []

    for part in parts:
        try:
            varname, value_text, value_parts = parse_perf_values(part)

            value, unit_name = split_unit(value_text)
            if value is None:
                continue  # ignore useless empty variable

            perf_data_tuple = (varname, value, unit_name) + tuple(
                map(_float_or_int, value_parts))
            perf_data.append(perf_data_tuple)
        except Exception as exc:
            logger.exception("Failed to parse perfdata '%s'", perf_data_string)
            if config.debug:
                raise exc

    return perf_data, check_command
Example #17
    def activate(self):
        try:
            self._write_config_file()

            p = subprocess.Popen(
                ["omd", "restart", "rrdcached"],
                stdin=open(os.devnull),
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                close_fds=True,
                encoding="utf-8",
            )

            stdout, _stderr = p.communicate()
            if p.returncode != 0:
                raise Exception(stdout)

            return []
        except Exception:
            logger.exception("error restarting rrdcached")
            return ["Failed to activate rrdcached configuration: %s" % (traceback.format_exc())]
Example #18
    def activate(self, settings: _Optional[SerializedSettings] = None) -> ConfigurationWarnings:
        try:
            self._write_config_file()

            p = subprocess.Popen(  # pylint:disable=consider-using-with
                ["omd", "restart", "rrdcached"],
                stdin=open(os.devnull),  # pylint:disable=consider-using-with
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                close_fds=True,
                encoding="utf-8",
            )

            stdout, _stderr = p.communicate()
            if p.returncode != 0:
                raise Exception(stdout)

            return []
        except Exception:
            logger.exception("error restarting rrdcached")
            return ["Failed to activate rrdcached configuration: %s" % (traceback.format_exc())]
Example #19
    def _ajax_search(self) -> None:
        """Generate the search result list"""
        query = _maybe_strip(html.request.get_unicode_input('q'))
        if not query:
            return

        try:
            results = self._quicksearch_manager.generate_results(query)
            QuicksearchResultRenderer().show(results, query)

        except TooManyRowsError as e:
            html.show_warning(str(e))

        except MKException as e:
            html.show_error("%s" % e)

        except Exception:
            logger.exception("error generating quicksearch results")
            if config.debug:
                raise
            html.show_error(traceback.format_exc())
Example #20
    def handle_page(self) -> None:
        """The page handler, called by the page registry"""
        # FIXME: cyclical link between crash_reporting.py and pages.py
        from cmk.gui.crash_reporting import handle_exception_as_gui_crash_report
        html.set_output_format("json")
        try:
            action_response = self.page()
            response = {
                "result_code": 0,
                "result": action_response,
                "severity": "success"
            }
        except MKMissingDataError as e:
            response = {
                "result_code": 1,
                "result": "%s" % e,
                "severity": "success"
            }
        except MKException as e:
            response = {
                "result_code": 1,
                "result": "%s" % e,
                "severity": "error"
            }

        except Exception as e:
            if config.debug:
                raise
            logger.exception("error calling AJAX page handler")
            handle_exception_as_gui_crash_report(
                plain_error=True,
                show_crash_link=getattr(g, "may_see_crash_reports", False),
            )
            response = {
                "result_code": 1,
                "result": "%s" % e,
                "severity": "error"
            }

        html.write(json.dumps(response))
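
Every payload written by handle_page() above carries the keys result_code, result and severity. The following is a minimal, hypothetical sketch of how a client might consume that JSON payload; the payload literal and the handling logic are assumptions for illustration, not part of the source above.

import json

# Hypothetical consumer of the AJAX payload produced by a handler like handle_page().
payload = json.loads('{"result_code": 0, "result": "done", "severity": "success"}')
if payload["result_code"] == 0:
    print("OK: %s" % payload["result"])
else:
    print("Failed (%s): %s" % (payload["severity"], payload["result"]))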
Example #21
    def _get_system_wide_trusted_ca_certificates(self):
        # type: () -> Tuple[List[bytes], List[str]]
        trusted_cas = set()  # type: Set[bytes]
        errors = []  # type: List[str]
        for p in self.system_wide_trusted_ca_search_paths:
            cert_path = Path(p)

            if not cert_path.is_dir():
                continue

            for entry in cert_path.iterdir():
                cert_file_path = entry.absolute()
                try:
                    if entry.suffix not in [".pem", ".crt"]:
                        continue

                    trusted_cas.update(
                        self._get_certificates_from_file(cert_file_path))
                except IOError:
                    logger.exception("Error reading certificates from %s",
                                     cert_file_path)

                    # This error is shown to the user as warning message during "activate changes".
                    # We keep this message for the moment because we think that it is a helpful
                    # trigger for further checking web.log when a really needed certificate can
                    # not be read.
                    #
                    # We know a permission problem with some files that are created by default on
                    # some distros. We simply ignore these files because we assume that they are
                    # not needed.
                    if cert_file_path == Path("/etc/ssl/certs/localhost.crt"):
                        continue

                    errors.append(
                        "Failed to add certificate '%s' to trusted CA certificates. "
                        "See web.log for details." % cert_file_path)

            break

        return list(trusted_cas), errors
Example #22
    def activate(self):
        try:
            self._write_config_file()

            p = subprocess.Popen(["omd", "reload", "apache"],
                                 shell=False,
                                 stdin=open(os.devnull),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 close_fds=True)

            stdout = p.communicate()[0]
            if p.returncode != 0:
                raise Exception(stdout)

            return []
        except Exception:
            logger.exception("error reloading apache")
            return [
                "Failed to activate apache configuration: %s" %
                (traceback.format_exc())
            ]
Example #23
def page_run_cron():
    # type: () -> None

    lock_file = _lock_file()

    # Prevent cron jobs from being run too often, also we need
    # locking in order to prevent overlapping runs
    if lock_file.exists():
        last_run = lock_file.stat().st_mtime
        if time.time() - last_run < 59:
            raise MKGeneralException("Cron called too early. Skipping.")

    with lock_file.open("wb"):
        pass  # touches the file

    with store.locked(lock_file):
        # The cron page is accessed unauthenticated. After leaving the page_run_cron area
        # into the job functions we always want to have a user context initialized to keep
        # the code free from special cases (if no user logged in, then...).
        # The jobs need to be run in privileged mode in general. Some jobs, like the network
        # scan, switch the user context to a specific other user during execution.
        config.set_super_user()

        logger.debug("Starting cron jobs")

        for cron_job in multisite_cronjobs:
            try:
                job_name = cron_job.__name__

                logger.debug("Starting [%s]", job_name)
                cron_job()
                logger.debug("Finished [%s]", job_name)
            except Exception:
                html.write(
                    "An exception occured. Take a look at the web.log.\n")
                logger.exception("Exception in cron job [%s]", job_name)

        logger.debug("Finished all cron jobs")
        html.write("OK\n")
Example #24
def page_view() -> None:
    view_name = html.request.var("view_name")
    if not view_name:
        return page_index()

    view_spec = views.get_permitted_views().get(view_name)
    if not view_spec:
        raise MKUserError("view_name",
                          "No view defined with the name '%s'." % view_name)

    datasource = data_source_registry[view_spec["datasource"]]()
    context = visuals.get_merged_context(
        visuals.get_context_from_uri_vars(
            datasource.infos, single_infos=view_spec["single_infos"]),
        view_spec["context"],
    )

    view = views.View(view_name, view_spec, context)
    view.row_limit = views.get_limit()
    view.only_sites = views.get_only_sites()
    view.user_sorters = views.get_user_sorters()

    title = views.view_title(view_spec)
    mobile_html_head(title)

    painter_options = PainterOptions.get_instance()
    painter_options.load(view_name)

    try:
        view_renderer = MobileViewRenderer(view)
        views.show_view(view, view_renderer)
    except Exception as e:
        logger.exception("error showing mobile view")
        if config.debug:
            raise
        html.write("ERROR showing view: %s" %
                   escaping.escape_attribute(str(e)))

    mobile_html_foot()
Example #25
    def action(self) -> ActionResult:
        if not html.check_transaction():
            return redirect(self.mode_url())

        if html.request.var('_delete'):
            delete_users([html.request.get_unicode_input("_delete")])

        elif html.request.var('_sync'):
            try:

                job = userdb.UserSyncBackgroundJob()
                job.set_function(job.do_sync,
                                 add_to_changelog=True,
                                 enforce_sync=True,
                                 load_users_func=userdb.load_users,
                                 save_users_func=userdb.save_users)

                try:
                    job.start()
                except background_job.BackgroundJobAlreadyRunning as e:
                    raise MKUserError(None,
                                      _("Another synchronization job is already running: %s") % e)

                self._job_snapshot = job.get_status_snapshot()
            except Exception:
                logger.exception("error syncing users")
                raise MKUserError(None, traceback.format_exc().replace('\n', '<br>\n'))

        elif html.request.var("_bulk_delete_users"):
            self._bulk_delete_users_after_confirm()

        else:
            action_handler = gui_background_job.ActionHandler(self.breadcrumb())
            action_handler.handle_actions()
            if action_handler.did_acknowledge_job():
                self._job_snapshot = userdb.UserSyncBackgroundJob().get_status_snapshot()
                flash(_("Synchronization job acknowledged"))
        return redirect(self.mode_url())
Example #26
    def run(self):
        self._executed = True
        try:
            # Do not merge results that have been gathered on one site for different sites
            results = list(self.execute())
            num_sites = len(set(r.site_id for r in results))
            if num_sites > 1:
                for result in results:
                    result.from_test(self)
                    yield result
                return

            # Merge multiple results produced for a single site
            total_result = ACResult.merge(*list(self.execute()))
            total_result.from_test(self)
            yield total_result
        except Exception:
            logger.exception("error executing configuration test %s", self.__class__.__name__)
            result = ACResultCRIT(
                "<pre>%s</pre>" % _("Failed to execute the test %s: %s") %
                (html.attrencode(self.__class__.__name__), traceback.format_exc()))
            result.from_test(self)
            yield result
Example #27
def load_web_plugins(forwhat, globalvars):
    _failed_plugins[forwhat] = []

    for plugins_path in [
            Path(cmk.utils.paths.web_dir, "plugins", forwhat),
            cmk.utils.paths.local_web_dir / "plugins" / forwhat,
    ]:
        if not plugins_path.exists():
            continue

        for file_path in sorted(plugins_path.iterdir()):
            try:
                if file_path.suffix == ".py" and not file_path.with_suffix(".pyc").exists():
                    exec(_drop_comments(file_path.open().read()), globalvars)

                elif file_path.suffix == ".pyc":
                    code_bytes = file_path.open().read()[8:]
                    code = marshal.loads(code_bytes)
                    exec(code, globalvars)  # yapf: disable

            except Exception as e:
                logger.exception("Failed to load plugin %s: %s", file_path, e)
                _failed_plugins[forwhat].append((str(file_path), e))
Example #28
    def render(self, row, cell):
        classes = ["perfometer"]
        if is_stale(row):
            classes.append("stale")

        try:
            title, h = Perfometer(row).render()
            if title is None and h is None:
                return "", ""
        except Exception as e:
            logger.exception("error rendering performeter")
            if config.debug:
                raise
            return " ".join(classes), _("Exception: %s") % e

        content = html.render_div(HTML(h), class_=["content"]) \
                + html.render_div(title, class_=["title"]) \
                + html.render_div("", class_=["glass"])

        # pnpgraph_present: -1 means unknown (path not configured), 0: no, 1: yes
        if display_options.enabled(display_options.X) \
           and row["service_pnpgraph_present"] != 0:
            if metrics.cmk_graphs_possible():
                import cmk.gui.cee.plugins.views.graphs
                url = cmk.gui.cee.plugins.views.graphs.cmk_graph_url(
                    row, "service")
            else:
                url = pnp_url(row, "service")
            disabled = False
        else:
            url = "javascript:void(0)"
            disabled = True

        return " ".join(classes), \
            html.render_a(content=content, href=url, title=html.strip_tags(title),
                          class_=["disabled" if disabled else None])
Example #29
    def action(self) -> ActionResult:
        user.need_permission("wato.services")

        try:
            transactions.check_transaction()
            start_bulk_discovery(
                self._job,
                self._get_hosts_to_discover(),
                self._mode,
                self._do_full_scan,
                self._ignore_errors,
                self._bulk_size,
            )

        except Exception as e:
            if active_config.debug:
                raise
            logger.exception("Failed to start bulk discovery")
            raise MKUserError(
                None,
                _("Failed to start discovery: %s") %
                ("%s" % e).replace("\n", "\n<br>"))

        raise HTTPRedirect(self._job.detail_url())
Example #30
    def _perform_tests(self):
        test_sites = self._analyze_sites()

        self._logger.debug("Executing tests for %d sites" % len(test_sites))
        results_by_site = {}

        # Results are fetched simultaneously from the remote sites
        result_queue = multiprocessing.JoinableQueue(
        )  # type: multiprocessing.Queue[Tuple[SiteId, str]]

        processes = []
        site_id = SiteId("unknown_site")
        for site_id in test_sites:
            process = multiprocessing.Process(
                target=self._perform_tests_for_site,
                args=(site_id, result_queue))
            process.start()
            processes.append((site_id, process))

        # Now collect the results from the queue until all processes are finished
        while any(p.is_alive() for site_id, p in processes):
            try:
                site_id, results_data = result_queue.get_nowait()
                result_queue.task_done()
                result = ast.literal_eval(results_data)

                if result["state"] == 1:
                    raise MKGeneralException(result["response"])

                if result["state"] == 0:
                    test_results = []
                    for result_data in result["response"]:
                        result = ACResult.from_repr(result_data)
                        test_results.append(result)

                    # Add general connectivity result
                    result = ACResultOK(_("No connectivity problems"))
                    result.from_test(ACTestConnectivity())
                    result.site_id = site_id
                    test_results.append(result)

                    results_by_site[site_id] = test_results

                else:
                    raise NotImplementedError()

            except six.moves.queue.Empty:
                time.sleep(0.5)  # wait some time to prevent CPU hogs

            except Exception as e:
                result = ACResultCRIT("%s" % e)
                result.from_test(ACTestConnectivity())
                result.site_id = site_id
                results_by_site[site_id] = [result]

                logger.exception("error analyzing configuration for site %s",
                                 site_id)

        self._logger.debug("Got test results")

        # Group results by category first and then by test
        results_by_category = {}  # type: Dict[str, Dict[str, Dict[str, Any]]]
        for site_id, results in results_by_site.items():
            for result in results:
                category_results = results_by_category.setdefault(
                    result.category, {})
                test_results_by_site = category_results.setdefault(
                    result.test_id, {
                        "site_results": {},
                        "test": {
                            "title": result.title,
                            "help": result.help,
                        }
                    })

                test_results_by_site["site_results"][result.site_id] = result

        return results_by_category