Example #1
0
@contextmanager
def _backup_objects_file(core: MonitoringCore) -> Iterator[None]:
    if config.monitoring_core == "nagios":
        objects_file = cmk.utils.paths.nagios_objects_file
    else:
        objects_file = cmk.utils.paths.var_dir + "/core/config"

    backup_path = None
    if os.path.exists(objects_file):
        backup_path = objects_file + ".save"
        os.rename(objects_file, backup_path)

    try:
        try:
            yield None
        except Exception:
            if backup_path:
                os.rename(backup_path, objects_file)
            raise

        if (config.monitoring_core == "nagios"
                and Path(cmk.utils.paths.nagios_config_file).exists()
                and not do_check_nagiosconfig()):
            broken_config_path = Path(
                cmk.utils.paths.tmp_dir) / "check_mk_objects.cfg.broken"
            shutil.move(cmk.utils.paths.nagios_objects_file,
                        broken_config_path)

            if backup_path:
                os.rename(backup_path, objects_file)
            elif os.path.exists(objects_file):
                os.remove(objects_file)

            raise MKGeneralException(
                "Configuration for monitoring core is invalid. Rolling back. "
                "The broken file has been copied to \"%s\" for analysis." %
                broken_config_path)
    finally:
        if backup_path and os.path.exists(backup_path):
            os.remove(backup_path)
Example #2
0
def _get_sorted_check_table(hostname, remove_duplicates=False, filter_mode=None, skip_ignored=True):
    # type: (str, bool, Optional[str], bool) -> List[Service]
    # Convert from dictionary into simple tuple list. Then sort it according to
    # the service dependencies.
    # TODO: Use the Service objects from get_check_table once it returns these objects
    is_cmc = config.is_cmc()
    unsorted = [(service,
                 [] if is_cmc else config.service_depends_on(hostname, service.description))
                for service in get_check_table(hostname,
                                               remove_duplicates=remove_duplicates,
                                               filter_mode=filter_mode,
                                               skip_ignored=skip_ignored).values()]

    unsorted.sort(key=lambda x: x[0].description)

    ordered = []  # type: List[Service]
    while len(unsorted) > 0:
        unsorted_descrs = {entry[0].description for entry in unsorted}
        left = []
        at_least_one_hit = False
        for check in unsorted:
            deps_fulfilled = True
            for dep in check[1]:  # dependencies
                if dep in unsorted_descrs:
                    deps_fulfilled = False
                    break
            if deps_fulfilled:
                ordered.append(check[0])
                at_least_one_hit = True
            else:
                left.append(check)
        if len(left) == 0:
            break
        if not at_least_one_hit:
            raise MKGeneralException("Cyclic service dependency of host %s. Problematic are: %s" %
                                     (hostname, ",".join(unsorted_descrs)))
        unsorted = left
    return ordered
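
The loop above is an iterative dependency-ordering pass: each round emits every service whose dependencies are no longer pending, and a round with no progress means a cycle. A minimal standalone sketch of the same idea, using plain strings instead of Service objects (names and data below are illustrative, not Checkmk APIs):

from typing import Dict, List

def order_by_dependencies(deps: Dict[str, List[str]]) -> List[str]:
    # Emit a service only once all of its dependencies have been emitted.
    unsorted = sorted(deps)
    ordered: List[str] = []
    while unsorted:
        pending = set(unsorted)
        ready = [s for s in unsorted if not any(d in pending for d in deps[s])]
        if not ready:
            raise RuntimeError("Cyclic service dependency: %s" % ", ".join(sorted(pending)))
        ordered.extend(ready)
        unsorted = [s for s in unsorted if s not in ready]
    return ordered

# order_by_dependencies({"CPU": [], "Load": ["CPU"], "Memory": []})
# -> ['CPU', 'Memory', 'Load']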
Example #3
0
    def _is_expected_agent_version(self, agent_version, expected_version):
        try:
            if agent_version in ['(unknown)', None, 'None']:
                return False

            if isinstance(expected_version, str) and expected_version != agent_version:
                return False

            elif isinstance(expected_version, tuple) and expected_version[0] == 'at_least':
                spec = expected_version[1]
                if utils.is_daily_build_version(agent_version) and 'daily_build' in spec:
                    expected = int(spec['daily_build'].replace('.', ''))

                    branch = utils.branch_of_daily_build(agent_version)
                    if branch == "master":
                        agent = int(agent_version.replace('.', ''))

                    else:  # branch build (e.g. 1.2.4-2014.06.01)
                        agent = int(agent_version.split('-')[1].replace('.', ''))

                    if agent < expected:
                        return False

                elif 'release' in spec:
                    if utils.is_daily_build_version(agent_version):
                        return False

                    if cmk.utils.werks.parse_check_mk_version(agent_version) \
                        < cmk.utils.werks.parse_check_mk_version(spec['release']):
                        return False

            return True
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            raise MKGeneralException(
                "Unable to check agent version (Agent: %s Expected: %s, Error: %s)" %
                (agent_version, expected_version, e))
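
For the daily-build branch above, the comparison boils down to turning the date part of the version string into an integer and comparing it. A tiny illustrative sketch (the real helpers live in cmk.utils; the version strings below are made up):

def _daily_build_as_int(version: str) -> int:
    """'1.2.4-2014.06.01' (branch build) or '2014.06.01' (master) -> 20140601"""
    date_part = version.split('-')[1] if '-' in version else version
    return int(date_part.replace('.', ''))

assert _daily_build_as_int("1.2.4-2014.06.01") == 20140601
assert _daily_build_as_int("2014.06.02") > _daily_build_as_int("2014.06.01")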
Example #4
0
def _make_target_columns(
        oid_info: Union[OIDInfo,
                        SNMPTree]) -> Tuple[OID, List[Any], SNMPColumns]:
    #
    # OIDInfo is one of:
    #   - OIDWithColumns = Tuple[OID, SNMPColumns]
    #   - OIDWithSubOIDsAndColumns = Tuple[OID, List[OID], SNMPColumns]
    #     where List[OID] is a list of OID-infixes that are put between the
    #     baseoid and the columns and prefixed with the index column.
    #
    # TODO: The Union[OIDWithColumns, OIDWithSubOIDsAndColumns] dance is absurd!
    #       Here, we should just have OIDWithSubOIDsAndColumns and
    #       replace `OIDWithColumns` with `Tuple[OID, [], SNMPColumns]`.
    #
    # This makes it possible to merge distinct SNMP subtrees with a similar structure
    # to one virtual new tree (look into cmctc_temp for an example)
    suboids: List = [None]
    if isinstance(oid_info, SNMPTree):
        # TODO (mo): Via SNMPTree is the way to go. Remove all other cases
        #            once we have the auto-conversion of SNMPTrees in place.
        #            In particular:
        #              * remove all 'suboids' related code (index_column!)
        #              * remove all casts, and extend the lifetime of the
        #                SNMPTree object as far as possible.
        #              * I think the below code can be improved by making
        #                SNMPTree an iterable.
        tmp_base = str(oid_info.base)
        oid, targetcolumns = cast(OIDWithColumns, (tmp_base, oid_info.oids))
    elif len(oid_info) == 2:
        oid, targetcolumns = cast(OIDWithColumns, oid_info)
    else:
        oid, suboids, targetcolumns = cast(OIDWithSubOIDsAndColumns, oid_info)

    if not oid.startswith("."):
        raise MKGeneralException("OID definition '%s' does not begin with ." %
                                 oid)

    return oid, suboids, targetcolumns
Example #5
0
    def _gather_check_plugin_names(self):
        # type: () -> Set[CheckPluginName]
        """Returns a list of check types that shal be executed with this source.

        The logic is only processed once per hostname+ipaddress combination. Once processed
        check types are cached to answer subsequent calls to this function.
        """

        if self._check_plugin_name_filter_func is None:
            raise MKGeneralException(
                "The check type filter function has not been set")

        try:
            return self._check_plugin_names[(self._hostname, self._ipaddress)]
        except KeyError:
            check_plugin_names = self._check_plugin_name_filter_func(
                self._snmp_config,
                on_error=self._on_error,
                do_snmp_scan=self._do_snmp_scan,
                for_mgmt_board=self._for_mgmt_board)
            self._check_plugin_names[(self._hostname,
                                      self._ipaddress)] = check_plugin_names
            return check_plugin_names
Example #6
0
def do_snmpwalk(options, hostnames):
    if "oids" in options and "extraoids" in options:
        raise MKGeneralException(
            "You cannot specify --oid and --extraoid at the same time.")

    if not hostnames:
        raise MKBailOut("Please specify host names to walk on.")

    if not os.path.exists(cmk.utils.paths.snmpwalks_dir):
        os.makedirs(cmk.utils.paths.snmpwalks_dir)

    for hostname in hostnames:
        #TODO: What about SNMP management boards?
        snmp_config = create_snmp_host_config(hostname)

        try:
            _do_snmpwalk_on(snmp_config, options,
                            cmk.utils.paths.snmpwalks_dir + "/" + hostname)
        except Exception as e:
            console.error("Error walking %s: %s\n" % (hostname, e))
            if cmk.utils.debug.enabled():
                raise
        cmk_base.cleanup.cleanup_globals()
Example #7
0
    def runner_plugin(self, opsys: OS) -> Plugin:
        # TODO: when external mode:
        #  => bin!
        #  when not:
        #  no target, interval!
        if self.execution_mode == 'external':
            # Runner and Controller have to be deployed as bin
            # /omd/sites/v2/lib/python3/cmk/base/cee/bakery/core_bakelets/bin_files.py

            # cmk.utils.paths.local_agents_dir ??
            pass
        elif self.execution_mode == 'agent_serial':
            # The runner plugin is deployed with the configured execution interval.
            return Plugin(
                base_os=opsys,
                source=Path('robotmk-runner.py'),
                # TODO: interval=interval,
                interval=self.execution_interval,
            )
        else:
            raise MKGeneralException(
                "Error: Execution mode %s is not supported." %
                self.execution_mode)
Example #8
0
def _agent_cache_file_age(hostname: HostName,
                          check_plugin_name: CheckPluginNameStr) -> Optional[float]:
    host_config = _config.get_config_cache().get_host_config(hostname)
    if host_config.is_cluster:
        raise MKGeneralException("get_agent_data_time() not valid for cluster")

    # NOTE: This is a workaround for the 'old' API and will not be correct
    # for the new one. This is a check plugin name, and the property of being
    # 'TCP' or 'SNMP' is a property of the section.
    # This function is deprecated for new plugins.
    # For old-style plugins, plugin and section name are same, so check the
    # corresponding section:
    section_name_str = _cmk_utils.check_utils.section_name_of(check_plugin_name)
    section = _agent_based_register.get_section_plugin(_SectionName(section_name_str))
    if hasattr(section, "trees"):
        cachefile = "%s/%s.%s" % (_paths.tcp_cache_dir, hostname, section_name_str)
    else:
        cachefile = "%s/%s" % (_paths.tcp_cache_dir, hostname)

    if os.path.exists(cachefile):
        return _cmk_utils.cachefile_age(cachefile)

    return None
Example #9
0
def _do_submit_to_core(host,
                       service,
                       state,
                       output,
                       cached_at=None,
                       cache_interval=None):
    # type: (HostName, ServiceName, ServiceState, ServiceDetails, Optional[int], Optional[int]) -> None
    if _in_keepalive_mode():
        # Regular case for the CMC - check helpers are running in keepalive mode
        keepalive.add_keepalive_check_result(host, service, state, output,
                                             cached_at, cache_interval)

    elif config.check_submission == "pipe" or config.monitoring_core == "cmc":
        # In case of CMC this is used when running "cmk" manually
        _submit_via_command_pipe(host, service, state, output)

    elif config.check_submission == "file":
        _submit_via_check_result_file(host, service, state, output)

    else:
        raise MKGeneralException("Invalid setting %r for check_submission. "
                                 "Must be 'pipe' or 'file'" %
                                 config.check_submission)
Example #10
0
    def get_livestatus_filters(self, livestatus_table: LivestatusTable,
                               used_filters: UsedFilters) -> LivestatusFilterHeaders:
        filter_lines = []

        entries = used_filters.get(self.name, [])
        if len(entries) > 3:
            raise MKGeneralException("You can only set up to three 'tg:' filters")

        for entry in entries:
            if ":" not in entry:
                # Be compatible with pre 1.6 filtering for some time (no
                # tag-group:tag-value, but tag-value only)
                filter_lines.append("Filter: tag_values >= %s" % livestatus.lqencode(entry))
                continue

            tag_key, tag_value = entry.split(":", 1)
            filter_lines.append("Filter: tags = %s %s" %
                                (livestatus.lqencode(tag_key), livestatus.lqencode(tag_value)))

        if len(filter_lines) > 1:
            filter_lines.append("And: %d" % len(filter_lines))

        return "\n".join(filter_lines)
Example #11
0
def _extract_snmp_sections(
    inv_info: Dict[str, InventoryInfo],
    plugin_file_lookup: Dict[str, str],
) -> Sequence[str]:
    errors = []
    for plugin_name, plugin_info in sorted(inv_info.items()):
        if "snmp_info" not in plugin_info:
            continue
        section_name = section_name_of(plugin_name)
        if isinstance(
                agent_based_register.get_section_plugin(
                    SectionName(section_name)), SNMPSectionPlugin):
            continue

        fallback_files = [
            _include_file_path(i) for i in plugin_info.get("includes", [])
        ] + [plugin_file_lookup[plugin_name]]

        try:
            agent_based_register.add_section_plugin(
                create_snmp_section_plugin_from_legacy(
                    section_name,
                    {},
                    plugin_info["snmp_scan_function"],
                    plugin_info["snmp_info"],
                    scan_function_fallback_files=fallback_files,
                    # We have to validate, because we read inventory plugin files
                    # directly, and do not know whether they changed.
                    validate_creation_kwargs=True,
                ))
        except (NotImplementedError, KeyError, AssertionError, ValueError):
            msg = config.AUTO_MIGRATION_ERR_MSG % ("section", plugin_name)
            if cmk.utils.debug.enabled():
                raise MKGeneralException(msg)
            errors.append(msg)

    return errors
Example #12
0
def _get_needed_check_plugin_names(host_config):
    # type: (config.HostConfig) -> Set[CheckPluginName]
    import cmk.base.check_table as check_table  # pylint: disable=import-outside-toplevel
    needed_check_plugin_names = set([])

    # In case the host is monitored as special agent, the check plugin for the special agent needs
    # to be loaded
    sources = data_sources.DataSources(host_config.hostname, ipaddress=None)
    for source in sources.get_data_sources():
        if isinstance(source, data_sources.programs.SpecialAgentDataSource):
            needed_check_plugin_names.add(
                source.special_agent_plugin_file_name)

    # Collect the needed check plugin names using the host check table
    for check_plugin_name in check_table.get_needed_check_names(
            host_config.hostname,
            filter_mode="include_clustered",
            skip_ignored=False):
        if config.check_info[check_plugin_name].get("extra_sections"):
            for section_name in config.check_info[check_plugin_name][
                    "extra_sections"]:
                if section_name in config.check_info:
                    needed_check_plugin_names.add(section_name)

        needed_check_plugin_names.add(check_plugin_name)

    # Also include the check plugins of the cluster nodes to be able to load
    # the autochecks of the nodes
    if host_config.is_cluster:
        nodes = host_config.nodes
        if nodes is None:
            raise MKGeneralException("Invalid cluster configuration")
        for node in nodes:
            needed_check_plugin_names.update(
                check_table.get_needed_check_names(node, skip_ignored=False))

    return needed_check_plugin_names
Example #13
0
def do_create_config(core: MonitoringCore) -> None:
    """Creating the monitoring core configuration and additional files

    Ensures that everything needed by the monitoring core and its helper processes is up-to-date
    and available for starting the monitoring.
    """
    with _backup_objects_file(core):
        out.output("Generating configuration for core (type %s)..." %
                   config.monitoring_core)
        try:
            _create_core_config(core)
            out.output(tty.ok + "\n")
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            raise MKGeneralException("Error creating configuration: %s" % e)

    core.precompile()

    try:
        import cmk.base.cee.bakery.agent_bakery  # pylint: disable=redefined-outer-name,import-outside-toplevel
        cmk.base.cee.bakery.agent_bakery.bake_on_restart()
    except ImportError:
        pass
Example #14
0
    def __init__(self,
                 job_id: str,
                 logger: Optional[logging.Logger] = None,
                 **kwargs: Any) -> None:
        super(BackgroundJob, self).__init__()
        self._job_id = job_id
        self._job_base_dir = BackgroundJobDefines.base_dir
        self._job_initializiation_lock = os.path.join(
            self._job_base_dir, "job_initialization.lock")

        if not logger:
            raise MKGeneralException(
                _("The background job is missing a logger instance"))
        self._logger = logger

        kwargs.setdefault("stoppable", True)

        self._kwargs = kwargs
        self._work_dir = os.path.join(self._job_base_dir, self._job_id)
        self._jobstatus = JobStatus(self._work_dir)

        # The function ptr and its args/kwargs
        self._queued_function: Optional[Tuple[Callable, Tuple[Any, ...],
                                              Dict[str, Any]]] = None
Example #15
0
    def save(self, hostname):
        # type: (HostName) -> None
        """ The job of the save function is to update the item state on disk.
        It simply returns if it detects that the data wasn't changed at all since the last loading.
        If the data on disk has been changed in the meantime, the cached data is updated from disk.
        Afterwards only the actual modifications (update/remove) are applied to the updated cached
        data before it is written back to disk.
        """
        filename = cmk.utils.paths.counters_dir + "/" + hostname
        if not self._removed_item_state_keys and not self._updated_item_states:
            return

        try:
            if not os.path.exists(cmk.utils.paths.counters_dir):
                os.makedirs(cmk.utils.paths.counters_dir)

            store.aquire_lock(filename)
            last_mtime = os.stat(filename).st_mtime
            if last_mtime != self._last_mtime:
                self._item_states = store.load_object_from_file(filename, default={})

                # Remove obsolete keys
                for key in self._removed_item_state_keys:
                    try:
                        del self._item_states[key]
                    except KeyError:
                        pass

                # Add updated keys
                self._item_states.update(self._updated_item_states)

            store.save_object_to_file(filename, self._item_states, pretty=False)
        except Exception:
            raise MKGeneralException("Cannot write to %s: %s" % (filename, traceback.format_exc()))
        finally:
            store.release_lock(filename)
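
The merge described in the docstring, reduced to its core on plain dicts (no locking or file I/O; the names below are illustrative): local removals and updates are re-applied on top of the freshly loaded on-disk state.

def merge_item_states(on_disk, removed_keys, updated):
    merged = dict(on_disk)
    for key in removed_keys:
        merged.pop(key, None)   # drop keys removed locally since the last load
    merged.update(updated)      # re-apply local updates
    return merged

# merge_item_states({"a": 1, "b": 2}, removed_keys={"a"}, updated={"c": 3})
# -> {'b': 2, 'c': 3}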
Example #16
0
    def _transform_wato_rulesets_params(self, all_rulesets):
        num_errors = 0
        for ruleset in all_rulesets.get_rulesets().values():
            valuespec = ruleset.valuespec()
            for folder, folder_index, rule in ruleset.get_rules():
                try:
                    rule.value = valuespec.transform_value(rule.value)
                except Exception as e:
                    if self._arguments.debug:
                        raise
                    self._logger.error(
                        "ERROR: Failed to transform rule: (Ruleset: %s, Folder: %s, "
                        "Rule: %d, Value: %s: %s",
                        ruleset.name,
                        folder.path(),
                        folder_index,
                        rule.value,
                        e,
                    )
                    num_errors += 1

        if num_errors and self._arguments.debug:
            raise MKGeneralException("Failed to transform %d rule values" %
                                     num_errors)
Example #17
0
def _do_submit_to_core(
    host: HostName,
    service: ServiceName,
    state: ServiceState,
    output: ServiceDetails,
    cache_info: Optional[Tuple[int, int]],
) -> None:
    if keepalive and keepalive.enabled():
        cached_at, cache_interval = cache_info or (None, None)
        # Regular case for the CMC - check helpers are running in keepalive mode
        keepalive.add_check_result(host, service, state, output, cached_at,
                                   cache_interval)

    elif config.check_submission == "pipe" or config.monitoring_core == "cmc":
        # In case of CMC this is used when running "cmk" manually
        _submit_via_command_pipe(host, service, state, output)

    elif config.check_submission == "file":
        _submit_via_check_result_file(host, service, state, output)

    else:
        raise MKGeneralException("Invalid setting %r for check_submission. "
                                 "Must be 'pipe' or 'file'" %
                                 config.check_submission)
Example #18
0
def do_core_action(action, quiet=False):
    if not quiet:
        console.output("%sing monitoring core..." % action.title())

    if config.monitoring_core == "nagios":
        os.putenv("CORE_NOVERIFY", "yes")
        command = ["%s/etc/init.d/core" % cmk.utils.paths.omd_root, action]
    else:
        command = ["omd", action, "cmc"]

    p = subprocess.Popen(command,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         close_fds=True)
    result = p.wait()
    if result != 0:
        output = p.stdout.read()
        if not quiet:
            console.output("ERROR: %s\n" % output)
        raise MKGeneralException("Cannot %s the monitoring core: %s" %
                                 (action, output))
    else:
        if not quiet:
            console.output(tty.ok + "\n")
Example #19
0
def active_check_arguments(hostname: HostName,
                           description: Optional[ServiceName],
                           args: config.SpecialAgentInfoFunctionResult) -> str:
    if isinstance(args, config.SpecialAgentConfiguration):
        # TODO: Silly dispatching because of broken types/variance.
        if isinstance(args.args, str):
            cmd_args: Union[str, List[Union[int, float, str,
                                            Tuple[str, str, str]]]] = args.args
        elif isinstance(args.args, list):
            cmd_args = [arg for arg in args.args if isinstance(arg, str)]
        else:
            raise Exception("funny SpecialAgentConfiguration args %r" %
                            (args.args, ))
    elif isinstance(args, str):
        cmd_args = args
    elif isinstance(args, list):
        cmd_args = [arg for arg in args if isinstance(arg, (str, tuple))]
    else:
        raise MKGeneralException(
            "The check argument function needs to return either a list of arguments or a "
            "string of the concatenated arguments (Host: %s, Service: %s)." %
            (hostname, description))

    return config.prepare_check_command(cmd_args, hostname, description)
Example #20
0
def in_extraconf_hostlist(hostlist, hostname):
    """Whether or not the given host matches the hostlist.

    Entries in list are hostnames that must equal the hostname.
    Expressions beginning with ! are negated: if they match,
    the item is excluded from the list.

    Expressions beginning with ~ are treated as regular expressions.
    Also the three special tags '@all', '@clusters', '@physical'
    are allowed.
    """

    # Migration help: print error if old format appears in config file
    # FIXME: When can this be removed?
    try:
        if hostlist[0] == "":
            raise MKGeneralException(
                'Invalid empty entry [ "" ] in configuration')
    except IndexError:
        pass  # Empty list, no problem.

    for hostentry in hostlist:
        if hostentry == '':
            raise MKGeneralException('Empty hostname in host list %r' %
                                     hostlist)
        negate = False
        use_regex = False
        if hostentry[0] == '@':
            if hostentry == '@all':
                return True
            # TODO: Is not used anymore for a long time. Will be cleaned up
            # with 1.6 tuple ruleset cleanup
            #ic = is_cluster(hostname)
            #if hostentry == '@cluster' and ic:
            #    return True
            #elif hostentry == '@physical' and not ic:
            #    return True

        # Allow negation of hostentry with prefix '!'
        else:
            if hostentry[0] == '!':
                hostentry = hostentry[1:]
                negate = True

            # Allow regex with prefix '~'
            if hostentry[0] == '~':
                hostentry = hostentry[1:]
                use_regex = True

        try:
            if not use_regex and hostname == hostentry:
                return not negate
            # Handle Regex. Note: hostname == True -> generic unknown host
            elif use_regex and hostname != True:
                if regex(hostentry).match(hostname) is not None:
                    return not negate
        except MKGeneralException:
            if cmk.utils.debug.enabled():
                raise

    return False
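
Behaviour sketch with made-up host names (assuming regex() compiles a pattern that is matched from the start of the string, like re.match):

in_extraconf_hostlist(["@all"], "anything")            -> True
in_extraconf_hostlist(["srv01", "srv02"], "srv01")     -> True
in_extraconf_hostlist(["!srv01", "~srv.*"], "srv01")   -> False   (negated exact match)
in_extraconf_hostlist(["~srv.*"], "srv99")             -> True    (regex prefix)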
Example #21
0
def validate_job_id(job_id: str) -> None:
    if not regex(REGEX_GENERIC_IDENTIFIER).match(job_id):
        raise MKGeneralException(_("Invalid Job ID"))
Example #22
0
    def __init__(self, name):
        self.name = name
        man_page = load_man_page(name)
        if not man_page:
            raise MKGeneralException("No manpage for %s. Sorry.\n" % self.name)
        self._header = man_page["header"]
Example #23
0
def get_title_from_man_page(path: Path) -> str:
    with path.open(encoding="utf-8") as fp:
        for line in fp:
            if line.startswith("title:"):
                return line.split(":", 1)[1].strip()
    raise MKGeneralException(_("Invalid man page: Failed to get the title"))
Example #24
0
    def __getattr__(self, name):
        if name not in self._job_status:
            raise MKGeneralException(
                _("The function %s is not in the snapshotted functions.") %
                name)
        return lambda: self._job_status[name]
Example #25
0
def _validate_tag_id(tag_id):
    if not re.match("^[-a-z0-9A-Z_]*$", tag_id):
        raise MKGeneralException(
            _("Invalid tag ID. Only the characters a-z, A-Z, 0-9, _ and - are allowed."
              ))
Example #26
0
def _priv_proto_for(proto_name: str) -> str:
    if proto_name == "DES":
        return "DES"
    if proto_name == "AES":
        return "AES"
    raise MKGeneralException("Invalid SNMP priv protocol: %s" % proto_name)
Example #27
0
    def _snmp_base_command(
            self, what: str,
            context_name: Optional[SNMPContextName]) -> List[str]:
        options = []

        if what == "get":
            command = ["snmpget"]
        elif what == "getnext":
            command = ["snmpgetnext", "-Cf"]
        elif self.config.is_bulkwalk_host:
            command = ["snmpbulkwalk"]

            options.append("-Cr%d" % self.config.bulk_walk_size_of)
        else:
            command = ["snmpwalk"]

        if not self.config.is_snmpv3_host:
            # Handle V1 and V2C
            if self.config.is_bulkwalk_host:
                options.append("-v2c")
            else:
                if what == "walk":
                    command = ["snmpwalk"]
                if self.config.is_snmpv2or3_without_bulkwalk_host:
                    options.append("-v2c")
                else:
                    options.append("-v1")

            if not isinstance(self.config.credentials, str):
                raise TypeError()
            options += ["-c", self.config.credentials]

        else:
            # TODO: Fix the horrible credentials typing
            if not (isinstance(self.config.credentials, tuple)
                    and len(self.config.credentials) in (2, 4, 6)):
                raise MKGeneralException(
                    "Invalid SNMP credentials '%r' for host %s: must be "
                    "string, 2-tuple, 4-tuple or 6-tuple" %
                    (self.config.credentials, self.config.hostname))

            if len(self.config.credentials) == 6:
                (
                    sec_level,
                    auth_proto,
                    sec_name,
                    auth_pass,
                    priv_proto,
                    priv_pass,
                ) = self.config.credentials
                options += [
                    "-v3",
                    "-l",
                    sec_level,
                    "-a",
                    _auth_proto_for(auth_proto),
                    "-u",
                    sec_name,
                    "-A",
                    auth_pass,
                    "-x",
                    _priv_proto_for(priv_proto),
                    "-X",
                    priv_pass,
                ]

            elif len(self.config.credentials) == 4:
                sec_level, auth_proto, sec_name, auth_pass = self.config.credentials
                options += [
                    "-v3",
                    "-l",
                    sec_level,
                    "-a",
                    _auth_proto_for(auth_proto),
                    "-u",
                    sec_name,
                    "-A",
                    auth_pass,
                ]

            else:
                sec_level, sec_name = self.config.credentials
                options += ["-v3", "-l", sec_level, "-u", sec_name]

        # Do not load *any* MIB files. This saves lots of CPU.
        options += ["-m", "", "-M", ""]

        # Configuration of timing and retries
        settings = self.config.timing
        if "timeout" in settings:
            options += ["-t", "%0.2f" % settings["timeout"]]
        if "retries" in settings:
            options += ["-r", "%d" % settings["retries"]]

        if context_name is not None:
            options += ["-n", context_name]

        return command + options
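
For example (values made up): a v2c bulkwalk host with community "public", a bulk walk size of 10 and a 2 second timeout would end up with roughly this command for a walk:

["snmpbulkwalk", "-Cr10", "-v2c", "-c", "public", "-m", "", "-M", "", "-t", "2.00"]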
Example #28
0
def _get_validated_item_state_prefix() -> ServicePrefix:
    prefix = get_item_state_prefix()
    if not prefix:
        raise MKGeneralException(
            "accessing value store outside check function")
    return prefix
Example #29
0
def _get_post_discovery_services(
    host_name: HostName,
    services: ServicesByTransition,
    service_filters: _ServiceFilters,
    result: DiscoveryResult,
    mode: DiscoveryMode,
) -> List[autochecks.ServiceWithNodes]:
    """
    The output contains a selection of services in the states "new", "old", "ignored", "vanished"
    (depending on the value of `mode`) and "clustered_".

    Services with the states "custom", "legacy", "active" and "manual" are currently not checked.

    Note:

        Discovered checks that are shadowed by manual checks will vanish that way.

    """
    post_discovery_services: List[autochecks.ServiceWithNodes] = []
    for check_source, discovered_services_with_nodes in services.items():
        if check_source in ("custom", "legacy", "active", "manual"):
            # This is not an autocheck or ignored and currently not
            # checked. Note: Discovered checks that are shadowed by manual
            # checks will vanish that way.
            continue

        if check_source == "new":
            if mode in (DiscoveryMode.NEW, DiscoveryMode.FIXALL,
                        DiscoveryMode.REFRESH):
                new = [
                    s for s in discovered_services_with_nodes
                    if service_filters.new(host_name, s.service)
                ]
                result.self_new += len(new)
                post_discovery_services.extend(new)
            continue

        if check_source in ("old", "ignored"):
            # keep currently existing valid services in any case
            post_discovery_services.extend(discovered_services_with_nodes)
            result.self_kept += len(discovered_services_with_nodes)
            continue

        if check_source == "vanished":
            # keep item, if we are currently only looking for new services
            # otherwise fix it: remove ignored and no longer existing services
            for entry in discovered_services_with_nodes:
                if mode in (DiscoveryMode.FIXALL,
                            DiscoveryMode.REMOVE) and service_filters.vanished(
                                host_name, entry.service):
                    result.self_removed += 1
                else:
                    post_discovery_services.append(entry)
                    result.self_kept += 1
            continue

        if check_source.startswith("clustered_"):
            # Silently keep clustered services
            post_discovery_services.extend(discovered_services_with_nodes)
            setattr(
                result, check_source,
                getattr(result, check_source) +
                len(discovered_services_with_nodes))
            continue

        raise MKGeneralException("Unknown check source '%s'" % check_source)

    return post_discovery_services
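
Summarising the branches above, the check sources are handled as follows:

"custom", "legacy", "active", "manual"  -> skipped (not autochecks)
"new"           -> kept only in modes NEW, FIXALL, REFRESH, subject to service_filters.new
"old", "ignored" -> always kept
"vanished"      -> removed in modes FIXALL, REMOVE if service_filters.vanished matches, otherwise kept
"clustered_*"   -> kept silently
anything else   -> MKGeneralException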
Example #30
0
    def _append(self, aux_tag):
        if self.exists(aux_tag.id):
            raise MKGeneralException(
                _("The tag ID \"%s\" already exists in the list of auxiliary tags."
                  ) % aux_tag)
        self._tags.append(aux_tag)