Example #1
    def validate(self):
        seen = set()  # type: Set[str]
        for aux_tag in self._tags:
            aux_tag.validate()

            # Tag groups were made builtin with ~1.4. Previously users could modify
            # these groups. These users now have the modified tag groups in their
            # user configuration and should be able to clean this up using the GUI
            # for the moment.
            # With 1.7 we use cmk-update-config to force the user to clean this up.
            # Then we can re-enable this consistency check.
            #builtin_config = BuiltinTagConfig()
            #if builtin_config.aux_tag_list.exists(aux_tag.id):
            #    raise MKGeneralException(
            #        _("You can not override the builtin auxiliary tag \"%s\".") % aux_tag.id)

            if aux_tag.id in seen:
                raise MKGeneralException(_("Duplicate tag ID \"%s\" in auxilary tags") % aux_tag.id)

            seen.add(aux_tag.id)
Example #2
    def __init__(self, job_id, logger=None, **kwargs):
        super(BackgroundJob, self).__init__()
        self._job_id = job_id
        self._job_base_dir = BackgroundJobDefines.base_dir
        self._job_initializiation_lock = os.path.join(
            self._job_base_dir, "job_initialization.lock")

        if not logger:
            raise MKGeneralException(
                _("The background job is missing a logger instance"))
        self._logger = logger

        kwargs.setdefault("stoppable", True)

        self._kwargs = kwargs
        self._work_dir = os.path.join(self._job_base_dir, self._job_id)
        self._jobstatus = JobStatus(self._work_dir)

        # The function ptr and its args/kwargs
        self._queued_function = None
Example #3
    def _gather_check_plugin_names(self):
        """Returns a list of check types that shal be executed with this source.

        The logic is only processed once per hostname+ipaddress combination. Once processed,
        check types are cached to answer subsequent calls to this function.
        """

        if self._check_plugin_name_filter_func is None:
            raise MKGeneralException("The check type filter function has not been set")

        try:
            return self._check_plugin_names[(self._hostname, self._ipaddress)]
        except KeyError:
            check_plugin_names = self._check_plugin_name_filter_func(
                self._host_config.snmp_config(self._ipaddress),
                on_error=self._on_error,
                do_snmp_scan=self._do_snmp_scan,
                for_mgmt_board=self._for_mgmt_board)
            self._check_plugin_names[(self._hostname, self._ipaddress)] = check_plugin_names
            return check_plugin_names
Example #4
def _agent_cache_file_age(hostname, check_plugin_name):
    # type: (HostName, CheckPluginName) -> Optional[float]
    host_config = _config.get_config_cache().get_host_config(hostname)
    if host_config.is_cluster:
        raise MKGeneralException("get_agent_data_time() not valid for cluster")

    # TODO 'import-outside-toplevel' not available in pylint for Python 2
    import cmk.base.check_utils  # pylint: disable-all
    if cmk.base.check_utils.is_snmp_check(check_plugin_name):
        cachefile = _paths.tcp_cache_dir + "/" + hostname + "." + check_plugin_name.split(".")[0]  # type: Optional[str]
    elif cmk.base.check_utils.is_tcp_check(check_plugin_name):
        cachefile = _paths.tcp_cache_dir + "/" + hostname
    else:
        cachefile = None

    if cachefile is not None and os.path.exists(cachefile):
        return _cmk_utils.cachefile_age(cachefile)

    return None
Example #5
def _parse_to_raw(path: Path, content: str) -> Mapping[str, str]:

    parsed: dict[str, list[str]] = defaultdict(list)
    current: list[str] = []

    for no, line in enumerate(content.splitlines(), start=1):

        if not line.strip() or line.startswith(" "):  # continuation line
            current.append(line.strip())
            continue

        try:
            key, restofline = line.split(":", 1)
        except ValueError as exc:
            raise MKGeneralException(f"Syntax error in {path} line {no} ({exc}).\n")

        current = parsed[key]
        current.append(restofline.strip())

    return {k: "\n".join(v).strip() for k, v in parsed.items()}
Example #6
def _run_inventory_export_hooks(host_config: config.HostConfig,
                                inventory_tree: StructuredDataTree) -> None:
    import cmk.base.inventory_plugins as inventory_plugins  # pylint: disable=import-outside-toplevel
    hooks = host_config.inventory_export_hooks

    if not hooks:
        return

    section.section_step("Execute inventory export hooks")
    for hookname, params in hooks:
        console.verbose("Execute export hook: %s%s%s%s" %
                        (tty.blue, tty.bold, hookname, tty.normal))
        try:
            func = inventory_plugins.inv_export[hookname]["export_function"]
            func(host_config.hostname, params, inventory_tree.get_raw_tree())
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            raise MKGeneralException("Failed to execute export hook %s: %s" %
                                     (hookname, e))
Example #7
def host_check_command(config_cache: ConfigCache, host_config: HostConfig, ip: HostAddress,
                       is_clust: bool, default_host_check_command: str,
                       host_check_via_service_status: Callable,
                       host_check_via_custom_check: Callable) -> Optional[CoreCommand]:
    value = _get_host_check_command(host_config, default_host_check_command)

    if value == "smart":
        if is_clust:
            return _cluster_ping_command(config_cache, host_config, ip)
        return "check-mk-host-smart"

    if value == "ping":
        if is_clust:
            return _cluster_ping_command(config_cache, host_config, ip)
        ping_args = check_icmp_arguments_of(config_cache, host_config.hostname)
        if ping_args:  # use special arguments
            return "check-mk-host-ping!%s" % ping_args
        return None

    if value == "ok":
        return "check-mk-host-ok"

    if value == "agent":
        return host_check_via_service_status("Check_MK")

    if isinstance(value, tuple) and value[0] == "service":
        return host_check_via_service_status(value[1])

    if isinstance(value, tuple) and value[0] == "tcp":
        if value[1] is None:
            raise TypeError()
        return "check-mk-host-tcp!" + str(value[1])

    if isinstance(value, tuple) and value[0] == "custom":
        if not isinstance(value[1], str):
            raise TypeError()
        return host_check_via_custom_check("check-mk-custom",
                                           "check-mk-custom!" + autodetect_plugin(value[1]))

    raise MKGeneralException("Invalid value %r for host_check_command of host %s." %
                             (value, host_config.hostname))
Example #8
    def __init__(self, conf, opsys, execution_mode):
        self.os_newline = get_os_default('newline', opsys)

        self.execution_mode = conf['execution_mode'][0]
        mode_conf = conf['execution_mode'][1]
        self.cfg_dict = {
            'global': DictNoNone(),
            'suites': DictNoNone(),
        }
        # handy dict shortcuts
        global_dict = self.cfg_dict['global']
        suites_dict = self.cfg_dict['suites']
        global_dict['execution_mode'] = self.execution_mode
        global_dict['agent_output_encoding'] = conf['agent_output_encoding']
        global_dict['transmit_html'] = conf['transmit_html']
        global_dict['log_level'] = conf['log_level']
        global_dict['log_rotation'] = conf['log_rotation']
        global_dict['robotdir'] = conf['dirs'].get('robotdir', None)
        global_dict['outputdir'] = conf['dirs'].get('outputdir', None)
        global_dict['logdir'] = conf['dirs'].get('logdir', None)

        if self.execution_mode == 'agent_serial':
            global_dict['cache_time'] = mode_conf['cache_time']
            global_dict['execution_interval'] = mode_conf['execution_interval']
            self.execution_interval = mode_conf['execution_interval']
        elif self.execution_mode == 'external':
            # For now, we assume that the external mode is meant to execute all
            # suites exactly as configured. Hence, we can use the global cache time.
            global_dict['cache_time'] = mode_conf['cache_time']

        if len(mode_conf['suites']) > 0:
            for suite_dict in mode_conf['suites']:
                suite = RMKSuite(suite_dict)
                if suite.suiteid in self.cfg_dict['suites']:
                    raise MKGeneralException(
                        "Error in bakery plugin 'robotmk': Suite with ID %s is not unique. Please use tags to solve this problem."
                        % suite.suiteid)

                self.cfg_dict['suites'].update(
                    {suite.suiteid: suite.suite2dict})
Example #9
def page_host_inv_api() -> None:
    # The response is always a top level dict with two elements:
    # a) result_code - This is 0 for expected processing and 1 for an error
    # b) result      - In case of an error this is the error message, a UTF-8 encoded string.
    #                  In case of success this is a dictionary containing the host inventory.
    try:
        api_request = request.get_request()
        # The user can either specify a single host or provide a list of host names. In case
        # multiple hosts are handled, there is a top level dict added with "host > invdict" pairs
        hosts = api_request.get("hosts")
        if hosts:
            result = {}
            for a_host_name in hosts:
                result[a_host_name] = inventory_of_host(a_host_name, api_request)

        else:
            host_name = api_request.get("host")
            if host_name is None:
                raise MKUserError("host", _("You need to provide a \"host\"."))

            result = inventory_of_host(host_name, api_request)

            if not result and not has_inventory(host_name):
                raise MKGeneralException(_("Found no inventory data for this host."))

        resp = {"result_code": 0, "result": result}

    except MKException as e:
        resp = {"result_code": 1, "result": "%s" % e}

    except Exception as e:
        if config.debug:
            raise
        resp = {"result_code": 1, "result": "%s" % e}

    if html.output_format == "json":
        _write_json(resp)
    elif html.output_format == "xml":
        _write_xml(resp)
    else:
        _write_python(resp)
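
For illustration, the response dictionary written at the end has roughly the following shapes (values invented; the actual inventory dictionaries depend on the queried hosts):

# Single host, success:
single_host_ok = {"result_code": 0, "result": {"hardware": {"cpu": {"cores": 8}}}}

# Multiple hosts: "result" maps each host name to its inventory dictionary:
multi_host_ok = {"result_code": 0, "result": {"host1": {"hardware": {}}, "host2": {"hardware": {}}}}

# Error: "result" carries the error message as a string:
error_resp = {"result_code": 1, "result": "Found no inventory data for this host."}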
Example #10
def _discover_services(
    *,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    discovery_parameters: DiscoveryParameters,
    run_plugin_names: Container[CheckPluginName],
) -> List[Service]:
    # find out which plugins we need to discover
    plugin_candidates = _find_candidates(parsed_sections_broker, run_plugin_names)
    section.section_step("Executing discovery plugins (%d)" % len(plugin_candidates))
    console.vverbose("  Trying discovery with: %s\n" % ", ".join(str(n) for n in plugin_candidates))
    # The host name must be set for the host_name() calls commonly used to determine the
    # host name for host_extra_conf{_merged,} calls in the legacy checks.

    service_table: CheckTable = {}
    try:
        with plugin_contexts.current_host(host_name):
            for check_plugin_name in plugin_candidates:
                try:
                    service_table.update({
                        service.id(): service for service in _discover_plugins_services(
                            check_plugin_name=check_plugin_name,
                            host_name=host_name,
                            ipaddress=ipaddress,
                            parsed_sections_broker=parsed_sections_broker,
                            discovery_parameters=discovery_parameters,
                        )
                    })
                except (KeyboardInterrupt, MKTimeout):
                    raise
                except Exception as e:
                    if discovery_parameters.on_error == "raise":
                        raise
                    if discovery_parameters.on_error == "warn":
                        console.error(f"Discovery of '{check_plugin_name}' failed: {e}\n")

            return list(service_table.values())

    except KeyboardInterrupt:
        raise MKGeneralException("Interrupted by Ctrl-C.")
Example #11
    def get_livestatus_filters(self, livestatus_table, used_filters):
        filter_lines = []

        if len(used_filters.get(self.get_filter_shortname())) > 3:
            raise MKGeneralException("You can only set up to three 'tg:' filters")

        for entry in used_filters.get(self.get_filter_shortname()):
            if ":" not in entry:
                # Be compatible to pre 1.6 filtering for some time (no
                # tag-group:tag-value, but tag-value only)
                filter_lines.append("Filter: tag_values >= %s" % livestatus.lqencode(entry))
                continue

            tag_key, tag_value = entry.split(":", 1)
            filter_lines.append("Filter: tags = %s %s" %
                                (livestatus.lqencode(tag_key), livestatus.lqencode(tag_value)))

        if len(filter_lines) > 1:
            filter_lines.append("And: %d" % len(filter_lines))

        return "\n".join(filter_lines)
Example #12
def active_check_arguments(
    hostname: HostName,
    description: Optional[ServiceName],
    args: config.SpecialAgentInfoFunctionResult,
) -> str:
    if isinstance(args, str):
        return args

    cmd_args: CheckCommandArguments = []
    if isinstance(args, config.SpecialAgentConfiguration):
        cmd_args = args.args
    else:
        cmd_args = args

    if not isinstance(cmd_args, list):
        raise MKGeneralException(
            "The check argument function needs to return either a list of arguments or a "
            "string of the concatenated arguments (Host: %s, Service: %s)." %
            (hostname, description))

    return _prepare_check_command(cmd_args, hostname, description)
Example #13
def do_core_action(action, quiet=False):
    # type: (str, bool) -> None
    if not quiet:
        out.output("%sing monitoring core..." % action.title())

    if config.monitoring_core == "nagios":
        os.putenv("CORE_NOVERIFY", "yes")
        command = ["%s/etc/init.d/core" % cmk.utils.paths.omd_root, action]
    else:
        command = ["omd", action, "cmc"]

    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
    result = p.wait()
    if result != 0:
        assert p.stdout is not None
        output = p.stdout.read()
        if not quiet:
            out.output("ERROR: %r\n" % output)
        raise MKGeneralException("Cannot %s the monitoring core: %r" % (action, output))
    if not quiet:
        out.output(tty.ok + "\n")
Example #14
    def resolve_plugin_dependencies(plugin_name, known_dependencies=None):
        '''recursively aggregate all plugin dependencies'''
        if known_dependencies is None:
            known_dependencies = set()
        if plugin_name in resolved_dependencies:
            known_dependencies.update(resolved_dependencies[plugin_name])
            return known_dependencies

        try:
            direct_dependencies = set(inv_info[plugin_name].get(
                'depends_on', []))
        except KeyError:
            raise MKGeneralException("unknown plugin dependency: %r" %
                                     plugin_name)

        new_dependencies = direct_dependencies - known_dependencies
        known_dependencies.update(new_dependencies)
        for dependency in new_dependencies:
            known_dependencies = resolve_plugin_dependencies(
                dependency, known_dependencies)
        return known_dependencies
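
A minimal sketch of the recursion with a hypothetical inv_info and an empty resolved_dependencies cache (both names come from the enclosing scope, which this excerpt does not show):

inv_info = {
    "inv_a": {"depends_on": ["inv_b"]},
    "inv_b": {"depends_on": ["inv_c"]},
    "inv_c": {},
}
resolved_dependencies: dict = {}

# resolve_plugin_dependencies("inv_a") -> {"inv_b", "inv_c"}
# An unknown plugin name raises MKGeneralException("unknown plugin dependency: ...").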
Example #15
def _make_index_rows(
    max_column: SNMPRowInfo,
    index_format: SpecialColumn,
    fetchoid: OID,
) -> SNMPRowInfo:
    index_rows = []
    for o, _unused_value in max_column:
        if index_format is SpecialColumn.END:
            val = ensure_binary(_extract_end_oid(fetchoid, o))
        elif index_format is SpecialColumn.STRING:
            val = ensure_binary(o)
        elif index_format is SpecialColumn.BIN:
            val = _oid_to_bin(o)
        elif index_format is SpecialColumn.END_BIN:
            val = _oid_to_bin(_extract_end_oid(fetchoid, o))
        elif index_format is SpecialColumn.END_OCTET_STRING:
            val = _oid_to_bin(_extract_end_oid(fetchoid, o))[1:]
        else:
            raise MKGeneralException("Invalid index format %r" % (index_format,))
        index_rows.append((o, val))
    return index_rows
Example #16
def active_check_arguments(hostname, description, args):
    # type: (HostName, Optional[ServiceName], config.SpecialAgentInfoFunctionResult) -> str
    if isinstance(args, config.SpecialAgentConfiguration):
        # TODO: Silly dispatching because of broken types/variance.
        if isinstance(args.args, str):
            cmd_args = args.args  # type: Union[str, List[Union[int, float, str, Tuple[str, str, str]]]]
        elif isinstance(args.args, list):
            cmd_args = [arg for arg in args.args if isinstance(arg, str)]
        else:
            raise Exception("funny SpecialAgentConfiguration args %r" % (args.args,))
    elif isinstance(args, str):
        cmd_args = args
    elif isinstance(args, list):
        cmd_args = [arg for arg in args if isinstance(arg, (str, tuple))]
    else:
        raise MKGeneralException(
            "The check argument function needs to return either a list of arguments or a "
            "string of the concatenated arguments (Host: %s, Service: %s)." %
            (hostname, description))

    return config.prepare_check_command(cmd_args, hostname, description)
Example #17
def do_snmpwalk(options, hostnames):
    if "oids" in options and "extraoids" in options:
        raise MKGeneralException("You cannot specify --oid and --extraoid at the same time.")

    if not hostnames:
        raise MKBailOut("Please specify host names to walk on.")

    if not os.path.exists(cmk.utils.paths.snmpwalks_dir):
        os.makedirs(cmk.utils.paths.snmpwalks_dir)

    for hostname in hostnames:
        #TODO: What about SNMP management boards?
        snmp_config = create_snmp_host_config(hostname)

        try:
            _do_snmpwalk_on(snmp_config, options, cmk.utils.paths.snmpwalks_dir + "/" + hostname)
        except Exception as e:
            console.error("Error walking %s: %s\n" % (hostname, e))
            if cmk.utils.debug.enabled():
                raise
        cmk_base.cleanup.cleanup_globals()
Example #18
def do_create_config(core: MonitoringCore, hosts_to_update: HostsToUpdate = None) -> None:
    """Creating the monitoring core configuration and additional files

    Ensures that everything needed by the monitoring core and its helper processes is up-to-date
    and available for starting the monitoring.
    """
    out.output("Generating configuration for core (type %s)...\n" % core.name())
    if hosts_to_update is not None:
        out.output(
            "Reuse old configuration, create new configuration for %s and dependant hosts\n"
            % ", ".join(hosts_to_update)
        )

    try:
        _create_core_config(core, hosts_to_update=hosts_to_update)
    except Exception as e:
        if cmk.utils.debug.enabled():
            raise
        raise MKGeneralException("Error creating configuration: %s" % e)

    _bake_on_restart()
Example #19
    def save(self, hostname):
        # type: (HostName) -> None
        """ The job of the save function is to update the item state on disk.
        It simply returns, if it detects that the data wasn't changed at all since the last loading
        If the data on disk has been changed in the meantime, the cached data is updated from disk.
        Afterwards only the actual modifications (update/remove) are applied to the updated cached
        data before it is written back to disk.
        """
        filename = cmk.utils.paths.counters_dir + "/" + hostname
        if not self._removed_item_state_keys and not self._updated_item_states:
            return

        try:
            if not os.path.exists(cmk.utils.paths.counters_dir):
                os.makedirs(cmk.utils.paths.counters_dir)

            store.aquire_lock(filename)
            last_mtime = os.stat(filename).st_mtime
            if last_mtime != self._last_mtime:
                self._item_states = store.load_object_from_file(filename,
                                                                default={})

                # Remove obsolete keys
                for key in self._removed_item_state_keys:
                    try:
                        del self._item_states[key]
                    except KeyError:
                        pass

                # Add updated keys
                self._item_states.update(self._updated_item_states)

            store.save_object_to_file(filename,
                                      self._item_states,
                                      pretty=False)
        except Exception:
            raise MKGeneralException("Cannot write to %s: %s" %
                                     (filename, traceback.format_exc()))
        finally:
            store.release_lock(filename)
Example #20
def _get_sorted_check_table(hostname: HostName,
                            remove_duplicates: bool = False,
                            filter_mode: Optional[str] = None,
                            skip_ignored: bool = True) -> List[Service]:
    # Convert from dictionary into simple tuple list. Then sort it according to
    # the service dependencies.
    # TODO: Use the Service objects from get_check_table once it returns these objects
    is_cmc = config.is_cmc()
    unsorted = [(service,
                 [] if is_cmc else config.service_depends_on(hostname, service.description))
                for service in get_check_table(hostname,
                                               remove_duplicates=remove_duplicates,
                                               filter_mode=filter_mode,
                                               skip_ignored=skip_ignored).values()]

    unsorted.sort(key=lambda x: x[0].description)

    ordered: List[Service] = []
    while len(unsorted) > 0:
        unsorted_descrs = {entry[0].description for entry in unsorted}
        left = []
        at_least_one_hit = False
        for check in unsorted:
            deps_fulfilled = True
            for dep in check[1]:  # dependencies
                if dep in unsorted_descrs:
                    deps_fulfilled = False
                    break
            if deps_fulfilled:
                ordered.append(check[0])
                at_least_one_hit = True
            else:
                left.append(check)
        if len(left) == 0:
            break
        if not at_least_one_hit:
            raise MKGeneralException("Cyclic service dependency of host %s. Problematic are: %s" %
                                     (hostname, ",".join(unsorted_descrs)))
        unsorted = left
    return ordered
Example #21
def _do_submit_to_core(
    host: HostName,
    service: ServiceName,
    state: ServiceState,
    output: ServiceDetails,
    cache_info: Optional[Tuple[int, int]],
) -> None:
    if _in_keepalive_mode():
        cached_at, cache_interval = cache_info or (None, None)
        # Regular case for the CMC - check helpers are running in keepalive mode
        keepalive.add_check_result(host, service, state, output, cached_at, cache_interval)

    elif config.check_submission == "pipe" or config.monitoring_core == "cmc":
        # In case of CMC this is used when running "cmk" manually
        _submit_via_command_pipe(host, service, state, output)

    elif config.check_submission == "file":
        _submit_via_check_result_file(host, service, state, output)

    else:
        raise MKGeneralException("Invalid setting %r for check_submission. "
                                 "Must be 'pipe' or 'file'" % config.check_submission)
Example #22
def _do_submit_to_core(host,
                       service,
                       state,
                       output,
                       cached_at=None,
                       cache_interval=None):
    if _in_keepalive_mode():
        # Regular case for the CMC - check helpers are running in keepalive mode
        keepalive.add_keepalive_check_result(host, service, state, output,
                                             cached_at, cache_interval)

    elif config.check_submission == "pipe" or config.monitoring_core == "cmc":
        # In case of CMC this is used when running "cmk" manually
        _submit_via_command_pipe(host, service, state, output)

    elif config.check_submission == "file":
        _submit_via_check_result_file(host, service, state, output)

    else:
        raise MKGeneralException("Invalid setting %r for check_submission. "
                                 "Must be 'pipe' or 'file'" %
                                 config.check_submission)
Example #23
def _backup_objects_file(core: MonitoringCore) -> Iterator[None]:
    if config.monitoring_core == "nagios":
        objects_file = cmk.utils.paths.nagios_objects_file
    else:
        objects_file = cmk.utils.paths.var_dir + "/core/config"

    backup_path = None
    if os.path.exists(objects_file):
        backup_path = objects_file + ".save"
        os.rename(objects_file, backup_path)

    try:
        try:
            yield None
        except Exception:
            if backup_path:
                os.rename(backup_path, objects_file)
            raise

        if (config.monitoring_core == "nagios"
                and Path(cmk.utils.paths.nagios_config_file).exists()
                and not do_check_nagiosconfig()):
            broken_config_path = Path(
                cmk.utils.paths.tmp_dir) / "check_mk_objects.cfg.broken"
            shutil.move(cmk.utils.paths.nagios_objects_file,
                        broken_config_path)

            if backup_path:
                os.rename(backup_path, objects_file)
            elif os.path.exists(objects_file):
                os.remove(objects_file)

            raise MKGeneralException(
                "Configuration for monitoring core is invalid. Rolling back. "
                "The broken file has been copied to \"%s\" for analysis." %
                broken_config_path)
    finally:
        if backup_path and os.path.exists(backup_path):
            os.remove(backup_path)
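
The generator is intended to be used as a context manager; assuming it is wrapped with contextlib.contextmanager in the original module (the decorator is not part of this excerpt), a rough usage sketch is:

with _backup_objects_file(core):  # assumes an @contextmanager decorator, not shown here
    _create_core_config(core, hosts_to_update=None)  # an exception here restores the backup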
Example #24
def host_check_command(config_cache, host_config, ip, is_clust,
                       default_host_check_command,
                       host_check_via_service_status,
                       host_check_via_custom_check):
    value = _get_host_check_command(host_config, default_host_check_command)

    if value == "smart":
        if is_clust:
            return _cluster_ping_command(config_cache, host_config, ip)
        return "check-mk-host-smart"

    if value == "ping":
        if is_clust:
            return _cluster_ping_command(config_cache, host_config, ip)
        ping_args = check_icmp_arguments_of(config_cache, host_config.hostname)
        if ping_args:  # use special arguments
            return "check-mk-host-ping!%s" % ping_args
        return None

    if value == "ok":
        return "check-mk-host-ok"

    if value == "agent":
        return host_check_via_service_status("Check_MK")

    if value[0] == "service":
        return host_check_via_service_status(value[1])

    if value[0] == "tcp":
        return "check-mk-host-tcp!" + str(value[1])

    if value[0] == "custom":
        return host_check_via_custom_check(
            "check-mk-custom",
            "check-mk-custom!" + autodetect_plugin(value[1]))

    raise MKGeneralException(
        "Invalid value %r for host_check_command of host %s." %
        (value, host_config.hostname))
Example #25
    def _is_expected_agent_version(self, agent_version, expected_version):
        try:
            if agent_version in ['(unknown)', None, 'None']:
                return False

            if isinstance(expected_version, str) and expected_version != agent_version:
                return False

            elif isinstance(expected_version, tuple) and expected_version[0] == 'at_least':
                spec = expected_version[1]
                if utils.is_daily_build_version(agent_version) and 'daily_build' in spec:
                    expected = int(spec['daily_build'].replace('.', ''))

                    branch = utils.branch_of_daily_build(agent_version)
                    if branch == "master":
                        agent = int(agent_version.replace('.', ''))

                    else:  # branch build (e.g. 1.2.4-2014.06.01)
                        agent = int(agent_version.split('-')[1].replace('.', ''))

                    if agent < expected:
                        return False

                elif 'release' in spec:
                    if utils.is_daily_build_version(agent_version):
                        return False

                    if cmk.utils.werks.parse_check_mk_version(agent_version) \
                        < cmk.utils.werks.parse_check_mk_version(spec['release']):
                        return False

            return True
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            raise MKGeneralException(
                "Unable to check agent version (Agent: %s Expected: %s, Error: %s)" %
                (agent_version, expected_version, e))
Example #26
def _make_target_columns(
        oid_info: Union[OIDInfo,
                        SNMPTree]) -> Tuple[OID, List[Any], SNMPColumns]:
    #
    # OIDInfo is one of:
    #   - OIDWithColumns = Tuple[OID, SNMPColumns]
    #   - OIDWithSubOIDsAndColumns = Tuple[OID, List[OID], SNMPColumns]
    #     where List[OID] is a list of OID-infixes that are put between the
    #     baseoid and the columns and prefixed with the index column.
    #
    # TODO: The Union[OIDWithColumns, OIDWithSubOIDsAndColumns] dance is absurd!
    #       Here, we should just have OIDWithSubOIDsAndColumns and
    #       replace `OIDWithColumns` with `Tuple[OID, [], SNMPColumns]`.
    #
    # This makes it possible to merge distinct SNMP subtrees with a similar structure
    # into one virtual new tree (look into cmctc_temp for an example).
    suboids: List = [None]
    if isinstance(oid_info, SNMPTree):
        # TODO (mo): Via SNMPTree is the way to go. Remove all other cases
        #            once we have the auto-conversion of SNMPTrees in place.
        #            In particular:
        #              * remove all 'suboids' related code (index_column!)
        #              * remove all casts, and extend the lifetime of the
        #                SNMPTree object as far as possible.
        #              * I think the below code can be improved by making
        #                SNMPTree an iterable.
        tmp_base = str(oid_info.base)
        oid, targetcolumns = cast(OIDWithColumns, (tmp_base, oid_info.oids))
    elif len(oid_info) == 2:
        oid, targetcolumns = cast(OIDWithColumns, oid_info)
    else:
        oid, suboids, targetcolumns = cast(OIDWithSubOIDsAndColumns, oid_info)

    if not oid.startswith("."):
        raise MKGeneralException("OID definition '%s' does not begin with ." %
                                 oid)

    return oid, suboids, targetcolumns
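
For illustration, the two legacy tuple shapes described in the comment look roughly like this (OIDs and columns invented):

# OIDWithColumns: (base OID, columns)
oid_with_columns = (".1.3.6.1.2.1.2.2.1", ["1", "2", "10"])
# _make_target_columns(oid_with_columns) -> (".1.3.6.1.2.1.2.2.1", [None], ["1", "2", "10"])

# OIDWithSubOIDsAndColumns: (base OID, sub-OID infixes, columns)
oid_with_suboids = (".1.3.6.1.4.1.9999.1", ["3", "4"], ["2.1.3", "2.1.4"])
# _make_target_columns(oid_with_suboids) -> (".1.3.6.1.4.1.9999.1", ["3", "4"], ["2.1.3", "2.1.4"])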
Example #27
    def runner_plugin(self, opsys: OS) -> Plugin:
        # TODO: when external mode:
        #  => bin!
        #  when not:
        #  no target, interval!
        if self.execution_mode == 'external':
            # Runner and Controller have to be deployed as bin
            # /omd/sites/v2/lib/python3/cmk/base/cee/bakery/core_bakelets/bin_files.py

            # cmk.utils.paths.local_agents_dir ??
            pass
        elif self.execution_mode == 'agent_serial':
            # the runner plugin gets
            return Plugin(
                base_os=opsys,
                source=Path('robotmk-runner.py'),
                # TODO: interval=interval,
                interval=self.execution_interval,
            )
        else:
            raise MKGeneralException(
                "Error: Execution mode %s is not supported." %
                self.execution_mode)
Example #28
def _agent_cache_file_age(hostname: HostName,
                          check_plugin_name: CheckPluginNameStr) -> Optional[float]:
    host_config = _config.get_config_cache().get_host_config(hostname)
    if host_config.is_cluster:
        raise MKGeneralException("get_agent_data_time() not valid for cluster")

    # NOTE: This is a workaround for the 'old' API and will not be correct
    # for the new one. This is a check plugin name, and the property of being
    # 'TCP' or 'SNMP' is a property of the section.
    # This function is deprecated for new plugins.
    # For old-style plugins, plugin and section name are same, so check the
    # corresponding section:
    section_name_str = _cmk_utils.check_utils.section_name_of(check_plugin_name)
    section = _agent_based_register.get_section_plugin(_SectionName(section_name_str))
    if hasattr(section, "trees"):
        cachefile = "%s/%s.%s" % (_paths.tcp_cache_dir, hostname, section_name_str)
    else:
        cachefile = "%s/%s" % (_paths.tcp_cache_dir, hostname)

    if os.path.exists(cachefile):
        return _cmk_utils.cachefile_age(cachefile)

    return None
Example #29
def _get_needed_check_plugin_names(host_config):
    # type: (config.HostConfig) -> Set[CheckPluginName]
    import cmk.base.check_table as check_table  # pylint: disable=import-outside-toplevel
    needed_check_plugin_names = set([])

    # In case the host is monitored as special agent, the check plugin for the special agent needs
    # to be loaded
    sources = data_sources.DataSources(host_config.hostname, ipaddress=None)
    for source in sources.get_data_sources():
        if isinstance(source, data_sources.programs.SpecialAgentDataSource):
            needed_check_plugin_names.add(
                source.special_agent_plugin_file_name)

    # Collect the needed check plugin names using the host check table
    for check_plugin_name in check_table.get_needed_check_names(
            host_config.hostname,
            filter_mode="include_clustered",
            skip_ignored=False):
        if config.check_info[check_plugin_name].get("extra_sections"):
            for section_name in config.check_info[check_plugin_name][
                    "extra_sections"]:
                if section_name in config.check_info:
                    needed_check_plugin_names.add(section_name)

        needed_check_plugin_names.add(check_plugin_name)

    # Also include the check plugins of the cluster nodes to be able to load
    # the autochecks of the nodes
    if host_config.is_cluster:
        nodes = host_config.nodes
        if nodes is None:
            raise MKGeneralException("Invalid cluster configuration")
        for node in nodes:
            needed_check_plugin_names.update(
                check_table.get_needed_check_names(node, skip_ignored=False))

    return needed_check_plugin_names
Example #30
def _extract_snmp_sections(
    inv_info: Dict[str, InventoryInfo],
    plugin_file_lookup: Dict[str, str],
) -> Sequence[str]:
    errors = []
    for plugin_name, plugin_info in sorted(inv_info.items()):
        if "snmp_info" not in plugin_info:
            continue
        section_name = section_name_of(plugin_name)
        if isinstance(
                agent_based_register.get_section_plugin(
                    SectionName(section_name)), SNMPSectionPlugin):
            continue

        fallback_files = [
            _include_file_path(i) for i in plugin_info.get("includes", [])
        ] + [plugin_file_lookup[plugin_name]]

        try:
            agent_based_register.add_section_plugin(
                create_snmp_section_plugin_from_legacy(
                    section_name,
                    {},
                    plugin_info["snmp_scan_function"],
                    plugin_info["snmp_info"],
                    scan_function_fallback_files=fallback_files,
                    # We have to validate, because we read inventory plugin files
                    # directly, and do not know whether they changed.
                    validate_creation_kwargs=True,
                ))
        except (NotImplementedError, KeyError, AssertionError, ValueError):
            msg = config.AUTO_MIGRATION_ERR_MSG % ("section", plugin_name)
            if cmk.utils.debug.enabled():
                raise MKGeneralException(msg)
            errors.append(msg)

    return errors