示例#1
0
 def __call__(self, srv_com, cur_ns):
     """Read /proc/mounts and attach the mount table to *srv_com*.

     Every non-empty line is split into its whitespace-separated fields;
     the resulting list of field lists is JSON-compressed into the
     ``mounts`` element of the server command.
     """
     # use a context manager so the file handle is closed deterministically
     # (the original open(...).read() leaked the handle until GC)
     with open("/proc/mounts", "r") as mounts_file:
         _mounts = [
             _line.strip().split()
             for _line in mounts_file.read().split("\n")
             if _line.strip()
         ]
     srv_com["mounts"] = server_command.compress(_mounts, json=True)
示例#2
0
 def store(
     cls,
     structure: object,
     file_size: int,
     file_type: enumerate,
     install_device: object,
 ) -> object:
     """Store *structure* as a compressed backend config file.

     Identical uploads are deduplicated by compressed content: an existing
     row only gets its ``same_uploads`` counter bumped, otherwise a new
     ``BackendConfigFile`` is created and marked as the single
     ``most_recent`` instance for its file type.
     """
     from initat.tools import server_command
     compressed = server_command.compress(structure, json=True)
     try:
         cfile = cls.objects.get(Q(content=compressed))
     except cls.DoesNotExist:
         needs_create = True
     else:
         # identical content already stored - just count the duplicate upload
         cfile.same_uploads += 1
         cfile.save(update_fields=["same_uploads"])
         needs_create = False
     if needs_create:
         cfile = BackendConfigFile(
             file_size=file_size,
             file_type=file_type.name,
             most_recent=True,
             content=compressed,
             install_device=install_device,
         )
         cfile.save()
         # demote every other instance of this file type from most_recent
         cls.objects.filter(
             Q(file_type=file_type.name, most_recent=True)
         ).exclude(
             Q(idx=cfile.idx)
         ).update(most_recent=False)
     return cfile
示例#3
0
文件: server.py 项目: walong365/icsw
 def distribute_info(self, srv_com, **kwargs):
     """Persist the distribution info from *srv_com* and forward it to the sync helper."""
     dist_info = server_command.decompress(srv_com["*info"], marshal=True)
     # store a string-compressed copy so it survives daemon restarts
     compressed = server_command.compress(dist_info, json=True, to_string=True)
     self.config_store["distribute_info"] = compressed
     self.config_store.write()
     self.SH.distribute_info(dist_info)
     return None
示例#4
0
 def get_pure_data(lic_content):
     """Return *lic_content* with signatures and creation timestamps blanked.

     The license blob is decompressed to XML, volatile nodes (signature and
     creation-datetime) are emptied in place, and the tree is re-serialized
     and compressed so that two logically identical licenses compare equal.
     """
     license_xml = etree.fromstring(server_command.decompress(lic_content))
     volatile_xpath = (
         ".//icsw:signature|.//icsw:license-file-meta/icsw:creation-datetime"
     )
     for node in license_xml.xpath(volatile_xpath, namespaces=ICSW_XML_NS_MAP):
         node.text = ""
     return server_command.compress(etree.tostring(license_xml))
示例#5
0
文件: rpm_mod.py 项目: bopopescu/icsw
 def __call__(self, srv_com, cur_ns):
     """Collect the pending-update list and attach it pickled to *srv_com*."""
     s_time = time.time()
     update_list = get_update_list()
     e_time = time.time()
     # report how long the collection took
     srv_com.set_result(
         "ok got list in {}".format(
             logging_tools.get_diff_time_str(e_time - s_time)
         ),
     )
     srv_com["update_list"] = server_command.compress(update_list, pickle=True)
示例#6
0
 def _get_sys_info(self, *args, **kwargs):
     """Answer a sys_info request with the info of every known slave config."""
     # args[0] carries the raw incoming command for the frontend
     srv_com = server_command.srv_command(source=args[0])
     slave_list = list(self.__slave_configs.values())
     srv_com.set_result(
         "ok set info for {}".format(
             logging_tools.get_plural("system", len(slave_list))
         )
     )
     srv_com["sys_info"] = server_command.compress(
         [slave.info for slave in slave_list], json=True
     )
     self.send_pool_message("remote_call_async_result", str(srv_com))
示例#7
0
def _get_ref_value(in_str):
    try:
        _v = json.loads(in_str)
    except:
        # in_str has old structure, pass
        # print("o", type(in_str))
        return in_str
    else:
        # in_str is json dump, compress it
        return server_command.compress(_v, json=True).decode("ascii")
示例#8
0
    def __call__(self, srv_com, cur_ns):
        """Collect the list of pending software updates into *srv_com*.

        On Windows the Microsoft Update COM API is queried from a worker
        thread (bounded by a 150 second join timeout); on other platforms
        the platform-specific ``get_update_list`` helper is used.  The
        resulting list is pickled into ``srv_com["update_list"]`` and the
        source platform is recorded in ``srv_com["format"]``.
        """
        if PLATFORM_SYSTEM_TYPE == PlatformSystemTypeEnum.WINDOWS:
            from threading import Thread

            def search_func():
                # COM must be initialized in the thread that uses it
                import win32com
                import win32com.client
                import pythoncom

                pythoncom.CoInitialize()

                update = win32com.client.Dispatch('Microsoft.Update.Session')
                update_searcher = update.CreateUpdateSearcher()

                # not yet installed and not hidden == pending updates
                search_result = update_searcher.Search(
                    "( IsInstalled = 0 and IsHidden = 0 )")

                for i in range(search_result.Updates.Count):
                    title = search_result.Updates.Item(i).Title
                    # updates that are not mandatory are reported as optional
                    optional = not search_result.Updates.Item(i).IsMandatory
                    update_list.append((title, optional))

            update_list = []
            update_searcher_thread = Thread(target=search_func)
            update_searcher_thread.start()
            # NOTE(review): join() can time out (150 s) with the search still
            # running, in which case update_list may be incomplete or even
            # mutated while being pickled below - confirm this is acceptable
            update_searcher_thread.join(timeout=30 * 5)

            srv_com["format"] = "windows"
            srv_com["update_list"] = server_command.compress(update_list,
                                                             pickle=True)
        else:
            s_time = time.time()
            update_list, log_list = get_update_list()
            for log_entry in log_list:
                self.log(log_entry)
            e_time = time.time()
            srv_com.set_result(
                "ok got list in {}".format(
                    logging_tools.get_diff_time_str(e_time - s_time)), )
            srv_com["format"] = "linux"
            srv_com["update_list"] = server_command.compress(update_list,
                                                             pickle=True)
示例#9
0
    def __call__(self, srv_com, cur_ns):
        """Build a filtered process tree and attach it JSON-compressed to *srv_com*.

        If ``cur_ns.arguments`` lists process names, only matching processes
        are kept; "cron" is transparently extended with "crond" to cover
        both spellings of the cron daemon.
        """
        if cur_ns.arguments:
            name_list = cur_ns.arguments
            if "cron" in name_list:
                name_list.append("crond")
        else:
            name_list = []
        _p_dict = {}

        # platform-dependent set of psutil attributes to export
        # (fix: "ppid" was listed twice in each of the original lists)
        if PLATFORM_SYSTEM_TYPE == PlatformSystemTypeEnum.WINDOWS:
            attr_list = [
                "pid", "ppid", "name", "exe", "cmdline", "status",
                "cpu_affinity"
            ]
        elif PLATFORM_SYSTEM_TYPE == PlatformSystemTypeEnum.LINUX:
            attr_list = [
                "pid",
                "ppid",
                "uids",
                "gids",
                "name",
                "exe",
                "cmdline",
                "status",
                "cpu_affinity",
            ]
        else:
            attr_list = ["pid", "name"]
        for key, value in process_tools.get_proc_list(
                proc_name_list=name_list).items():
            try:
                if value.is_running():
                    _p_dict[key] = value.as_dict(attrs=attr_list)
            except psutil.NoSuchProcess:
                # process vanished between listing and inspection
                pass
        if cur_ns.arguments:
            # try to be smart about cron / crond
            t_dict = {
                key: value
                for key, value in _p_dict.items()
                if value["name"] in cur_ns.arguments
            }
            if not t_dict and cur_ns.arguments[0] == "cron":
                t_dict = {
                    key: value
                    for key, value in _p_dict.items()
                    if value["name"] in ["crond"]
                }
            _p_dict = t_dict
        srv_com["process_tree"] = server_command.compress(_p_dict, json=True)
        srv_com["process_tree"].attrib["format"] = "2"
示例#10
0
 def __call__(self, srv_com, cur_ns):
     """Run lsmod and attach the parsed module table to *srv_com*.

     Each module is encoded as (name, size, use_count, [users]);
     a non-zero exit status is reported as an error result instead.
     """
     _stat, _out = subprocess.getstatusoutput(self.module.lsmod_command)
     if _stat:
         srv_com.set_result("error getting module list",
                            server_command.SRV_REPLY_STATE_ERROR)
         return
     module_table = []
     # skip the header line emitted by lsmod
     for raw_line in _out.split("\n")[1:]:
         fields = raw_line.strip().split()
         # a line with only three fields has no users column
         users = [] if len(fields) == 3 else fields[3].split(",")
         module_table.append((fields[0], int(fields[1]), int(fields[2]), users))
     srv_com["modules"] = server_command.compress(module_table, json=True)
示例#11
0
文件: struct.py 项目: walong365/icsw
 def get_syslog(srv_com):
     """Attach recent syslog lines to every ``ns:device`` node of *srv_com*.

     Each device node first gets error-state attributes ("Device not
     found"); when the device pk is known to ``Machine``, the requested
     number of lines / minutes is read from its filewatcher, rate info is
     appended as a ``rates`` child and the log lines themselves are
     JSON-compressed into a ``lines`` child node.
     """
     # print srv_com.pretty_print()
     for _dev in srv_com.xpath(".//ns:devices/ns:device"):
         # pessimistic defaults; NOTE(review): state/result are not reset
         # in the success branch below - confirm the caller expects that
         _dev.attrib.update(
             {
                 "state": "{:d}".format(logging_tools.LOG_LEVEL_ERROR),
                 "result": "Device not found",
             }
         )
         _pk = int(_dev.attrib["pk"])
         if Machine.has_device(_pk):
             dev = Machine.get_device(_pk)
             # requested amount: number of lines and minutes to cover (0 = default)
             _to_read = int(_dev.attrib.get("lines", "0"))
             _minutes_to_cover = int(_dev.attrib.get("minutes", "0"))
             lines = dev.filewatcher.get_logs(to_read=_to_read, minutes=_minutes_to_cover)
             dev.log(
                 "lines found: {:d} (of {:d} / {:d})".format(
                     len(lines),
                     _to_read,
                     _minutes_to_cover,
                 )
             )
             _dev.attrib.update(
                 {
                     "read": "{:d}".format(len(lines)),
                     # for line version
                     "version": "1",
                 }
             )
             rates = dev.filewatcher.get_rates()
             if rates:
                 # one <rate> element per (timeframe seconds, rate) pair
                 _dev.append(
                     srv_com.builder(
                         "rates",
                         *[
                             srv_com.builder(
                                 "rate",
                                 timeframe="{:d}".format(_seconds),
                                 rate="{:.4f}".format(_rate)
                             ) for _seconds, _rate in rates.items()
                         ]
                     )
                 )
             _dev.append(
                 srv_com.builder(
                     "lines",
                     server_command.compress(
                         [_line.get_xml_format() for _line in lines],
                         json=True
                     )
                 )
             )
示例#12
0
 def send_info_message(self):
     """Send an info_list config command covering self and all slave configs."""
     # record latest contact time for the dist master
     self.__latest_contact = time.time()
     all_configs = [self] + list(self.__slave_configs.values())
     info_list = [entry.get_info_dict() for entry in all_configs]
     srv_com = self._get_config_srv_command(
         "info_list",
         slave_info=server_command.compress(info_list, json=True),
     )
     self.send_to_config_server(srv_com)
示例#13
0
文件: server.py 项目: bopopescu/icsw
 def _ocsp_results(self, *args, **kwargs):
     """Forward OCSP result lines to the sync master, if one is known."""
     _src_proc, _src_pid, lines = args
     if not self.__sync_master_uuid:
         # nowhere to send the lines to - log the amount and drop them
         self.log(
             "no sync_master_uuid set ({})".format(
                 logging_tools.get_plural("OCSP line", len(lines))),
             logging_tools.LOG_LEVEL_ERROR)
         return
     ocsp_com = server_command.srv_command(
         command="ocsp_lines",
         ocsp_lines=server_command.compress(lines, json=True),
     )
     self.send_command(self.__sync_master_uuid, ocsp_com)
示例#14
0
 def __call__(self, srv_com, cur_ns):
     """Attach the core count and a full JSON-compressed process tree to *srv_com*."""
     srv_com["psutil"] = "yes"
     srv_com["num_cores"] = psutil.cpu_count(logical=True)
     # fix: "ppid" was listed twice in the original attribute list
     srv_com["process_tree"] = server_command.compress(
         process_tools.get_proc_list(attrs=[
             "pid",
             "ppid",
             "uids",
             "gids",
             "name",
             "exe",
             "cmdline",
             "status",
             "cpu_affinity",
         ]),
         json=True)
示例#15
0
    def _build_file_content(self, send_list):
        """Build a file_content_bulk command carrying all files in *send_list*.

        Per-file metadata (path, size, uid, gid) goes into ``file_list``;
        the concatenated file bodies are utf-8 encoded and compressed into
        the ``bulk`` element.
        """
        srv_com = self._get_slave_srv_command(
            "file_content_bulk",
            config_version_send="{:d}".format(self.config_version_send),
            send_time="{:d}".format(int(self.send_time)),
        )
        _bld = srv_com.builder()

        file_nodes = [
            _bld.file(
                entry["path"],
                size="{:d}".format(entry["size"]),
                uid="{:d}".format(entry["uid"]),
                gid="{:d}".format(entry["gid"]),
            )
            for entry in send_list
        ]
        srv_com["file_list"] = _bld.file_list(*file_nodes)
        # single compressed payload with all file contents back to back
        payload = "".join([entry["content"] for entry in send_list])
        srv_com["bulk"] = server_command.compress(payload.encode("utf-8"))
        return srv_com
示例#16
0
    def __call__(self, srv_com, cur_ns):
        """Collect the installed-updates history and attach it to *srv_com*.

        On Windows the Microsoft Update COM API is queried for the full
        update history; each entry's numeric result code is mapped to a
        symbolic status string.  The list of (title, iso-date, status)
        tuples is pickled into ``srv_com["installed_updates"]`` (empty on
        non-Windows platforms).
        """
        installed_updates = []
        if PLATFORM_SYSTEM_TYPE == PlatformSystemTypeEnum.WINDOWS:
            import win32com.client
            import pywintypes

            update = win32com.client.Dispatch('Microsoft.Update.Session')
            update_searcher = update.CreateUpdateSearcher()
            count = update_searcher.GetTotalHistoryCount()

            update_history = update_searcher.QueryHistory(0, count)

            for i in range(update_history.Count):
                # NOTE: rebinds `update` from the COM session object to the
                # per-entry record below
                update = installedupdates_command.Update()
                update.title = update_history.Item(i).Title
                update.date = update_history.Item(i).Date
                try:
                    update.status = update_history.Item(i).ResultCode
                except pywintypes.com_error:
                    # some history entries carry no result code
                    update.status = "Unknown"

                # map the Windows Update OperationResultCode to a name
                if update.status == 0:
                    update.status = "NotStarted"
                elif update.status == 1:
                    update.status = "InProgress"
                elif update.status == 2:
                    update.status = "Succeeded"
                elif update.status == 3:
                    update.status = "SucceededWithErrors"
                elif update.status == 4:
                    update.status = "Failed"
                elif update.status == 5:
                    update.status = "Aborted"

                installed_updates.append(update)

        installed_updates = [(update.title, update.date.isoformat(),
                              update.status) for update in installed_updates]
        srv_com["installed_updates"] = server_command.compress(
            installed_updates, pickle=True)
示例#17
0
文件: rpm_mod.py 项目: bopopescu/icsw
 def __call__(self, srv_com, cur_ns):
     """Generate the installed-package list (rpm or dpkg) into *srv_com*."""
     # debian-based systems are detected via the marker file
     is_debian = os.path.isfile("/etc/debian_version")
     rpm_root_dir, re_strs = ("/", [])
     # absolute-path arguments select the root dir, others are regexp strings
     for arg in cur_ns.arguments:
         if arg.startswith("/"):
             rpm_root_dir = arg
         else:
             re_strs.append(arg)
     if is_debian:
         self.log(
             "Starting dpkg -l command for root_dir '{}' ({:d} regexp_strs: {})"
             .format(rpm_root_dir, len(re_strs), ", ".join(re_strs)))
     else:
         self.log(
             "Starting rpm-list command for root_dir '{}' ({:d} regexp_strs: {})"
             .format(rpm_root_dir, len(re_strs), ", ".join(re_strs)))
     s_time = time.time()
     log_list, ret_dict, cur_stat = rpmlist_int(rpm_root_dir, re_strs,
                                                is_debian)
     e_time = time.time()
     for log in log_list:
         self.log(log)
     if cur_stat:
         srv_com["result"].set_result(
             "error getting list: {:d}".format(cur_stat),
             server_command.SRV_REPLY_STATE_ERROR)
     else:
         srv_com.set_result(
             "ok got list in {}".format(
                 logging_tools.get_diff_time_str(e_time - s_time)), )
         srv_com["root_dir"] = rpm_root_dir
         srv_com["format"] = "deb" if is_debian else "rpm"
         srv_com["pkg_list"] = server_command.compress(ret_dict, pickle=True)
示例#18
0
文件: bonnie.py 项目: walong365/icsw
 def _check_for_next_run(self):
     """Initialize the current benchmark run and finalize it when complete.

     Lazily creates the per-run bookkeeping dict, starts the configured
     number of worker threads, and - once all started threads have ended -
     writes the compressed result file and either exits or recurses into
     the next run.
     """
     if not self.__act_run:
         self.log("init run_dict")
         self.__act_run += 1
         self.__act_run_dict = {
             "sysinfo": process_tools.fetch_sysinfo()[1]
         }
     if self.__act_run not in self.__act_run_dict:
         self.log("init run_dict for run %d" % (self.__act_run))
         self.__act_run_dict[self.__act_run] = {
             "num_threads": self.__thread_list[self.__act_run - 1],
             "started": 0,
             "ended": 0,
             "results": {}
         }
         if self.__loc_config["SYNC_GLOBAL"]:
             self._sync()
         for num_t in range(self.__act_run_dict[self.__act_run]["num_threads"]):
             self.__act_run_dict[self.__act_run]["started"] += 1
             # NOTE(review): ("start_run") is just the string, not a 1-tuple;
             # if a tuple was intended a trailing comma is missing
             self.__stq_list[num_t].put(("start_run"))
     # check for finished run
     ard = self.__act_run_dict[self.__act_run]
     if ard["started"] == ard["ended"]:
         self.log("run %d finished" % (self.__act_run))
         if self.__loc_config["SYNC_GLOBAL"]:
             self._sync()
         self.log("Saving result to %s" % (self.__loc_config["RESULT_FILE"]))
         # fix: use a context manager so the result file is flushed and
         # closed deterministically (the original leaked the handle)
         # NOTE(review): compress() may return bytes while the file is
         # opened in text mode - confirm against server_command
         with open(self.__loc_config["RESULT_FILE"], "w") as res_file:
             res_file.write(
                 server_command.compress(self.__act_run_dict, json=True)
             )
         if self.__act_run == self.__num_runs:
             self.log("all runs finished, exiting")
             self._int_error("all runs finished")
         else:
             self.__act_run += 1
             self._check_for_next_run()
示例#19
0
 def _check_for_slaves(self, **kwargs):
     """Discover the monitoring master and all slave servers, build their
     sync configs and announce the distribution layout.

     Populates ``self.__master_config``, ``self.__slave_configs`` (by pk)
     and ``self.__slave_lut`` (by full name and uuid), registers the local
     relayer, and finally sends both a ``distribution_info`` pool message
     and a ``distribute_info`` sync command.
     """
     master_server = device.objects.get(Q(pk=global_config["SERVER_IDX"]))
     slave_servers = device.objects.exclude(
         # exclude master server
         Q(pk=master_server.idx)).filter(
             Q(device_config__config__config_service_enum__enum_name=
               icswServiceEnum.monitor_slave.name)).select_related(
                   "domain_tree_node")
     # slave configs; master config counts as distributed when slaves exist
     self.__master_config = SyncConfig(
         self,
         master_server,
         distributed=True if len(slave_servers) else False)
     self.__slave_configs, self.__slave_lut = ({}, {})
     # create lut entry for master config
     self.__slave_configs[master_server.pk] = self.__master_config
     self.__slave_lut[master_server.full_name] = master_server.pk
     self.__slave_lut[master_server.uuid] = master_server.pk
     # connect to local relayer
     self.__primary_slave_uuid = routing.get_server_uuid(
         icswServiceEnum.monitor_slave, master_server.uuid)
     self.send_pool_message("set_sync_master_uuid",
                            self.__primary_slave_uuid)
     self.log("  master {} (IP {}, {})".format(master_server.full_name,
                                               "127.0.0.1",
                                               self.__primary_slave_uuid))
     self.send_pool_message("register_remote", "127.0.0.1",
                            self.__primary_slave_uuid,
                            icswServiceEnum.monitor_slave.name)
     _send_data = [self.__master_config.get_send_data()]
     if len(slave_servers):
         self.log("found {}: {}".format(
             logging_tools.get_plural("slave_server", len(slave_servers)),
             ", ".join(
                 sorted([cur_dev.full_name for cur_dev in slave_servers]))))
         for cur_dev in slave_servers:
             # one SyncConfig per slave, linked back to the master
             _slave_c = SyncConfig(
                 self,
                 cur_dev,
                 slave_name=cur_dev.full_name,
                 master_server=master_server,
             )
             self.__slave_configs[cur_dev.pk] = _slave_c
             self.__slave_lut[cur_dev.full_name] = cur_dev.pk
             self.__slave_lut[cur_dev.uuid] = cur_dev.pk
             self.log("  slave {} (IP {}, {})".format(
                 _slave_c.monitor_server.full_name, _slave_c.slave_ip,
                 _slave_c.monitor_server.uuid))
             _send_data.append(_slave_c.get_send_data())
             # if _slave_c.slave_ip:
             #    self.send_pool_message("register_slave", _slave_c.slave_ip, _slave_c.monitor_server.uuid)
             # else:
             #    self.log("slave has an invalid IP", logging_tools.LOG_LEVEL_CRITICAL)
     else:
         self.log("no slave-servers found")
     # send distribution info to local syncer
     distr_info = server_command.srv_command(
         command="distribute_info",
         info=server_command.compress(_send_data, marshal=True),
     )
     self.send_pool_message("distribution_info", _send_data)
     self.send_sync_command(distr_info)
示例#20
0
    def _interpret(ctrl_dict, cur_ns):
        # pprint.pprint(ctrl_dict)
        def get_status_lines(lines):
            stat_keys = [
                _key for _key, _value in lines if _key.endswith("_status")
            ]
            if stat_keys:
                # status keys found, return last status
                return [_value for _key, _value in lines
                        if _key in stat_keys][-1]
            else:
                # not status keys found, return first value
                return lines[0][1]

        def get_log_dict(lines):
            return {key: value for key, value in lines}

        def _get_info_str(key, lines):
            _entity_type = key.split(":")[-1][0]
            if _entity_type in ["v", "c", "b"]:
                _ld = get_log_dict(lines)
                if _entity_type == "v":
                    _ld["ctrl"] = key.split(":")[0]
                    for _key, _default in [("name", ""),
                                           ("virtual_drive", "")]:
                        if _key not in _ld:
                            _ld[_key] = _default
                    _info_str = "vd {virtual_drive} ('{name}', ctrl {ctrl}), RAID level {raid_level}, " \
                        "size={size}, drives={number_of_drives}, state={state}".format(
                            **_ld
                        )
                elif _entity_type == "c":
                    _info_f = []
                    for key in ["product_name"]:
                        if key in _ld:
                            _info_f.append("{}: {}".format(
                                key,
                                _ld[key],
                            ))
                    _info_str = ", ".join(_info_f)
                elif _entity_type == "b":
                    _info_f = []
                    for key in [
                            "temperature", "voltage",
                            "absolute_state_of_charge",
                            "relative_state_of_charge",
                            "*learn_cycle_requested", "learn_cycle_status",
                            "cycle_count"
                    ]:
                        if key[0] in ["*"]:
                            _key = key[1:]
                            _ignore_true = True
                        else:
                            _key = key
                            _ignore_true = False
                        if _key in _ld:
                            if _ignore_true and _ld[_key] is True:
                                continue
                            _info_f.append("{}: {}".format(
                                _key,
                                _ld[_key],
                            ))
                    _info_str = ", ".join(_info_f)
            else:
                _info_str = ""
            if _info_str:
                _info_str = "{}: {}".format(
                    _expand_key(_entity_type),
                    _info_str,
                )
            return _info_str

        def check_status(key, lines, check):
            _entity_type = key.split(":")[-1][0]
            if check == "status":
                _val = [
                    _val for _key, _val in lines if _key.endswith("_status")
                ]
            else:
                _val = [
                    _val for _key, _val in lines
                    if _key == check or _key.replace(" ", "_") == check
                ]
            if _val:
                _key_found = True
                _val = _val[0]
            else:
                _key_found = False
                # correct key not found
                _val = "not present / key not found"
            # _checked not needed right now ?
            _checked, _ret_state = (False, limits.mon_STATE_CRITICAL)
            if _entity_type == "v":
                _checked = True
                if check == "state":
                    if _val.lower().startswith("optimal"):
                        _ret_state = limits.mon_STATE_OK

                elif check == "current_cache_policy":
                    if _val.lower().strip().split()[0].startswith("writeback"):
                        _ret_state = limits.mon_STATE_OK
                    else:
                        _ret_state = limits.mon_STATE_WARNING
            elif _entity_type == "d":
                if _val.lower() == "online, spun up":
                    _ret_state = limits.mon_STATE_OK
            elif _entity_type == "f":
                if _val.lower() == "ok":
                    _ret_state = limits.mon_STATE_OK
            elif _entity_type == "s":
                if _val.lower() == "ok":
                    _ret_state = limits.mon_STATE_OK
            elif _entity_type == "c":
                _ret_state = limits.mon_STATE_OK
            elif _entity_type == "b":
                if _val.lower() in ("operational", "optimal"):
                    _ret_state = limits.mon_STATE_OK
                elif not _key_found:
                    _ld = get_log_dict(lines)
                    # state not definde, check for other flags
                    if not _ld.get("battery_pack_missing",
                                   True) and not _ld.get(
                                       "battery_replacement_required", True):
                        _ret_state = limits.mon_STATE_OK
            return _ret_state, _val, _entity_type

        def get_check_list(d_type, lines):
            if d_type == "virt":
                _keys = [_key for _key, _value in lines]
                return list(
                    set(_keys) & set(["state", "current_cache_policy"]))
            elif d_type == "pd":
                _keys = [_key for _key, _value in lines]
                return ["firmware_state"]
            elif d_type == "bbu":
                return ["battery_state"]
            elif d_type == "ctrl":
                return []
            else:
                status = get_status_lines(lines).lower()
                if status in set([
                        "not installed", "unknown", "medium speed",
                        "normal speed", "low speed", "high speed",
                        "not available"
                ]):
                    return None
                else:
                    return ["status"]

        def _prune(in_dict):
            return {
                _key: _prune(_value) if isinstance(_value, dict) else _value
                for _key, _value in in_dict.items() if _value
            }

        def reorder_dict(in_dict):
            _result = {
                "c{:02d}".format(_idx): _interpret_dict("ctrl", _value)
                for _idx, _value in in_dict.items()
            }
            # prune twice to remove empty subdicts
            _result = _prune(_prune(_result))
            return _result

        def emit_keys(in_dict, level=0):
            if isinstance(in_dict, dict):
                _dk_l = set(in_dict.keys()) - {"lines", "_checks"}
                r_list = sum([[
                    "{}{}{}".format(_s2_key, ":" if sub_key else "", sub_key)
                    for sub_key in emit_keys(in_dict[_s2_key], level + 1)
                ] for _s2_key in _dk_l], [])
                # force iteration over this key (to generate info_str)
                if "_checks" in in_dict:
                    r_list.append("")
                elif not level:
                    # add controller keys at top level
                    r_list.extend(list(in_dict.keys()))
                return r_list
            else:
                return [""]

        def _interpret_dict(d_type, in_dict):
            map_dict = {
                "enclosures": ("e", "enclosure"),
                "fans": ("f", "fan"),
                "power_supplies": ("p", "psu"),
                "slots": ("s", "slot"),
                "temperature_senors": ("t", "tempsensor"),
                "virt": ("v", "virt"),
                "pd": ("d", "pd"),
                "bbus": ("b", "bbu"),
            }
            r_dict = {}
            for _key, _t in map_dict.items():
                r_dict.update({
                    "{}{:02d}".format(_t[0], _idx):
                    _interpret_dict(_t[1], _value)
                    for _idx, _value in in_dict.get(_key, {}).items()
                    if isinstance(_idx, int)
                })
            if in_dict.get("lines", []):
                r_dict["lines"] = in_dict["lines"]
                _checks = get_check_list(d_type, in_dict["lines"])
                if _checks:
                    r_dict["_checks"] = _checks
            return r_dict

        def get_source(_ro_dict, _key):
            # return lines and check_list for given key
            _res = _ro_dict
            for _skey in _key.split(":"):
                _res = _res[_skey]
            return (_res.get("lines", []), _res.get("_checks", []))

        def _expand_key(entity_type):
            return {
                "c": "Ctrl",
                "v": "Virt",
                "p": "PSU",
                "s": "Slot",
                "e": "Encl",
                "b": "BBU",
                "f": "Fan",
                "d": "Disc",
            }[entity_type]

        def _full_key(_part):
            return "{}{}".format(
                {
                    "c": "ctrl",
                    "v": "virt",
                    "p": "psu",
                    "s": "slot",
                    "e": "encl",
                    "b": "bbu",
                    "f": "fan",
                    "d": "disc",
                }[_part[0]],
                "{:d}".format(int(_part[1:])) if len(_part) > 1 else "",
            )

        def get_service(_key, _check=None):
            return "{}{}".format(
                "/".join([_full_key(_part) for _part in _key.split(":")]),
                " {}".format(_check) if _check else "",
            )

        def _shorten_list(in_list):
            # pprint.pprint(in_list)
            # shorten_re = re.compile("^(?P<pre>c\d+:(v|e)\d+:(d|s|f))(?P<rest>\d+)$")
            shorten_re = re.compile(
                "^(?P<pre>c\d+:((v\d+:(d|s|f))|e))(?P<rest>.*\d+)$")
            _shorten_dict = {}
            new_list, _shorten_keys = ([], [])
            for key, _check, _info, _flag in r_list:
                _keep = True
                _match = shorten_re.match(key)
                if _match:
                    _keep = False
                    _gd = _match.groupdict()
                    if _gd["pre"] not in _shorten_keys:
                        _shorten_keys.append(_gd["pre"])
                        _shorten_dict[_gd["pre"]] = {
                            "list": [],
                            "check": _check,
                            "infos": [],
                            "flag": _flag,
                        }
                    _sde = _shorten_dict[_gd["pre"]]
                    if (_check, _flag) == (_sde["check"], _sde["flag"]):
                        _sde["list"].append((key, _gd["rest"]))
                        _sde["infos"].append(_info)
                    else:
                        _keep = True
                if _keep:
                    new_list.append((key, _check, _info, _flag))
            for _shorten_key in _shorten_keys:
                _sde = _shorten_dict[_shorten_key]
                new_list.append((_shorten_key, _sde["check"],
                                 _compress_infos(_sde["infos"]), _sde["flag"]))
            # print "out"
            # pprint.pprint(new_list)
            # pprint.pprint(_shorten_dict)
            # print "-" * 10
            return new_list, _shorten_dict

        def _generate_short_result(_common_key, _struct, _lss):
            # Aggregate per-key (state, output, key) triples into a single
            # passive-check entry: (compressed all-keys string, worst state,
            # comma-joined summary line).
            state_map = {}
            all_keys = []  # every key is needed for the passive result lookup key
            for cur_state, cur_output, cur_key in _lss:
                # group the keys first by state, then by output text
                per_output = state_map.setdefault(cur_state, {})
                per_output.setdefault(cur_output, []).append(cur_key)
                all_keys.append(cur_key)
            worst_state = max(state_map.keys())
            summary_parts = [
                "{:d} {}: {}".format(
                    len(state_map[cur_state][cur_output]),
                    cur_output,
                    _compress_infos(state_map[cur_state][cur_output]),
                )
                for cur_state in sorted(state_map.keys())
                for cur_output in sorted(state_map[cur_state].keys())
            ]
            return _compress_infos(all_keys), worst_state, ", ".join(summary_parts)

        def _compress_infos(in_list):
            # Render the given list of keys as a compact range string.
            packed = logging_tools.list_to_struct(in_list)[0]
            return logging_tools.struct_to_string(packed)

        # rewrite bbu info
        for _c_id, _c_dict in ctrl_dict.items():
            if "main" in _c_dict.get("bbu_keys", {}):
                _c_dict["bbus"] = {
                    0: {
                        "lines": [(_key, _value) for _key, _value in
                                  _c_dict["bbu_keys"]["main"].items()]
                    }
                }
                del _c_dict["bbu_keys"]
            if "virt" not in _c_dict:
                # rewrite from old to new format
                _c_dict["virt"] = {
                    key: {
                        "lines": [(line[0].lower().replace(" ", "_").replace(
                            "virtual_disk", "virtual_drive"), line[1])
                                  for line in value]
                    }
                    for key, value in _c_dict["logical_lines"].items()
                }
                del _c_dict["logical_lines"]
        # print cur_ns
        # reorder dict
        _ro_dict = reorder_dict(ctrl_dict)
        # pprint.pprint(_ro_dict)
        # pprint.pprint(ctrl_dict)
        _key_list = emit_keys(_ro_dict)
        # pprint.pprint(_key_list)
        # print cur_ns
        # interpret flags
        _short_output = True if cur_ns.short_output in [
            True, "1", "y", "yes", "true", "True"
        ] else False
        _ignore_missing_bbu = True if cur_ns.ignore_missing_bbu in [
            True, "1", "y", "yes", "true", "True"
        ] else False
        _ignore_keys = [_char for _char in cur_ns.ignore_keys]
        if "N" in _ignore_keys:
            _ignore_keys = []
        if _ignore_keys:
            # filter key_list
            _key_list = [
                _entry for _entry in _key_list
                if not any([_entry.count(_ik) for _ik in _ignore_keys])
            ]
        if cur_ns.get_hints:
            r_list = []
            _ctrl_found = set()
            for _key in sorted(_key_list):
                _ctrl_key = _key.split(":")[0]
                if _ctrl_key not in _ctrl_found:
                    _ctrl_found.add(_ctrl_key)
                    r_list.extend([
                        (_ctrl_key, "all", "{} info".format(_full_key(_key)),
                         True),
                    ])
                _lines, _checks = get_source(_ro_dict, _key)
                if _checks:
                    if _short_output:
                        r_list.append((_key, "::".join(_checks),
                                       ShortOutputKeyCache.shorten_keys([
                                           get_service(_key, _check)
                                           for _check in _checks
                                       ]), False))
                    else:
                        r_list.extend([(_key, _check,
                                        get_service(_key, _check), False)
                                       for _check in _checks])
                # all checks in one line ? Todo ...
            if _short_output:
                # shorten list
                r_list, _ignore_dict = _shorten_list(r_list)
            # pprint.pprint(r_list)
            return r_list
        else:
            _store_passive = cur_ns.passive_check_prefix != "-"
            # generate passive results if cur_ns.passive_check_prefix is set (not "-")
            if not _store_passive:
                # only makes sense with _store_passive==True
                _short_output = False
            _passive_dict = {
                "source": "megaraid",
                "prefix": cur_ns.passive_check_prefix,
                "list": [],
            }
            # print "*", _key_list
            # if cur_ns.key != "all":
            # else:
            if cur_ns.check != "all":
                single_key = True
                _key_list = list(set(_key_list) & set([cur_ns.key]))
                target_checks = set(cur_ns.check.split("::"))
                # print "*", _key_list, target_checks
            else:
                target_checks = None
                single_key = False
                if cur_ns.key:
                    _key_list = [
                        _entry for _entry in _key_list
                        if _entry.startswith(cur_ns.key)
                    ]
            _ok_dict = {}
            _ret_list = []
            _g_ret_state = limits.mon_STATE_OK
            # list for shortened output
            r_list = []
            for _key in sorted(_key_list):
                # cache for short output
                _so_cache = ShortOutputKeyCache()
                _lines, _checks = get_source(_ro_dict, _key)
                if target_checks:
                    _checks = list(set(_checks) & target_checks)
                _info_str = _get_info_str(_key, _lines)
                if not _checks:
                    _ret_list.append(_info_str)
                for _check in _checks:
                    _info = get_service(_key, _check)
                    if _short_output:
                        r_list.append((_key, _check, _info, None))
                    _ret_state, _result, _entity_type = check_status(
                        _key, _lines, _check)
                    if _key.count(":b") and _ignore_missing_bbu:
                        # reduce state if necessary
                        _ret_state = min(_ret_state, limits.mon_STATE_WARNING)
                    if _store_passive and _entity_type != "c":
                        # never store controller checks in passive dict
                        if _short_output:
                            _so_cache.feed(_info, _ret_state, _result,
                                           _info_str)
                        else:
                            _passive_dict["list"].append(
                                # format: info, ret_state, result (always show), info (only shown in case of non-OK)
                                (
                                    _info,
                                    _ret_state,
                                    "{} {}".format(_result, _info_str),
                                ))
                    else:
                        _ret_list.append(_info_str)
                    _info_str = ""
                    # print _info, _ret_state, _result
                    if _ret_state != limits.mon_STATE_OK:
                        _ret_list.append("{}: {}".format(_info, _result))
                    else:
                        if single_key:
                            _ret_list.append(_result)
                        if _entity_type != "c":
                            # we ignore contoller checks because they are only dummy checks
                            _ok_dict.setdefault(_entity_type, []).append(0)
                    _g_ret_state = max(_g_ret_state, _ret_state)
                # check for addendum tio passive_dict
                if _short_output and _store_passive and _so_cache.valid:
                    _passive_dict["list"].append(_so_cache.get_passive_entry())
            _ret_list = [_val for _val in _ret_list if _val.strip()]
            if _short_output:
                # pprint.pprint(_passive_dict)
                r_list, shorten_dict = _shorten_list(r_list)
                # passive lut
                _pl = {
                    _info: (_ret_state, _result)
                    for _info, _ret_state, _result in _passive_dict["list"]
                }
                # rewrite the passive dict
                for _key, _struct in shorten_dict.items():
                    # pprint.pprint(_struct)
                    # local state list
                    _lss = [
                        list(_pl[_info]) + [_info]
                        for _info in _struct["infos"]
                    ]
                    # remove from passive_dict.list
                    _passive_dict["list"] = [
                        (_a, _b, _c) for _a, _b, _c in _passive_dict["list"]
                        if _a not in _struct["infos"]
                    ]
                    # add summed result
                    _passive_dict["list"].append(
                        _generate_short_result(_key, _struct, _lss))
                # pprint.pprint(_passive_dict)
            # pprint.pprint(_passive_dict)
            if _store_passive:
                ascii_chunk = server_command.compress(_passive_dict, json=True)
            else:
                ascii_chunk = ""
            # print _ret_list, _ok_dict
            if _ok_dict:
                _num_ok = sum([len(_val) for _val in _ok_dict.values()])
                if _num_ok == 1 and single_key:
                    pass
                else:
                    _ret_list.append("{}: {}".format(
                        logging_tools.get_plural("OK check", _num_ok),
                        ", ".join([
                            logging_tools.get_plural(_expand_key(_key),
                                                     len(_val))
                            for _key, _val in _ok_dict.items()
                        ])))
            # pprint.pprint(_ret_list)
            return ExtReturn(_g_ret_state,
                             ", ".join(_ret_list),
                             ascii_chunk=ascii_chunk)
示例#21
0
 def serialize(in_obj):
     """Compress *in_obj* into the JSON-based transfer format."""
     payload = server_command.compress(in_obj, json=True)
     return payload
示例#22
0
 def entry(self, result, ref_dict):
     """Build a compound display node from matched machine-vector entries.

     result is a (match_list, group_dict) pair; ref_dict maps each matched
     key to its per-key structure, whose first element carries the display
     attributes.  Returns a one-element list with the compound-node dict.
     """
     # import pprint
     # pprint.pprint(ref_dict)
     match_list, group_dict = result
     # merge info / ti attributes from the top-level reference nodes
     for match_key, _unused_xml in match_list:
         top_attrs = ref_dict[match_key][0]
         group_dict.update({
             attr_name: attr_value
             for attr_name, attr_value in top_attrs.items()
             if attr_name in ["info", "ti"]
         })
     # fill in placeholder defaults for missing entries
     for default_key, default_value in [("key", "KEY???"), ("ti", "TI???")]:
         if default_key not in group_dict:
             group_dict[default_key] = default_value
     # an empty title falls back to the key
     if not group_dict["ti"]:
         group_dict["ti"] = group_dict["key"]
     # expand the configured templates with the collected attributes
     compound_key = self.__key.format(**group_dict)
     info_str = "{} ({:d})".format(self.__info.format(**group_dict), len(match_list))
     # per-entry drawing info for the compound graph
     build_info = [
         {
             "key": sub_key,
             "color": sub_xml.attrib["color"],
             "draw_type": sub_xml.get("draw_type", "LINE1"),
             "invert": sub_xml.get("invert", "0"),
         }
         for sub_key, sub_xml in match_list
     ]
     node = [
         {
             # should not be needed for display
             # "type": "compound",
             "fn": "",
             "ti": "",
             "key": compound_key,
             "is_active": True,
             "is_compound": True,
             "mvvs": [
                 {
                     "unit": "",
                     "info": info_str,
                     "key": "",
                     "build_info": server_command.compress(build_info, json=True).decode("ascii"),
                 }
             ],
         }
     ]
     return node
示例#23
0
def compress_pci_info(in_struct):
    """Compress a PCI information structure using marshal serialization."""
    packed = server_command.compress(in_struct, marshal=True)
    return packed
示例#24
0
 def interpret(self, srv_com, ns, *args, **kwargs):
     """Interpret an ovirt storage-domain overview reply.

     Builds an ExtReturn with domain counts, state / type distributions and
     aggregated size counters.  If ns.reference holds a compressed reference
     dict (run_ids / run_names), one passive check result per referenced
     domain is generated and attached as the ascii_chunk of the return.
     """
     sds = StorageDomain.deserialize(srv_com)
     ret = ExtReturn()
     ret.feed_str(logging_tools.get_plural("Storagedomain", len(sds.findall(".//storage_domain"))))
     ret.feed_str_state(
         *SimpleCounter(
             sds.xpath(".//external_status/state/text()"),
             ok=["ok"],
             prefix="State"
         ).result
     )
     ret.feed_str_state(
         *SimpleCounter(
             sds.xpath(".//storage_domain/type/text()"),
             ok=["data", "export", "image", "iso"],
             prefix="Domain Type"
         ).result
     )
     ret.feed_str_state(
         *SimpleCounter(
             sds.xpath(".//storage_domain/storage/type/text()"),
             ok=["glance", "iscsi", "nfs", "fcp"],
             prefix="Storage Type"
         ).result
     )
     # sum the size-related counters over all storage domains
     size_dict = {
         _key: sum(
             [
                 int(_val) for _val in sds.xpath(".//storage_domain/{}/text()".format(_key))
             ]
         ) for _key in [
             "used",
             "available",
             "committed",
         ]
     }
     size_dict["size"] = size_dict["used"] + size_dict["available"]
     if ns.reference not in ["", "-"]:
         # reference is a compressed dict (base64 encoded) with run_ids / run_names
         _ref = server_command.decompress(ns.reference, json=True)
         _passive_dict = {
             "source": "ovirt_overview",
             "prefix": ns.passive_check_prefix,
             "list": [],
         }
         for run_id, run_name in zip(_ref["run_ids"], _ref["run_names"]):
             _prefix = "ovirt StorageDomain {}".format(run_name)
             _sd = sds.xpath(".//storage_domain[@id='{}']".format(run_id))
             if len(_sd):
                 _sd = _sd[0]
                 _state = _sd.findtext(".//external_status/state")
                 if _state is None:
                     # new format: state text directly below external_status
                     _state = _sd.findtext(".//external_status")
                 if _state in ["ok"]:
                     _nag_state = limits.mon_STATE_OK
                 else:
                     _nag_state = limits.mon_STATE_CRITICAL
                 _stype = _sd.findtext("type")
                 _ret_f = [
                     "state is {}".format(_state),
                     "type is {}".format(_stype),
                     "storage type is {}".format(_sd.findtext("storage/type")),
                 ]
                 if _stype in ["data", "iso", "export"]:
                     try:
                         _avail = int(_sd.findtext("available"))
                         _used = int(_sd.findtext("used"))
                         _committed = int(_sd.findtext("committed"))
                         _pused = 100. * _used / max(1, _avail + _used)
                         # NOTE: fixed output typo ("commited" -> "committed")
                         _size_str = "size is {} (used {} [{:.2f}%], avail {}), committed {}".format(
                             logging_tools.get_size_str(_avail + _used),
                             logging_tools.get_size_str(_used),
                             _pused,
                             logging_tools.get_size_str(_avail),
                             logging_tools.get_size_str(_committed),
                         )
                         if _pused > 95:
                             _nag_state = max(_nag_state, limits.mon_STATE_CRITICAL)
                         elif _pused > 90:
                             _nag_state = max(_nag_state, limits.mon_STATE_WARNING)
                     except (TypeError, ValueError):
                         # missing (findtext returns None -> TypeError) or
                         # malformed (ValueError) size entries; was a bare
                         # except which also swallowed KeyboardInterrupt
                         _ret_f.append("cannot evaluate size")
                         _nag_state = max(_nag_state, limits.mon_STATE_WARNING)
                     else:
                         _ret_f.append(_size_str)
                 _passive_dict["list"].append(
                     (
                         _prefix,
                         _nag_state,
                         ", ".join(_ret_f),
                     )
                 )
             else:
                 _passive_dict["list"].append(
                     (
                         _prefix,
                         limits.mon_STATE_CRITICAL,
                         "StorageDomain not found",
                     )
                 )
         ret.ascii_chunk = server_command.compress(_passive_dict, json=True)
     ret.feed_str(
         ", ".join(
             [
                 "{}: {}".format(
                     _key,
                     logging_tools.get_size_str(size_dict[_key])
                 ) for _key in sorted(size_dict.keys())
             ]
         )
     )
     return ret
示例#25
0
    def interpret(self, srv_com, ns, *args, **kwargs):
        """Interpret an ovirt VM overview reply.

        Counts VMs per state, optionally compares the up/down counts against
        a compressed reference dict (ns.reference) and generates per-domain
        passive check results attached as the ascii_chunk of the ExtReturn.
        """
        if ns.reference not in ["", "-"]:
            # reference is a compressed dict (base64 encoded)
            _ref = server_command.decompress(ns.reference, json=True)
            _passive_dict = {
                "source": "ovirt_overview",
                "prefix": ns.passive_check_prefix,
                "list": [],
            }
        else:
            _ref = None
            _passive_dict = {}
        _vms = VM.deserialize(srv_com)
        _num_vms = len(_vms)
        _states = _vms.xpath(".//vm/status/state/text()", smart_strings=False)
        # state -> number of VMs in that state
        _state_dict = {_state: _states.count(_state) for _state in set(_states)}
        if _ref:
            for run_name in _ref["run_names"]:
                _vm = _vms.xpath(".//vm[name[text()='{}']]".format(run_name))
                _prefix = "ovirt Domain {}".format(run_name)
                if len(_vm):
                    _vm = _vm[0]
                    try:
                        _memory = int(_vm.findtext("memory"))
                        _topology = _vm.find("cpu/topology")
                        if "sockets" in _topology.attrib:
                            # old format: topology data stored as attributes
                            _sockets = int(_topology.get("sockets"))
                            _cores = int(_topology.get("cores"))
                        else:
                            # new format: topology data stored as subelements
                            _sockets = int(_topology.findtext("sockets"))
                            _cores = int(_topology.findtext("cores"))
                        _state = _vm.findtext("status/state")
                        if _state is None:
                            # try new format
                            _state = _vm.findtext("status")
                        _ret_f = [
                            "state is {}".format(_state),
                            "memory {}".format(
                                logging_tools.get_size_str(
                                    _memory, long_format=True
                                )
                            ),
                            "CPU info: {}, {}".format(
                                logging_tools.get_plural("socket", _sockets),
                                logging_tools.get_plural("core", _cores),
                            )
                        ]
                        if _state in ["up"]:
                            _nag_state = limits.mon_STATE_OK
                        else:
                            _nag_state = limits.mon_STATE_CRITICAL
                        _passive_dict["list"].append(
                            (
                                _prefix,
                                _nag_state,
                                ", ".join(_ret_f),
                            )
                        )
                    except Exception:
                        # best-effort: report the parse problem as a CRITICAL
                        # passive result (was a bare except, which also
                        # swallowed SystemExit / KeyboardInterrupt)
                        _passive_dict["list"].append(
                            (
                                _prefix,
                                limits.mon_STATE_CRITICAL,
                                process_tools.get_except_info()
                            )
                        )
                else:
                    _passive_dict["list"].append(
                        (
                            _prefix,
                            limits.mon_STATE_CRITICAL,
                            "domain not found",
                        )
                    )
        _error_list = []
        ret_state = limits.mon_STATE_OK
        if _ref:
            # compare the found up/down counts against the reference values
            for _state in ["up", "down"]:
                _current = _state_dict.get(_state, 0)
                if _current != _ref[_state]:
                    # NOTE: fixed output typo ("should by" -> "should be")
                    _error_list.append(
                        "{} should be {:d} (found: {:d})".format(
                            _state,
                            _ref[_state],
                            _current,
                        )
                    )
                    ret_state = max(ret_state, limits.mon_STATE_WARNING)

        if _ref is None:
            ascii_chunk = ""
        else:
            ascii_chunk = server_command.compress(_passive_dict, json=True)
        return ExtReturn(
            ret_state,
            "{}, {}".format(
                logging_tools.get_plural("VM", _num_vms),
                ", ".join(
                    ["{:d} {}".format(_state_dict[_key], _key) for _key in sorted(_state_dict)] + _error_list
                ),
            ),
            ascii_chunk=ascii_chunk,
        )
示例#26
0
 def send_data(self):
     """Serialize the internal XML tree into the compressed transfer format."""
     xml_text = etree.tostring(self.xml, encoding="unicode")
     return server_command.compress(xml_text, json=True)
示例#27
0
File: main.py  Project: walong365/icsw
def show_hm_help(options):
    """Show help for host-monitoring commands or rebuild the JSON definition file.

    With options.update_json the command definitions are dumped into the
    compressed JSON definition file (refused as root or in a production
    install).  Otherwise command help, optionally filtered via options.args,
    is printed to stdout (details with options.detail, summary table with
    options.overview).
    """
    from initat.host_monitoring.modules import local_mc

    def dummy_print(what, log_level=logging_tools.LOG_LEVEL_OK):
        # minimal log callback used while building the module structure
        print(
            "{} {}".format(
                logging_tools.get_log_level_str(log_level),
                what
            )
        )

    local_mc.set_log_command(dummy_print)
    local_mc.build_structure()
    for mod in local_mc.module_list:
        # trigger checksum generation
        _ = mod.checksum
    for cmd_name in sorted(local_mc.command_dict.keys()):
        # NOTE(review): the looked-up command object is unused; presumably the
        # __getitem__ access triggers lazy command initialisation -- confirm
        _ = local_mc[cmd_name]

    if options.update_json:
        if os.getuid() == 0 or __file__.startswith("/opt"):
            raise ValueError(
                "Not allowed to run update-json as root or in production environment"
            )
        json_file = os.path.normpath(
            os.path.join(
                # not beautiful but working
                os.path.dirname(__file__),
                "..",
                "..",
                "..",
                "opt",
                "cluster",
                "share",
                "json_defs",
                JSON_DEFINITION_FILE
            )
        )
        if os.path.exists(json_file):
            # use a context manager so the handle is closed deterministically
            # (the previous code left the file object open)
            with open(json_file, "rb") as json_fh:
                _previous = server_command.decompress(json_fh.read(), json=True)
        else:
            _previous = None
        json_dump = {
            "command_list": [],
            "abi_version": HM_CURRENT_ABI_VERSION.name,
        }
        # json dump
        for cmd_name in sorted(local_mc.command_dict.keys()):
            cmd = local_mc[cmd_name]
            if cmd.Meta.create_mon_check_command:
                json_dump["command_list"].append(cmd.get_json_dump(_previous))
        print("Creating definition file '{}' in {}".format(JSON_DEFINITION_FILE, json_file))
        with open(json_file, "wb") as json_fh:
            json_fh.write(server_command.compress(json_dump, json=True))
    else:
        to_show = []
        for com_name in sorted(local_mc.keys()):
            _show = True
            if options.args:
                # show only commands matching at least one filter argument
                _show = any([com_name.count(arg) for arg in options.args])
            if _show:
                to_show.append(com_name)
        print(
            "Modules defined            : {:d}".format(
                len(local_mc.module_list)
            )
        )
        print(
            "Commands defined / to show : {:d} / {:d}".format(
                len(local_mc.command_dict),
                len(to_show),
            )
        )
        valid_names = sorted(local_mc.command_dict.keys())
        if options.detail:
            _sep_len = 60
            for _idx, com_name in enumerate(to_show, 1):
                com = local_mc.command_dict[com_name]
                print(
                    "\n{}\n{}\n{}\n".format(
                        "-" * _sep_len,
                        "command {:3d} of {:3d}: {} (module {})".format(
                            _idx,
                            len(to_show),
                            com_name,
                            com.module.name,
                        ),
                        "-" * _sep_len,
                    )
                )
                print(
                    "Icinga Command: {}\n".format(
                        com.build_icinga_command(),
                    )
                )
                com.parser.print_help()
                print("\n")

        if options.overview:
            show_overview(local_mc, valid_names)
示例#28
0
    def __call__(self, srv_com, cur_ns):
        """Fill srv_com with the list of installed packages.

        On Windows the package list is enumerated from the registry uninstall
        keys; elsewhere it is built via rpmlist_int (dpkg on Debian systems,
        rpm otherwise).  The result is stored pickle-compressed under
        srv_com["pkg_list"] together with a "format" marker.
        """
        if PLATFORM_SYSTEM_TYPE == PlatformSystemTypeEnum.WINDOWS:
            # both native and WOW6432 (32-bit-on-64-bit) uninstall hives
            uninstall_path1 = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall"
            uninstall_path2 = "SOFTWARE\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall"

            import winreg

            def get_installed_packages_for_keypath(keypath):
                # enumerate all subkeys below keypath and build Package objects
                # from their DisplayName / DisplayVersion / EstimatedSize /
                # InstallDate values
                key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, keypath, 0,
                                     winreg.KEY_READ)

                packages = []

                i = 0

                # winreg enumeration has no length API; iterate until
                # EnumKey raises WindowsError past the last index
                while True:
                    try:
                        subkey_str = winreg.EnumKey(key, i)
                        i += 1
                        subkey = winreg.OpenKey(
                            winreg.HKEY_LOCAL_MACHINE,
                            keypath + "\\" + subkey_str, 0,
                            winreg.KEY_READ | winreg.KEY_WOW64_64KEY)
                        # print subkey_str

                        j = 0

                        package = rpmlist_command.Package()
                        # same pattern for the values of this subkey
                        while True:
                            try:
                                subvalue = winreg.EnumValue(subkey, j)
                                j += 1

                                _val, _data, _type = subvalue

                                # strip anything after an embedded NUL marker
                                _data = str(_data).split("\\u0000")[0]

                                if _val == "DisplayName":
                                    package.displayName = _data
                                elif _val == "DisplayVersion":
                                    package.displayVersion = _data
                                elif _val == "EstimatedSize":
                                    package.estimatedSize = _data
                                elif _val == "InstallDate":
                                    package.installDate = _data

                            except WindowsError as e:
                                break

                        # skip entries without a DisplayName (presumably
                        # "Unknown" is the Package default -- confirm)
                        if package.displayName != "Unknown":
                            packages.append(package)

                    except WindowsError as e:
                        break

                return packages

            package_list1 = get_installed_packages_for_keypath(uninstall_path1)
            package_list2 = get_installed_packages_for_keypath(uninstall_path2)
            package_list1.extend(package_list2)

            # deduplicate entries present in both hives
            package_list = list(set(package_list1))

            package_list.sort()

            srv_com["format"] = "windows"
            srv_com["pkg_list"] = server_command.compress(package_list,
                                                          pickle=True)
        else:
            # Debian-based systems are detected via the marker file
            if os.path.isfile("/etc/debian_version"):
                is_debian = True
            else:
                is_debian = False
            # arguments starting with "/" select the root dir, everything
            # else is treated as a package-name regexp
            rpm_root_dir, re_strs = ("/", [])
            arguments = getattr(cur_ns, "arguments", None)
            if arguments:
                for arg in cur_ns.arguments:
                    if arg.startswith("/"):
                        rpm_root_dir = arg
                    else:
                        re_strs.append(arg)
            if is_debian:
                self.log(
                    "Starting dpkg -l command for root_dir '{}' ({:d} regexp_strs{})"
                    .format(
                        rpm_root_dir,
                        len(re_strs),
                        ": {}".format(", ".join(re_strs)) if re_strs else "",
                    ))
            else:
                self.log(
                    "Starting rpm-list command for root_dir '{}' ({:d} regexp_strs{})"
                    .format(
                        rpm_root_dir,
                        len(re_strs),
                        ": {}".format(", ".join(re_strs)) if re_strs else "",
                    ))
            s_time = time.time()
            log_list, ret_dict, cur_stat = rpmlist_int(rpm_root_dir, re_strs,
                                                       is_debian)
            e_time = time.time()
            for log in log_list:
                self.log(log)
            # cur_stat == 0 (falsy) signals success
            if not cur_stat:
                srv_com.set_result(
                    "ok got list in {}".format(
                        logging_tools.get_diff_time_str(e_time - s_time)), )
                srv_com["root_dir"] = rpm_root_dir
                srv_com["format"] = "deb" if is_debian else "rpm"
                srv_com["pkg_list"] = server_command.compress(ret_dict,
                                                              pickle=True)
            else:
                srv_com["result"].set_result(
                    "error getting list: {:d}".format(cur_stat),
                    server_command.SRV_REPLY_STATE_ERROR)
示例#29
0
 def interpret(self, srv_com, ns, *args, **kwargs):
     """Interpret an ovirt host overview reply and build the check output.

     Feeds host counts, state / type distributions and summed VM counters
     into an ExtReturn; if ns.reference holds a compressed reference dict,
     one passive check result per referenced host is attached as ascii_chunk.
     """
     host_tree = Host.deserialize(srv_com)
     ext_ret = ExtReturn()
     ext_ret.feed_str(logging_tools.get_plural("Host", len(host_tree.findall(".//host"))))
     ext_ret.feed_str(logging_tools.reduce_list(host_tree.xpath(".//host/name/text()")))
     # distribution checks: state, external state and host type
     for counter_xpath, ok_list, counter_prefix in [
         (".//host/status/state/text()", ["up"], "State"),
         (".//host/external_status/state/text()", ["ok"], "ExtStatus"),
         (".//host/type/text()", ["rhel"], "Type"),
     ]:
         ext_ret.feed_str_state(
             *SimpleCounter(
                 host_tree.xpath(counter_xpath),
                 ok=ok_list,
                 prefix=counter_prefix,
             ).result
         )
     # VM counters summed over all hosts
     count_dict = {}
     for count_key in ["active", "migrating", "total"]:
         count_dict[count_key] = sum(
             int(text_val)
             for text_val in host_tree.xpath(".//host/summary/{}/text()".format(count_key))
         )
     if ns.reference not in ["", "-"]:
         # reference present: emit one passive check result per referenced host
         ref_dict = server_command.decompress(ns.reference, json=True)
         passive_dict = {
             "source": "ovirt_overview",
             "prefix": ns.passive_check_prefix,
             "list": [],
         }
         for host_id, host_name in zip(ref_dict["run_ids"], ref_dict["run_names"]):
             check_prefix = "ovirt Host {}".format(host_name)
             matched_hosts = host_tree.xpath(".//host[@id='{}']".format(host_id))
             if len(matched_hosts):
                 host_node = matched_hosts[0]
                 host_state = host_node.findtext(".//status/state")
                 host_type = host_node.findtext("type")
                 if host_state in ["up"]:
                     nag_state = limits.mon_STATE_OK
                 else:
                     nag_state = limits.mon_STATE_CRITICAL
                 result_parts = [
                     "state is {}".format(host_state),
                     "type is {}".format(host_type),
                 ]
                 if host_node.find("summary") is not None:
                     result_parts.extend(
                         "{}={:d}".format(
                             sum_key,
                             int(host_node.findtext("summary/{}".format(sum_key)))
                         )
                         for sum_key in ["active", "migrating", "total"]
                     )
                 if host_node.find("memory") is not None:
                     result_parts.append(
                         "mem {}".format(
                             logging_tools.get_size_str(int(host_node.findtext("memory")))
                         )
                     )
                 passive_dict["list"].append(
                     (
                         check_prefix,
                         nag_state,
                         ", ".join(result_parts),
                     )
                 )
             else:
                 passive_dict["list"].append(
                     (
                         check_prefix,
                         limits.mon_STATE_CRITICAL,
                         "Host {} not found".format(host_name),
                     )
                 )
         ext_ret.ascii_chunk = server_command.compress(passive_dict, json=True)
     ext_ret.feed_str(
         ", ".join(
             "{}: {}".format(count_key, count_dict[count_key])
             for count_key in sorted(count_dict.keys())
         )
     )
     return ext_ret
示例#30
0
 def _get_satellite_info(self):
     """Build the satellite_info command for the distribution master."""
     sat_info = server_command.compress(self.get_satellite_info(), json=True)
     return self._get_dist_master_srv_command(
         "satellite_info",
         satellite_info=sat_info,
     )