def main():
    """Compare the rpm lists of two devices.

    Each positional argument may be given as "host" or "host:directory"
    (directory defaults to "/").  The rpm list of every host is fetched
    via the "rpmlist" command; packages missing on either side are
    printed, one per line in detail mode (-d), space separated otherwise.
    """
    my_parser = argparse.ArgumentParser()
    my_parser.add_argument(
        "-d", dest="detail", default=False, action="store_true",
        help="detailed mode [%(default)s]"
    )
    my_parser.add_argument("hosts", nargs=2, help="Devices to check [%(default)s]")
    opts = my_parser.parse_args()

    def _split(spec):
        # split an optional ":directory" suffix, default directory is "/"
        if ":" in spec:
            return spec.split(":", 1)
        return spec, "/"

    def _fetch(host, _dir):
        # fetch the compressed rpm list from host, return None on connection error
        _ns = net_tools.SendCommandDefaults(host=host, arguments=["rpmlist", _dir])
        my_com = net_tools.SendCommand(_ns)
        my_com.init_connection()
        result = None
        if my_com.connect():
            result = my_com.send_and_receive()
        my_com.close()
        return result

    host_1, dir_1 = _split(opts.hosts[0])
    host_2, dir_2 = _split(opts.hosts[1])
    print(
        "Comparing rpm_lists of %s (dir %s) and %s (dir %s)" % (
            host_1, dir_1, host_2, dir_2
        )
    )
    result_1 = _fetch(host_1, dir_1)
    result_2 = _fetch(host_2, dir_2)
    if result_1 is None or result_2 is None:
        # previously an unset result_* raised a NameError at decompress time
        print("error contacting device(s), aborting")
        return
    rpm_dict_1 = server_command.decompress(result_1["*pkg_list"], pickle=True)
    rpm_dict_2 = server_command.decompress(result_2["*pkg_list"], pickle=True)
    keys_1 = sorted(rpm_dict_1.keys())
    keys_2 = sorted(rpm_dict_2.keys())
    # set-based membership keeps the comparison linear
    set_1, set_2 = set(keys_1), set(keys_2)
    missing_in_1 = [x for x in keys_2 if x not in set_1]
    missing_in_2 = [x for x in keys_1 if x not in set_2]
    for missing_in, host, _dir in [
        (missing_in_1, host_1, dir_1),
        (missing_in_2, host_2, dir_2),
    ]:
        if missing_in:
            print(
                "{} missing on {} (dir {}):".format(
                    logging_tools.get_plural("package", len(missing_in)),
                    host,
                    _dir
                )
            )
            if opts.detail:
                print("\n".join(missing_in))
            else:
                print(" ".join(missing_in))
def deserialize(srv_com):
    """Rebuild a storage_domains element from the compressed entries in *srv_com*.

    Every child of each ``ns:storagedomains`` node carries a compressed
    JSON-encoded XML fragment which is inflated and re-attached.
    """
    domain_root = E.storage_domains()
    for container in srv_com.xpath(".//ns:storagedomains"):
        for packed_entry in container:
            inflated = server_command.decompress(packed_entry.text, json=True)
            domain_root.append(etree.fromstring(inflated))
    return domain_root
def __init__(self):
    # Main process pool of the monitor-slave service: initializes config
    # handling, signal hooks, network sockets, the status subprocess and
    # the periodic timers.  Statement order matters (config before sockets).
    process_tools.ALLOW_MULTIPLE_INSTANCES = False
    threading_tools.icswProcessPool.__init__(self, "main")
    self.CC.init(icswServiceEnum.monitor_slave, global_config)
    self.CC.check_config()
    self.__verbose = global_config["VERBOSE"]
    self.read_config_store()
    # log config
    self.CC.log_config()
    self.ICH = IPCCommandHandler(self)
    # map POSIX signals to the shared error handlers
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_exception("hup_error", self._hup_error)
    # from mixins
    self._icinga_pc = None
    self.register_timer(self._check_for_pc_control, 10, instant=True)
    self.VCM_check_md_version(global_config)
    self._init_network_sockets()
    self.add_process(StatusProcess("status"), start=True)
    self.register_func("send_command", self._send_command)
    self.__latest_status_query = None
    self.SH = SyncerHandler(self)
    # restore previously persisted distribution info (stored as compressed JSON)
    if "distribute_info" in self.config_store:
        self.SH.distribute_info(
            server_command.decompress(self.config_store["distribute_info"], json=True))
    self.register_timer(self._update, 30, instant=True)
def ocsp_lines(self, srv_com, **kwargs):
    """Write OCSP lines received from the md-config-server to icinga.

    The lines arrive as a compressed JSON list; they are only forwarded
    when an icinga process control object is present.
    """
    # OCSP lines from md-config-server
    external_cmds = server_command.decompress(srv_com["*ocsp_lines"], json=True)
    if self._icinga_pc:
        self._icinga_pc.write_external_cmd_file(external_cmds)
    return None
def _generate_assets_update_hm(self, tree):
    # Record installed updates from a host-monitor result tree into the
    # current asset batch (status transitions: 1 = in progress, 2 = done).
    blobs = tree.xpath('ns0:installed_updates', namespaces=tree.nsmap)
    if len(blobs):
        blob = blobs[0].text
        # blob is a compressed pickle of (name, install_date, status) triples
        l = server_command.decompress(blob, pickle=True)
        self.asset_batch.installed_updates_status = 1
        for (name, up_date, status) in l:
            asset_update_entry = AssetUpdateEntry.objects.filter(
                name=name,
                version="",
                release="",
                kb_idx=0,
                install_date=dateparse.parse_datetime(up_date),
                status=status,
                optional=False,
                installed=True,
                new_version=""
            )
            if asset_update_entry:
                # reuse the already existing row
                asset_update_entry = asset_update_entry[0]
            else:
                asset_update_entry = AssetUpdateEntry(
                    name=name,
                    install_date=dateparse.parse_datetime(up_date),
                    status=status,
                    optional=False,
                    installed=True
                )
            # NOTE(review): save() runs for fetched rows as well — redundant
            # for unchanged entries but required before the m2m add() on new
            # ones; confirm this matches the original indentation intent
            asset_update_entry.save()
            self.asset_batch.installed_updates.add(asset_update_entry)
        self.asset_batch.installed_updates_status = 2
        self.asset_batch.save()
def distribute_info(self, srv_com, **kwargs):
    """Persist the received distribution info and hand it to the syncer.

    The payload arrives marshal-compressed and is re-compressed as JSON
    for storage in the config store so it survives restarts.
    """
    dist_info = server_command.decompress(srv_com["*info"], marshal=True)
    stored_form = server_command.compress(dist_info, json=True, to_string=True)
    self.config_store["distribute_info"] = stored_form
    self.config_store.write()
    self.SH.distribute_info(dist_info)
    return None
def _generate_assets_pending_update_hm(self, tree):
    # Record pending (not yet installed) updates from a host-monitor
    # result tree (status transitions: 1 = in progress, 2 = done).
    blob = tree.xpath('ns0:update_list', namespaces=tree.nsmap)[0]\
        .text
    # blob is a compressed pickle of (name, version) pairs
    l = server_command.decompress(blob, pickle=True)
    self.asset_batch.pending_updates_status = 1
    for (name, version) in l:
        asset_update_entry = AssetUpdateEntry.objects.filter(
            name=name,
            version="",
            release="",
            kb_idx=0,
            install_date=None,
            status="",
            optional=True,
            installed=False,
            new_version=version
        )
        if asset_update_entry:
            # reuse the already existing row
            asset_update_entry = asset_update_entry[0]
        else:
            asset_update_entry = AssetUpdateEntry(
                name=name,
                # by definition linux updates are optional
                optional=True,
                installed=False,
                new_version=version,
            )
        # NOTE(review): save() runs for fetched rows as well — redundant for
        # unchanged entries but required before the m2m add() on new ones
        asset_update_entry.save()
        self.asset_batch.pending_updates.add(asset_update_entry)
    self.asset_batch.pending_updates_status = 2
    self.asset_batch.save()
def _generate_assets_package_hm(self, tree):
    """Build BaseAssetPackage objects from a host-monitor package list.

    The ``ns0:pkg_list`` node carries a compressed pickle mapping
    package name -> list of version dicts; ``installtimestamp`` and
    ``size`` are optional per version entry.
    """
    blob = tree.xpath('ns0:pkg_list', namespaces=tree.nsmap)[0].text
    # the original wrapped this in "try: ... except: raise" which is a no-op;
    # let decompression errors propagate directly
    package_dict = server_command.decompress(blob, pickle=True)
    assets = []
    for package_name in package_dict:
        for versions_dict in package_dict[package_name]:
            assets.append(
                BaseAssetPackage(
                    package_name,
                    version=versions_dict['version'],
                    # optional fields default like the original manual checks
                    size=versions_dict.get('size', 0),
                    release=versions_dict['release'],
                    install_date=versions_dict.get('installtimestamp'),
                    package_type=PackageTypeEnum.LINUX
                )
            )
    self._generate_assets_package(assets)
def decompress_dmi_info(in_str):
    """Expand a compressed dmidecode dump and convert it to XML.

    Writes the decompressed dump to a temporary file, replays it with
    ``dmidecode --from-dump`` and parses the output into an XML tree.
    Returns the resulting XML element.
    """
    # local import: replaces the removed py2 "commands" module
    import subprocess
    _dmi_bin = process_tools.find_file("dmidecode")
    with tempfile.NamedTemporaryFile() as tmp_file:
        dump_data = server_command.decompress(in_str)
        # py2 "file()" no longer exists; use open() with a mode matching
        # the payload type (decompress may yield str or bytes)
        write_mode = "wb" if isinstance(dump_data, bytes) else "w"
        with open(tmp_file.name, write_mode) as dump_file:
            dump_file.write(dump_data)
        # subprocess.getstatusoutput is the py3 replacement for
        # commands.getstatusoutput (same (status, output) contract)
        _dmi_stat, dmi_result = subprocess.getstatusoutput(
            "{} --from-dump {}".format(_dmi_bin, tmp_file.name))
    _xml = dmi_struct_to_xml(parse_dmi_output(dmi_result.split("\n")))
    return _xml
def get_pure_data(lic_content):
    """Return the license stream with volatile parts blanked out.

    Signature and creation-datetime nodes are cleared so two license
    blobs can be compared on their actual content only.
    """
    lic_xml = etree.fromstring(server_command.decompress(lic_content))  # .encode("utf-8")))
    volatile_nodes = lic_xml.xpath(
        ".//icsw:signature|.//icsw:license-file-meta/icsw:creation-datetime",
        namespaces=ICSW_XML_NS_MAP)
    for node in volatile_nodes:
        node.text = ""
    return server_command.compress(etree.tostring(lic_xml))
def deserialize(cls, in_str, deep=False):
    """Decompress a JSON payload into a Python structure.

    With ``deep=True`` every nested ``fingerprints`` list is itself
    deserialized recursively via ``cls.deserialize``.
    """
    def _walk(mapping):
        # depth-first pass expanding embedded fingerprint payloads
        for key, value in mapping.items():
            if key == "fingerprints" and isinstance(value, list):
                mapping[key] = [cls.deserialize(item) for item in value]
            if isinstance(value, dict):
                _walk(value)

    payload = server_command.decompress(in_str, json=True)
    if deep:
        _walk(payload)
    return payload
def interpret(self, srv_com, cur_ns):
    """Report pending updates from the compressed pickle payload."""
    pending = server_command.decompress(srv_com["update_list"].text, pickle=True)
    if not pending:
        return limits.mon_STATE_OK, "No updates found"
    update_lines = [
        "{:} {:}".format(upd_name, upd_version)
        for upd_name, upd_version in pending
    ]
    info = "{}: {}".format(
        logging_tools.get_plural("update", len(pending)),
        "\n".join(update_lines))
    return limits.mon_STATE_OK, info
def _pcrs_as_chunk(self, *args, **kwargs):
    # Handle a compressed chunk of passive check results: decode the
    # payload, resolve the target device from the chunk prefix and
    # forward the generated OCSP command lines to the ocsp_results process.
    in_com = server_command.srv_command(source=args[0])
    _chunk = server_command.decompress(in_com["*ascii_chunk"], json=True)
    _source = _chunk.get("source", "unknown")
    _prefix = _chunk["prefix"]
    try:
        # prefix appears to embed the device pk as the second ":"-separated
        # field — TODO confirm against the chunk producer
        cur_dev = device.objects.get(Q(pk=_prefix.split(":")[1]))
    except:
        self.log(
            "error getting device from prefix '{}' (source {}): {}".format(
                _prefix,
                _source,
                process_tools.get_except_info(),
            ),
            logging_tools.LOG_LEVEL_ERROR)
    else:
        ocsp_lines = []
        for _line in _chunk["list"]:
            # every entry must be an (info, ret_state, result) triple
            if len(_line) != 3:
                self.log(
                    "pcr line has wrong format (len {:d} != 3): '{}'".format(
                        len(_line),
                        str(_line),
                    ),
                    logging_tools.LOG_LEVEL_ERROR)
            else:
                _info, _ret_state, _result = _line
                try:
                    _srv_info = "{}{}".format(_prefix, _info)
                    # icinga external-command format for passive service results
                    ocsp_line = "[{:d}] PROCESS_SERVICE_CHECK_RESULT;{};{};{:d};{}".format(
                        int(time.time()),
                        cur_dev.full_name,
                        _srv_info,
                        _ret_state,
                        _result,
                    )
                except:
                    # e.g. non-integer ret_state; skip the line but log it
                    self.log(
                        "error generating ocsp_result from '{}': {}".format(
                            str(_line),
                            process_tools.get_except_info(),
                        ),
                        logging_tools.LOG_LEVEL_ERROR)
                else:
                    ocsp_lines.append(ocsp_line)
        if ocsp_lines:
            self.send_pool_message("ocsp_results", ocsp_lines)
            self.log(
                "generated {} (source: {})".format(
                    logging_tools.get_plural("passive check result", len(ocsp_lines)),
                    _source,
                ))
def get_packages_for_ar(asset_run):
    # Extract BaseAssetPackage objects from the raw result blob of an
    # asset run.  NRPE scans deliver base64+bz2 compressed JSON, HM scans
    # deliver XML with a compressed-pickle pkg_list node.
    blob = asset_run.raw_result_str
    runtype = asset_run.run_type
    scantype = asset_run.scan_type
    assets = []
    if blob:
        if runtype == AssetType.PACKAGE:
            if scantype == ScanType.NRPE:
                if blob.startswith("b'"):
                    # blob looks like a stringified bytes repr (b'...');
                    # NOTE(review): [2:-2] strips TWO trailing chars —
                    # presumably the closing quote plus a newline; confirm
                    # against the producer of raw_result_str
                    _data = bz2.decompress(base64.b64decode(blob[2:-2]))
                else:
                    _data = bz2.decompress(base64.b64decode(blob))
                l = json.loads(_data)
                for (name, version, size, date) in l:
                    if size == "Unknown":
                        size = 0
                    assets.append(
                        BaseAssetPackage(
                            name,
                            version=version,
                            size=size,
                            install_date=date,
                            package_type=PackageTypeEnum.WINDOWS))
            elif scantype == ScanType.HM:
                tree = etree.fromstring(blob)
                blob = tree.xpath('ns0:pkg_list', namespaces=tree.nsmap)[0].text
                try:
                    package_dict = server_command.decompress(blob, pickle=True)
                except:
                    raise
                else:
                    for package_name in package_dict:
                        for versions_dict in package_dict[package_name]:
                            # installtimestamp is optional per version entry
                            installtimestamp = None
                            if 'installtimestamp' in versions_dict:
                                installtimestamp = versions_dict[
                                    'installtimestamp']
                            assets.append(
                                BaseAssetPackage(
                                    package_name,
                                    version=versions_dict['version'],
                                    size=versions_dict['size'],
                                    release=versions_dict['release'],
                                    install_date=installtimestamp,
                                    package_type=PackageTypeEnum.LINUX))
    return assets
def feed_result(self, dc_action, srv_reply):
    # Evaluate an ovirt "vms" reply: count VMs per state and create
    # monitoring hints (one hint per running domain plus a JSON overview).
    # Implemented as a generator; yields exactly once.
    _hints = []
    VALID_STATES = {"up", "down"}
    if srv_reply is not None:
        # print srv_reply.pretty_print()
        info_dict = {key: 0 for key in VALID_STATES}
        info_dict["run_ids"] = []
        info_dict["run_names"] = []
        # print("-" * 20)
        # print(srv_reply.pretty_print())
        # print("+" * 20)
        if "vms" in srv_reply:
            for vm in srv_reply.xpath(".//ns:vms")[0]:
                # every entry carries a compressed XML fragment
                _xml = etree.fromstring(
                    server_command.decompress(vm.text, json=True))
                # print(etree.tostring(_xml, pretty_print=True))
                # try state paths (element layout differs between ovirt versions)
                _state = _xml.xpath(".//status/state/text()")
                if not len(_state):
                    _state = _xml.xpath(".//status/text()")
                _state = _state[0]
                if _state in VALID_STATES:
                    info_dict[_state] += 1
                    if _state == "up":
                        # running domains get an individual passive hint
                        _dom_id = _xml.get("id")
                        _dom_name = _xml.findtext("name")
                        info_dict["run_ids"].append(_dom_id)
                        info_dict["run_names"].append(_dom_name)
                        _hints.append(
                            monitoring_hint(
                                key="domain_{}".format(_dom_id),
                                v_type="s",
                                info="ovirt Domain {}".format(_dom_name),
                                value_string=_dom_name,
                                persistent=True,
                                is_active=False,
                            ))
        # overview hint carries the aggregated counters as JSON
        _hints.append(
            monitoring_hint(
                key="overview",
                v_type="j",
                info="Domain overview",
                persistent=True,
                value_json=json.dumps(info_dict),
                is_active=True,
            ))
        self.store_hints(_hints)
    yield None
def interpret(self, srv_com, cur_ns):
    """Verify that cur_ns.mountpoint is mounted with cur_ns.filesys."""
    mount_table = server_command.decompress(srv_com["*mounts"], json=True)
    # entry layout: index 1 = mountpoint, index 2 = filesystem type
    matching = [row for row in mount_table if row[1] == cur_ns.mountpoint]
    if not matching:
        return limits.mon_STATE_CRITICAL, "mountpoint {} not found".format(
            cur_ns.mountpoint)
    found = matching[0]
    if found[2] == cur_ns.filesys:
        return limits.mon_STATE_OK, "mountpoint {} has filesystem {}".format(
            cur_ns.mountpoint, cur_ns.filesys)
    return limits.mon_STATE_CRITICAL, "mountpoint {} has wrong filesystem: {} != {}".format(
        cur_ns.mountpoint, found[2], cur_ns.filesys)
def get_all(self, request):
    """Return monitoring system info (master plus slaves) and build count."""
    srv_com = server_command.srv_command(command="get_sys_info")
    result, _logs = contact_server(request, icswServiceEnum.monitor_server, srv_com)
    if "sys_info" in result:
        raw_info = server_command.decompress(result["*sys_info"], json=True)
        masters = [entry for entry in raw_info if entry["master"]]
        slaves = [entry for entry in raw_info if not entry["master"]]
        sys_info = {
            "master": masters[0],
            "slaves": slaves,
        }
    else:
        sys_info = {}
    sys_info["num_builds"] = mon_dist_master.objects.all().count()
    # import pprint
    # pprint.pprint(sys_info)
    return Response([sys_info])
def interpret(self, srv_com, cur_ns):
    """Check loaded kernel modules against an optional required list."""
    modules = server_command.decompress(srv_com["*modules"], json=True)
    if not cur_ns.required:
        return limits.mon_STATE_OK, "loaded {}".format(
            logging_tools.get_plural("module", len(modules)))
    required = set(cur_ns.required.split(","))
    loaded = {entry[0] for entry in modules}
    missing = required - loaded
    if missing:
        return limits.mon_STATE_CRITICAL, "{} required, {} missing: {}".format(
            logging_tools.get_plural("module", len(required)),
            logging_tools.get_plural("module", len(missing)),
            ", ".join(sorted(list(missing))))
    return limits.mon_STATE_OK, "{} found: {}".format(
        logging_tools.get_plural("required module", len(required)),
        ", ".join(sorted(list(required))))
def upload_command(options):
    """Upload a compressed config file into the backend database.

    Exits with -4 when the file does not exist and -5 on an unknown
    mode; currently only the "cjson" (compressed JSON) mode is supported.
    """
    if not os.path.exists(options.filename):
        print("File '{}' does not exist".format(options.filename))
        sys.exit(-4)
    # context manager closes the handle deterministically
    with open(options.filename, "rb") as f_obj:
        _content = f_obj.read()
    _size = len(_content)
    if options.mode == "cjson":
        _structure = server_command.decompress(_content, json=True)
    else:
        print("Unknown filemode '{}'".format(options.mode))
        sys.exit(-5)
    from initat.cluster.backbone.models.internal import BackendConfigFileTypeEnum, \
        BackendConfigFile
    from initat.tools import cluster_location
    _new = BackendConfigFile.store(
        structure=_structure,
        file_size=_size,
        file_type=BackendConfigFileTypeEnum(options.type),
        install_device=cluster_location.DeviceRecognition().device,
    )
    # the original format string had only one placeholder and silently
    # dropped _new.idx; include it in the output
    print("Current instance: {} ({})".format(str(_new), _new.idx))
def _read(self, file_content):
    # read content, raise an error if
    # - wrong format (decompression problem)
    # - XML not valid
    # - invalid signature
    # returns the inner icsw:license-file element on success
    try:
        signed_content_str = server_command.decompress(file_content)
    except:
        logger.error("Error reading uploaded license file: {}".format(
            process_tools.get_except_info()))
        raise LicenseFileReader.InvalidLicenseFile()
    signed_content_xml = etree.fromstring(signed_content_str)
    # structural validation against the RelaxNG schema
    # noinspection PyUnresolvedReferences
    ng = etree.RelaxNG(etree.fromstring(LIC_FILE_RELAX_NG_DEFINITION))
    if not ng.validate(signed_content_xml):
        raise LicenseFileReader.InvalidLicenseFile(
            "Invalid license file structure")
    content_xml = signed_content_xml.find('icsw:license-file', ICSW_XML_NS_MAP)
    signature_xml = signed_content_xml.find('icsw:signature', ICSW_XML_NS_MAP)
    # cryptographic check of the embedded signature
    signature_ok = self.verify_signature(content_xml, signature_xml)
    if not signature_ok:
        raise LicenseFileReader.InvalidLicenseFile("Invalid signature")
    # notes about the XML:
    # - one or more packages
    # - packages may have the same UUID
    # - only the packags with the highest date (when having the same UUID) is valid
    # print(etree.tostring(content_xml))
    # print(ICSW_XML_NS_MAP)
    # print(content_xml.xpath(".//icsw:package-name/text()", namespaces=ICSW_XML_NS_MAP))
    # print(content_xml.xpath(".//icsw:package-uuid/text()", namespaces=ICSW_XML_NS_MAP))
    return content_xml
def _slave_info(self, *args, **kwargs):
    # Dispatch a slave-info message: either a bulk "info_list" update for
    # the master and all slaves, or a single-slave action addressed by UUID.
    srv_com = server_command.srv_command(source=args[0])
    action = srv_com["*action"]
    if action == "info_list":
        info_list = server_command.decompress(srv_com["*slave_info"], json=True)
        # import pprint
        # pprint.pprint(info_list)
        for info in info_list:
            if info["master"]:
                if self.__master_config is not None:
                    self.__master_config.set_info(info)
                else:
                    self.log("not master config set", logging_tools.LOG_LEVEL_WARN)
            else:
                # normalize the UUID before the lookup-table access
                _pure_uuid = routing.get_pure_uuid(info["slave_uuid"])
                if _pure_uuid in self.__slave_lut:
                    _pk = self.__slave_lut[_pure_uuid]
                    self.__slave_configs[_pk].set_info(info)
                else:
                    self.log(
                        "got unknown UUID '{}' ({})".format(
                            info["slave_uuid"], _pure_uuid),
                        logging_tools.LOG_LEVEL_ERROR)
    else:
        # single-slave action, target addressed via slave_uuid
        _pure_uuid = routing.get_pure_uuid(srv_com["*slave_uuid"])
        if _pure_uuid in self.__slave_lut:
            self.__slave_configs[
                self.__slave_lut[_pure_uuid]].handle_info_action(
                    action, srv_com)
        else:
            self.log(
                "got unknown UUID '{}' ({})".format(
                    srv_com["*slave_uuid"], _pure_uuid),
                logging_tools.LOG_LEVEL_ERROR)
def interpret(self, srv_com, ns, *args, **kwargs):
    # Interpret an ovirt storage-domain overview: feed counters for
    # state / domain type / storage type, aggregate sizes and (when a
    # reference is given) emit passive check results per referenced domain.
    sds = StorageDomain.deserialize(srv_com)
    # print etree.tostring(sds)
    ret = ExtReturn()
    ret.feed_str(logging_tools.get_plural("Storagedomain", len(sds.findall(".//storage_domain"))))
    ret.feed_str_state(
        *SimpleCounter(
            sds.xpath(".//external_status/state/text()"),
            ok=["ok"],
            prefix="State"
        ).result
    )
    ret.feed_str_state(
        *SimpleCounter(
            sds.xpath(".//storage_domain/type/text()"),
            ok=["data", "export", "image", "iso"],
            prefix="Domain Type"
        ).result
    )
    ret.feed_str_state(
        *SimpleCounter(
            sds.xpath(".//storage_domain/storage/type/text()"),
            ok=["glance", "iscsi", "nfs", "fcp"],
            prefix="Storage Type"
        ).result
    )
    # aggregate byte counters over all storage domains
    size_dict = {
        _key: sum(
            [
                int(_val) for _val in sds.xpath(".//storage_domain/{}/text()".format(_key))
            ]
        ) for _key in [
            "used",
            "available",
            "committed",
        ]
    }
    size_dict["size"] = size_dict["used"] + size_dict["available"]
    if ns.reference not in ["", "-"]:
        # reference is a compressed dict with the run_ids / run_names
        # recorded by the discovery pass
        _ref = server_command.decompress(ns.reference, json=True)
        _passive_dict = {
            "source": "ovirt_overview",
            "prefix": ns.passive_check_prefix,
            "list": [],
        }
        for run_id, run_name in zip(_ref["run_ids"], _ref["run_names"]):
            _prefix = "ovirt StorageDomain {}".format(run_name)
            _sd = sds.xpath(".//storage_domain[@id='{}']".format(run_id))
            if len(_sd):
                _sd = _sd[0]
                # print(etree.tostring(_sd, pretty_print=True))
                _state = _sd.findtext(".//external_status/state")
                if _state is None:
                    # new format
                    _state = _sd.findtext(".//external_status")
                if _state in ["ok"]:
                    _nag_state = limits.mon_STATE_OK
                else:
                    _nag_state = limits.mon_STATE_CRITICAL
                _stype = _sd.findtext("type")
                _ret_f = [
                    "state is {}".format(_state),
                    "type is {}".format(_stype),
                    "storage type is {}".format(_sd.findtext("storage/type")),
                ]
                if _stype in ["data", "iso", "export"]:
                    try:
                        _avail = int(_sd.findtext("available"))
                        _used = int(_sd.findtext("used"))
                        _committed = int(_sd.findtext("committed"))
                        # usage percentage; max(1, ...) guards division by zero
                        _pused = 100. * _used / max(1, _avail + _used)
                        _size_str = "size is {} (used {} [{:.2f}%], avail {}), commited {}".format(
                            logging_tools.get_size_str(_avail + _used),
                            logging_tools.get_size_str(_used),
                            _pused,
                            logging_tools.get_size_str(_avail),
                            logging_tools.get_size_str(_committed),
                        )
                        if _pused > 95:
                            _nag_state = max(_nag_state, limits.mon_STATE_CRITICAL)
                        elif _pused > 90:
                            _nag_state = max(_nag_state, limits.mon_STATE_WARNING)
                    except:
                        # missing / non-numeric size info raises a warning
                        _ret_f.append("cannot evaluate size")
                        _nag_state = max(_nag_state, limits.mon_STATE_WARNING)
                    else:
                        _ret_f.append(_size_str)
                _passive_dict["list"].append(
                    (
                        _prefix,
                        _nag_state,
                        ", ".join(_ret_f),
                    )
                )
            else:
                _passive_dict["list"].append(
                    (
                        _prefix,
                        limits.mon_STATE_CRITICAL,
                        "StorageDomain not found",
                    )
                )
        ret.ascii_chunk = server_command.compress(_passive_dict, json=True)
    ret.feed_str(
        ", ".join(
            [
                "{}: {}".format(
                    _key,
                    logging_tools.get_size_str(size_dict[_key])
                ) for _key in sorted(size_dict.keys())
            ]
        )
    )
    return ret
def deserialize(srv_com):
    """Rebuild a vms element from the compressed entries below .//ns:vms."""
    vm_root = E.vms()
    vm_container = srv_com.xpath(".//ns:vms")[0]
    for packed_vm in vm_container:
        inflated = server_command.decompress(packed_vm.text, json=True)
        vm_root.append(etree.fromstring(inflated))
    return vm_root
def interpret(self, srv_com, ns, *args, **kwargs):
    # Interpret an ovirt VM overview: count states and (when a reference
    # is given) build passive check results for every recorded domain.
    if ns.reference not in ["", "-"]:
        # reference is a compressed dict (base64 encoded)
        _ref = server_command.decompress(ns.reference, json=True)
        _passive_dict = {
            "source": "ovirt_overview",
            "prefix": ns.passive_check_prefix,
            "list": [],
        }
    else:
        _ref = None
        _passive_dict = {}
    _vms = VM.deserialize(srv_com)
    _num_vms = len(_vms)
    _states = _vms.xpath(".//vm/status/state/text()", smart_strings=False)
    # state -> occurrence count
    _state_dict = {_state: _states.count(_state) for _state in set(_states)}
    if _ref:
        for run_name in _ref["run_names"]:
            _vm = _vms.xpath(".//vm[name[text()='{}']]".format(run_name))
            _prefix = "ovirt Domain {}".format(run_name)
            if len(_vm):
                _vm = _vm[0]
                try:
                    _memory = int(_vm.findtext("memory"))
                    _topology = _vm.find("cpu/topology")
                    if "sockets" in _topology.attrib:
                        # old format
                        _sockets = int(_vm.find("cpu/topology").get("sockets"))
                        _cores = int(_vm.find("cpu/topology").get("cores"))
                    else:
                        # new format
                        _sockets = int(_topology.findtext("sockets"))
                        _cores = int(_topology.findtext("cores"))
                    _state = _vm.findtext("status/state")
                    if _state is None:
                        # try new format
                        _state = _vm.findtext("status")
                    _ret_f = [
                        "state is {}".format(_state),
                        "memory {}".format(
                            logging_tools.get_size_str(
                                _memory,
                                long_format=True
                            )
                        ),
                        "CPU info: {}, {}".format(
                            logging_tools.get_plural("socket", _sockets),
                            logging_tools.get_plural("core", _cores),
                        )
                    ]
                    if _state in ["up"]:
                        _nag_state = limits.mon_STATE_OK
                    else:
                        _nag_state = limits.mon_STATE_CRITICAL
                    _passive_dict["list"].append(
                        (
                            _prefix,
                            _nag_state,
                            ", ".join(_ret_f),
                        )
                    )
                except:
                    # malformed VM entry: report the exception as result text
                    _passive_dict["list"].append(
                        (
                            _prefix,
                            limits.mon_STATE_CRITICAL,
                            process_tools.get_except_info()
                        )
                    )
            else:
                _passive_dict["list"].append(
                    (
                        _prefix,
                        limits.mon_STATE_CRITICAL,
                        "domain not found",
                    )
                )
    _error_list = []
    ret_state = limits.mon_STATE_OK
    if _ref:
        ret_state = limits.mon_STATE_OK
        # compare current counts against the reference counts
        for _state in ["up", "down"]:
            _current = _state_dict.get(_state, 0)
            if _current != _ref[_state]:
                # NOTE(review): "should by" typo kept verbatim (runtime output)
                _error_list.append(
                    "{} should by {:d} (found: {:d})".format(
                        _state,
                        _ref[_state],
                        _current,
                    )
                )
                ret_state = max(ret_state, limits.mon_STATE_WARNING)
    if _ref is None:
        ascii_chunk = ""
    else:
        ascii_chunk = server_command.compress(_passive_dict, json=True)
    return ExtReturn(
        ret_state,
        "{}, {}".format(
            logging_tools.get_plural("VM", _num_vms),
            ", ".join(
                ["{:d} {}".format(_state_dict[_key], _key) for _key in sorted(_state_dict)] + _error_list
            ),
        ),
        ascii_chunk=ascii_chunk,
    )
def deserialize(srv_com):
    """Rebuild a hosts element from the compressed entries below .//ns:hosts."""
    host_root = E.hosts()
    for container in srv_com.xpath(".//ns:hosts"):
        for packed_host in container:
            inflated = server_command.decompress(packed_host.text, json=True)
            host_root.append(etree.fromstring(inflated))
    return host_root
def interpret(self, srv_com, ns, *args, **kwargs):
    # Interpret an ovirt host overview: feed counters for state /
    # external status / type, aggregate summary counts and (when a
    # reference is given) generate passive check results per host.
    hosts = Host.deserialize(srv_com)
    # print etree.tostring(hosts, pretty_print=True)
    # print etree.tostring(sds)
    ret = ExtReturn()
    ret.feed_str(logging_tools.get_plural("Host", len(hosts.findall(".//host"))))
    ret.feed_str(logging_tools.reduce_list(hosts.xpath(".//host/name/text()")))
    ret.feed_str_state(
        *SimpleCounter(
            hosts.xpath(".//host/status/state/text()"),
            ok=["up"],
            prefix="State"
        ).result
    )
    ret.feed_str_state(
        *SimpleCounter(
            hosts.xpath(".//host/external_status/state/text()"),
            ok=["ok"],
            prefix="ExtStatus"
        ).result
    )
    ret.feed_str_state(
        *SimpleCounter(
            hosts.xpath(".//host/type/text()"),
            ok=["rhel"],
            prefix="Type"
        ).result
    )
    # aggregate the VM summary counters over all hosts
    count_dict = {
        _key: sum(
            [
                int(_val) for _val in hosts.xpath(".//host/summary/{}/text()".format(_key))
            ]
        ) for _key in [
            "active",
            "migrating",
            "total",
        ]
    }
    if ns.reference not in ["", "-"]:
        # reference is a compressed dict with run_ids / run_names
        _ref = server_command.decompress(ns.reference, json=True)
        _passive_dict = {
            "source": "ovirt_overview",
            "prefix": ns.passive_check_prefix,
            "list": [],
        }
        for run_id, run_name in zip(_ref["run_ids"], _ref["run_names"]):
            _prefix = "ovirt Host {}".format(run_name)
            _host = hosts.xpath(".//host[@id='{}']".format(run_id))
            if len(_host):
                _host = _host[0]
                _state = _host.findtext(".//status/state")
                _htype = _host.findtext("type")
                if _state in ["up"]:
                    _nag_state = limits.mon_STATE_OK
                else:
                    _nag_state = limits.mon_STATE_CRITICAL
                _ret_f = [
                    "state is {}".format(_state),
                    "type is {}".format(_htype),
                ]
                # optional per-host summary / memory details
                if _host.find("summary") is not None:
                    _ret_f.extend(
                        [
                            "{}={:d}".format(
                                _key,
                                int(_host.findtext("summary/{}".format(_key)))
                            ) for _key in ["active", "migrating", "total"]
                        ]
                    )
                if _host.find("memory") is not None:
                    _ret_f.append(
                        "mem {}".format(
                            logging_tools.get_size_str(int(_host.findtext("memory")))
                        )
                    )
                _passive_dict["list"].append(
                    (
                        _prefix,
                        _nag_state,
                        ", ".join(_ret_f),
                    )
                )
            else:
                _passive_dict["list"].append(
                    (
                        _prefix,
                        limits.mon_STATE_CRITICAL,
                        "Host {} not found".format(run_name),
                    )
                )
        # print _passive_dict
        ret.ascii_chunk = server_command.compress(_passive_dict, json=True)
    ret.feed_str(
        ", ".join(
            [
                "{}: {}".format(
                    _key,
                    count_dict[_key]
                ) for _key in sorted(count_dict.keys())
            ]
        )
    )
    return ret
def deserialize(in_str):
    """Decode a compressed JSON payload into its Python structure."""
    decoded = server_command.decompress(in_str, json=True)
    return decoded
def device_syslog(opt_ns, cur_dev, j_logs):
    # Print syslog information for a single device by querying the
    # logcheck-server via ZMQ: rate info plus the latest log lines.
    print(
        "Information about device '{}' (full name {}, devicegroup {})".format(
            str(cur_dev), str(cur_dev.full_name), str(cur_dev.device_group)))
    print("UUID is '{}', database-ID is {:d}".format(cur_dev.uuid, cur_dev.pk))
    _cr = routing.SrvTypeRouting(force=True, ignore_errors=True)
    _ST = "logcheck-server"
    if _ST in _cr.service_types:
        _inst_xml = InstanceXML(quiet=True)
        # get logcheck-server IP
        _ls_ip = _cr[_ST][0][1]
        # get logcheck-server Port
        _ls_port = _inst_xml.get_port_dict(_ST, ptype="command")
        _sc = server_command.srv_command(command="get_syslog", )
        _sc["devices"] = _sc.builder(
            "devices",
            *[
                _sc.builder(
                    "device",
                    pk="{:d}".format(cur_dev.pk),
                    lines="{:d}".format(opt_ns.loglines),
                    minutes="{:d}".format(opt_ns.minutes),
                )
            ])
        _conn_str = "tcp://{}:{:d}".format(_ls_ip, _ls_port)
        _result = net_tools.ZMQConnection("icsw_state_{:d}".format(
            os.getpid())).add_connection(
                _conn_str,
                _sc,
            )
        if _result is not None:
            _dev = _result.xpath(".//ns:devices/ns:device[@pk]")[0]
            _lines = _result.xpath("ns:lines", start_el=_dev)[0]
            _rates = _result.xpath("ns:rates", start_el=_dev)
            if _rates:
                # map timeframe (seconds) -> lines/sec
                _rates = {
                    int(_el.get("timeframe")): float(_el.get("rate"))
                    for _el in _rates[0]
                }
                print("rate info: {}".format(", ".join([
                    "{:.2f} lines/sec in {}".format(
                        _rates[_seconds],
                        logging_tools.get_diff_time_str(_seconds))
                    for _seconds in sorted(_rates)
                ])))
            else:
                print("no rate info found")
                print(_rates)
            # log lines arrive as a compressed JSON list of dicts
            _out_lines = logging_tools.NewFormList()
            for _entry in server_command.decompress(_lines.text, json=True):
                _out_lines.append([
                    logging_tools.form_entry(_entry["line_id"], header="idx"),
                    logging_tools.form_entry(
                        "{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}".format(
                            *_entry["line_datetime_parsed"]),
                        header="Timestamp",
                    ),
                ] + [
                    logging_tools.form_entry(_entry[_key], header=_key)
                    for _key in ["hostname", "priority", "facility", "tag"]
                ] + [
                    logging_tools.form_entry(_entry["text"], header="text"),
                ])
            print(str(_out_lines))
        else:
            print("got no result from {} ({})".format(_conn_str, _ST))
    else:
        print("No logcheck-server found, skipping syslog display")
def interpret(self, srv_com, cur_ns):
    # Render the process tree returned by the host monitor, either as a
    # flat list or an indented tree, optionally filtered by process name.
    _fe = logging_tools.form_entry

    def proc_line(_ps, **kwargs):
        # build one output row for process _ps; nest controls indentation
        nest = kwargs.get("nest", 0)
        if _psutil:
            _affinity = _ps["cpu_affinity"]
            if len(_affinity) == num_cores:
                # affinity covering all cores is rendered as "-"
                _affinity = "-"
            else:
                _affinity = ",".join(
                    ["{:d}".format(_core) for _core in _affinity])
            pass
        else:
            _affinity = _ps.get("affinity", "-")
        return [
            _fe("{}{:d}".format(" " * nest, _ps["pid"]), header="pid"),
            _fe(_ps["ppid"], header="ppid"),
            # NOTE(review): in the non-psutil branch "proc_stuff" resolves via
            # closure to whatever interpret() last bound it to — confirm intent
            _fe(_ps["uids"][0] if _psutil else proc_stuff["uid"], header="uid"),
            _fe(_ps["gids"][0] if _psutil else proc_stuff["gid"], header="gid"),
            _fe(_ps["state"], header="state"),
            _fe(_ps.get("last_cpu", -1), header="cpu"),
            _fe(_affinity, header="aff"),
            _fe(_ps["out_name"], header="process"),
        ]

    def draw_tree(m_pid, nest=0):
        # recursively emit rows for m_pid and all of its children
        proc_stuff = result[m_pid]
        r_list = [proc_line(proc_stuff, nest=nest)]
        # _fe("%s%s" % (" " * nest, m_pid), header="pid"),
        for dt_entry in [
            draw_tree(y, nest + 2) for y in result[m_pid]["childs"]
        ]:
            r_list.extend([z for z in dt_entry])
        return r_list

    tree_view = cur_ns.tree
    comline_view = cur_ns.comline
    if cur_ns.filter:
        # a name filter disables the tree view
        name_re = re.compile("^.*%s.*$" % ("|".join(cur_ns.filter)), re.IGNORECASE)
        tree_view = False
    else:
        name_re = re.compile(".*")
    result = srv_com["process_tree"]
    _psutil = "psutil" in srv_com
    if _psutil:
        num_cores = srv_com["*num_cores"]
        # unpack and cast pid to integer
        result = {
            int(key): value for key, value in server_command.decompress(result.text, json=True).items()
        }
        for _val in result.values():
            _val["state"] = process_tools.PROC_STATUSES_REV[_val["status"]]
    # print etree.tostring(srv_com.tree, pretty_print=True)
    ret_state = limits.mon_STATE_CRITICAL
    pids = sorted([
        key for key, value in result.items() if name_re.match(value["name"])
    ])
    # precompute the display name for every matching process
    for act_pid in pids:
        proc_stuff = result[act_pid]
        proc_name = proc_stuff["name"] if proc_stuff["exe"] else "[%s]" % (
            proc_stuff["name"])
        if comline_view:
            proc_name = " ".join(proc_stuff.get("cmdline")) or proc_name
        proc_stuff["out_name"] = proc_name
    ret_a = [
        "found {} matching {}".format(
            logging_tools.get_plural("process", len(pids)),
            name_re.pattern)
    ]
    form_list = logging_tools.NewFormList()
    if tree_view:
        # build the child lists, then render every root (ppid == 0) process
        for act_pid in pids:
            result[act_pid]["childs"] = [
                pid for pid in pids if result[pid]["ppid"] == int(act_pid)
            ]
        for init_pid in [pid for pid in pids if not result[pid]["ppid"]]:
            form_list.extend(
                [add_line for add_line in draw_tree(init_pid)])
    else:
        form_list.extend([proc_line(result[_pid]) for _pid in pids])
    if form_list:
        ret_a.extend(str(form_list).split("\n"))
    return ret_state, "\n".join(ret_a)
def interpret(self, srv_com, cur_ns):
    # Check running processes: count ok / failed / zombie processes and
    # build a state string.  Handles three payload formats:
    # 0 = plain dict (old), 1 = compressed marshal, 2 = compressed JSON (psutil).
    result = srv_com["process_tree"]
    # pprint.pprint(result)
    if isinstance(result, dict):
        # old version, gives a dict
        _form = 0
    else:
        try:
            _form = int(result.get("format", "1"))
            if _form == 1:
                result = server_command.decompress(result.text, marshal=True)
            else:
                result = server_command.decompress(result.text, json=True)
        except:
            return limits.mon_STATE_CRITICAL, "cannot decompress: {}".format(
                process_tools.get_except_info())
    # print result.text
    p_names = cur_ns.arguments
    # zombies of these processes never count as failures
    zombie_ok_list = {"cron"}
    res_dict = {
        "ok": 0,
        "fail": 0,
        "kernel": 0,
        "userspace": 0,
        "zombie_ok": 0,
    }
    zombie_dict = {}
    if cur_ns.cmdre:
        _cmdre = re.compile(cur_ns.cmdre)
    else:
        _cmdre = None
    for _pid, value in result.items():
        # optional command-line regex filter
        if _cmdre and not _cmdre.search(" ".join(value["cmdline"])):
            continue
        if _form < 2:
            # hm ...
            _is_zombie = value.get("state", value.get("status", "?")) == "Z"
        else:
            _is_zombie = value["status"] == psutil.STATUS_ZOMBIE
        if _is_zombie:
            zombie_dict.setdefault(value["name"], []).append(True)
            if value["name"].lower() in zombie_ok_list:
                res_dict["zombie_ok"] += 1
            elif cur_ns.zombie:
                # zombies explicitly accepted via option
                res_dict["ok"] += 1
            else:
                res_dict["fail"] += 1
        else:
            res_dict["ok"] += 1
        if value["exe"]:
            res_dict["userspace"] += 1
        else:
            res_dict["kernel"] += 1
    if res_dict["fail"]:
        ret_state = limits.mon_STATE_CRITICAL
    elif res_dict["zombie_ok"]:
        ret_state = limits.mon_STATE_WARNING
    else:
        ret_state = limits.mon_STATE_OK
    if len(p_names) == 1 and len(result) == 1:
        # single requested / single found: show the real name if it differs
        found_name = list(result.values())[0]["name"]
        if found_name != p_names[0]:
            p_names[0] = "{} instead of {}".format(found_name, p_names[0])
    # print p_names, result
    # collapse zombie lists into occurrence counts
    zombie_dict = {key: len(value) for key, value in zombie_dict.items()}
    ret_state = max(
        ret_state,
        limits.check_floor(res_dict["ok"], cur_ns.warn, cur_ns.crit))
    ret_str = "{} running ({}{}{}{})".format(
        " + ".join([
            logging_tools.get_plural("{} process".format(key), res_dict[key])
            for key in ["userspace", "kernel"] if res_dict[key]
        ]) or "nothing",
        ", ".join(sorted(p_names)) if p_names else "all",
        ", {} [{}]".format(
            logging_tools.get_plural("zombie", res_dict["fail"]),
            ", ".join([
                "{}{}".format(
                    key,
                    " (x {:d})".format(zombie_dict[key]) if zombie_dict[key] > 1 else "")
                for key in sorted(zombie_dict)
            ]),
        ) if res_dict["fail"] else "",
        ", {}".format(
            logging_tools.get_plural("accepted zombie", res_dict["zombie_ok"])) if res_dict["zombie_ok"] else "",
        ", cmdRE is {}".format(cur_ns.cmdre) if _cmdre else "",
    )
    return ret_state, ret_str