def client_call(self, result, parsed_coms):
    """Interpret a mail-quota reply from a host monitor.

    result: raw payload; the first 3 bytes are a header, the rest is a
    marshalled dict (total_size, optional max_size, mail_account, user_name).
    Returns (ret_state, info_string).
    """
    u_dict = hm_classes.net_to_sys(result[3:])
    ret_state = limits.mon_STATE_OK
    used = u_dict["total_size"]
    if "max_size" in u_dict:
        max_size = u_dict["max_size"]
        # guard against a zero quota limit (original raised ZeroDivisionError)
        perc_used = 100. * float(used) / float(max_size) if max_size else 0.0
        quota_perc = "%.2f %%" % (perc_used)
        quota_info = "%s of %s" % (
            quota_perc,
            logging_tools.get_size_str(max_size, long_version=True).strip()
        )
        if perc_used > 100:
            ret_state = limits.mon_STATE_CRITICAL
            used_info = "over quota (%s)" % (quota_info)
        elif perc_used > 80:
            ret_state = limits.mon_STATE_WARNING
            used_info = "reaching quota (%s)" % (quota_info)
        else:
            used_info = "quota ok (%s)" % (quota_info)
    else:
        used_info = "no quota info"
    account_stat = u_dict.get("mail_account", "unknown")
    if account_stat.lower() != "unlocked":
        # a locked / unknown account is at least a warning
        ret_state = max(ret_state, limits.mon_STATE_WARNING)
    return ret_state, "%s %s (%s), used size is %s, %s" % (
        limits.get_state_str(ret_state),
        (u_dict.get("user_name", "name not set").split("/")[0]).strip(),
        account_stat,
        # explicit conditional instead of the fragile 'X and Y or Z' idiom
        logging_tools.get_size_str(used, long_version=True).strip() if used else "not known",
        used_info,
    )
def _send_remote_call_reply(self, zmq_sock, src_id, reply, msg_type, add_log=None):
    # Send a remote-call reply to peer *src_id* over the 0MQ router socket.
    # add_log is an optional extra suffix for the log lines.
    add_log = " ({})".format(add_log) if add_log is not None else ""
    if msg_type == RemoteCallMessageType.xml:
        # set source
        reply.update_source()
    # send
    # NOTE(review): original indentation was lost; this reconstruction assumes
    # the send happens for every message type and only update_source() is
    # XML-specific -- confirm against version control
    _send_str = unicode(reply)
    try:
        zmq_sock.send_unicode(src_id, zmq.SNDMORE)
        zmq_sock.send_unicode(_send_str)
    except:
        # log the failure including payload size and exception info
        self.log(
            "error sending reply to {} ({}): {}{}".format(
                src_id,
                logging_tools.get_size_str(len(_send_str)),
                process_tools.get_except_info(),
                add_log
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
    else:
        self.log(
            "sent {} to {}{}".format(
                logging_tools.get_size_str(len(_send_str)),
                src_id,
                add_log,
            )
        )
def do_rewrite(self, _email, user_xml):
    """Append text/HTML disclaimers to self.src_mail via altermime.

    Writes the source mail and both disclaimers into a temporary directory,
    runs altermime (which rewrites the input file in place) and reads the
    result back into self.dst_mail.  The temporary directory is always
    removed afterwards (the original implementation leaked it).
    """
    import shutil
    _tmpdir = tempfile.mkdtemp()
    try:
        self.log("tempdir is {}".format(_tmpdir))
        _src_mail = os.path.join(_tmpdir, "in")
        _dis_html = os.path.join(_tmpdir, "dis.html")
        _dis_text = os.path.join(_tmpdir, "dis.txt")
        # context managers ensure the files are flushed and closed
        # before altermime runs
        with open(_src_mail, "wb") as mail_file:
            mail_file.write(self.src_mail)
        with codecs.open(_dis_html, "wb", "utf-8") as html_file:
            html_file.write(self.disclaimer_html(user_xml))
        with codecs.open(_dis_text, "wb", "utf-8") as text_file:
            text_file.write(self.disclaimer_text(user_xml))
        self.log("size of mail before processing is {}".format(logging_tools.get_size_str(len(self.src_mail))))
        _call_args = [
            "/usr/local/bin/altermime",
            "--input={}".format(_src_mail),
            "--disclaimer={}".format(_dis_text),
            "--disclaimer-html={}".format(_dis_html),
            # "--force-for-bad-html",
            # "--force-into-b64",
        ]
        self.log("call_args are {}".format(" ".join(_call_args)))
        _result = subprocess.call(
            _call_args
        )
        self.log("result is {:d}".format(_result))
        # rewind: altermime modified the input file in place
        with open(_src_mail, "r") as result_file:
            self.dst_mail = result_file.read()
        self.log("size of mail after processing is {}".format(logging_tools.get_size_str(len(self.dst_mail))))
    finally:
        # fix resource leak: remove the temporary directory in all cases
        shutil.rmtree(_tmpdir, ignore_errors=True)
def store_nrpe_result(self, state, result):
    """Log the size of an NRPE result and store it as SUCCESS or FAILED.

    state: process exit code (0 means success); result: (stdout, stderr).
    """
    stdout_data, stderr_data = result
    self.log(
        "stdout has {}, stderr has {} [{:d}]".format(
            logging_tools.get_size_str(len(stdout_data)),
            logging_tools.get_size_str(len(stderr_data)),
            state,
        )
    )
    # missing stdout or a non-zero exit code count as failure
    if stdout_data is None or state != 0:
        outcome = RunResult.FAILED
    else:
        outcome = RunResult.SUCCESS
    self._store_result(outcome, "", stdout_data)
def _compress(self, *args, **kwargs):
    # Compress either an explicit file/link list (when target_dir equals
    # SLASH_NAME) or a whole directory into <image_dir>/<target>.tar.bz2
    # via an external tar + pbzip2 call, then report the compression ratio
    # and signal completion to the pool.
    s_time = time.time()
    target_dir, system_dir, image_dir = args[0:3]
    if target_dir == SLASH_NAME:
        # file mode: file and link lists are passed as args[3] / args[4]
        _dir_mode = False
        file_list = args[3]
        link_list = args[4]
        self.log(
            "compressing files '{}' and links '{}' (from dir {} to {})".format(
                ", ".join(file_list),
                ", ".join(link_list),
                system_dir,
                image_dir
            )
        )
        # total size from stat of every single file
        t_size = sum([os.stat(os.path.join(system_dir, entry))[stat.ST_SIZE] for entry in file_list])
    else:
        # directory mode: size is determined via 'du -sb'
        _dir_mode = True
        file_list, link_list = ([], [])
        self.log("compressing directory '{}' (from dir {} to {})".format(target_dir, system_dir, image_dir))
        t_size = int(self._call("du -sb {}".format(os.path.join(system_dir, target_dir))).split()[0])
    t_file = os.path.join(
        image_dir,
        "{}.tar.bz2".format(
            target_dir,
        )
    )
    self.log(
        "size is {} (target file is {})".format(
            logging_tools.get_size_str(t_size),
            t_file
        )
    )
    # no direct compression, use external program
    _com = "cd {} ; tar -cf {} --use-compress-prog=/opt/cluster/bin/pbzip2 --preserve-permissions {} {}".format(
        system_dir,
        t_file,
        " ".join(file_list) if not _dir_mode else target_dir,
        " ".join(link_list) if not _dir_mode else "",
    )
    self._call(_com)
    new_size = os.stat(t_file)[stat.ST_SIZE]
    self.log(
        "target size is {} (from 100 % to {:.2f} %)".format(
            logging_tools.get_size_str(new_size),
            # guard against an empty source (t_size == 0)
            100. * new_size / t_size if t_size else 0,
        )
    )
    e_time = time.time()
    self.log("compressing {} took {}".format(target_dir, logging_tools.get_diff_time_str(e_time - s_time)))
    self.send_pool_message("compress_done", target_dir)
def send(self, val):
    """Account *val* sent bytes; flush and reset the counters every 30 minutes."""
    now = time.time()
    elapsed = abs(now - self.__time)
    if elapsed > 30 * 60:
        # half an hour passed: log the accumulated traffic and start over
        self.log(
            "sent / received in {}: {} / {}".format(
                logging_tools.get_diff_time_str(elapsed),
                logging_tools.get_size_str(self.__val["send"]),
                logging_tools.get_size_str(self.__val["recv"]),
            )
        )
        self.__time = now
        self.__val = {"send": 0, "recv": 0}
    self.__val["send"] += val
def create_prob_str(self, name, in_dict, block_s):
    """Build a human-readable quota-violation string for *name*.

    in_dict holds 'used', 'soft' and 'hard' in blocks of size block_s.
    """
    problems = []
    # check soft first, then hard, matching the original output order
    for q_type in ["soft", "hard"]:
        if in_dict[q_type] and in_dict["used"] >= in_dict[q_type]:
            problems.append(
                "{} quota ({} > {})".format(
                    q_type,
                    logging_tools.get_size_str(in_dict["used"] * block_s, False, 1000),
                    logging_tools.get_size_str(in_dict[q_type] * block_s, False, 1000)
                )
            )
    return "{} for {}s".format(" and ".join(problems), name)
def __init__(self, in_dict):
    # Disc sensor of an EonStor enclosure; the state comes from key 11,
    # the disc number from key 13 of the SNMP result dict.
    eonstor_object.__init__(self, "disc", in_dict, state_key=11)
    disk_num = int(in_dict[13])
    self.name = "Disc{:d}".format(disk_num)
    if self.state in self.lu_dict:
        state_str, state_val = self.lu_dict[self.state]
        if state_val == limits.mon_STATE_WARNING:
            self.set_warn(state_str)
        elif state_val == limits.mon_STATE_CRITICAL:
            self.set_error(state_str)
    elif self.state & int("80", 16) == int("80", 16):
        # high bit (0x80) set: SCSI disc, lower bits carry the id
        self.name = "SCSI Disc {:d}".format(self.state & ~int("80", 16))
    else:
        self.set_warn("unknown state {:d}".format(self.state))
    # generate long string
    # ignore SCSIid and SCSILun
    if 15 in in_dict:
        # size = 2 ** key8 (blocksize exponent) * key7 (block count)
        # -- TODO confirm key semantics against the EonStor MIB
        disk_size = (2**int(in_dict[8])) * int(in_dict[7])
        vers_str = "{} ({})".format(
            (" ".join(in_dict[15].split())).strip(),
            in_dict[16].strip())
        self.long_string = "{}, LC {:d}, PC {:d}, {}".format(
            logging_tools.get_size_str(disk_size, divider=1000),
            int(in_dict[2]),
            int(in_dict[3]),
            vers_str)
    else:
        self.long_string = "no disk"
def _interpret(self, in_dict, cur_ns):
    # Build a monitoring result from a RAID-controller dict: one summary
    # entry per controller including its volumes (RAID level, size, state).
    # Any volume whose state is not 'okay' raises the result to CRITICAL.
    if "ctrls" in in_dict and in_dict["ctrls"]:
        ret_state = limits.mon_STATE_OK
        c_array = []
        for c_name in sorted(in_dict["ctrls"]):
            ctrl_dict = in_dict["ctrls"][c_name]
            vol_list = []
            for vol_key in sorted(ctrl_dict.get("volumes", {})):
                vol_dict = ctrl_dict["volumes"][vol_key]
                # first word of the status line is the state keyword
                vol_stat = vol_dict["status_of_volume"].split()[0]
                # volume size is given in MiB
                vol_list.append("vol{}, RAID{}, {}, {}".format(
                    vol_key,
                    vol_dict["raid_level"],
                    logging_tools.get_size_str(vol_dict["size"] * 1024 * 1024),
                    vol_stat,
                ))
                if vol_stat.lower() != "okay":
                    ret_state = max(ret_state, limits.mon_STATE_CRITICAL)
            c_array.append("{} ({}{}){}".format(
                c_name,
                ctrl_dict["controller_type"],
                ", {}".format(
                    logging_tools.get_plural(
                        "volume", len(ctrl_dict.get("volumes", {})))) if vol_list else "",
                ": {}".format(", ".join(vol_list)) if vol_list else "",
            ))
        return ret_state, "; ".join(c_array)
    else:
        return limits.mon_STATE_WARNING, "no controller found"
def init_webfrontend(opts):
    """Post-install setup of the web frontend.

    Injects addons into the generated app.js / main.html via manage
    commands and touches the uwsgi reload flag so the frontend restarts.
    The collectstatic / url-list loop is currently disabled ('if False').
    File writes use open() in a context manager instead of the unclosed
    file() calls of the original.
    """
    if False:
        for _what, _command, _target in [
            ("collecting static", "collectstatic --noinput -c", None),
            ("building url_list", "show_icsw_urls", os.path.join(ICSW_ROOT, "initat", "cluster", "frontend", "templates", "all_urls.html")),
        ]:
            print(_what)
            _success, _output = call_manage(_command.split(), output=True)
            if _success and _target:
                print(
                    " writing {} to {}".format(
                        logging_tools.get_size_str(len(_output), long_format=True),
                        _target,
                    )
                )
                with open(_target, "w") as target_file:
                    target_file.write(_output)
    for _what, _command, _target in [
        ("modify app.js", "inject_addons --srcfile /srv/www/init.at/icsw/app.js --modify --with-addons=yes", None),
        ("modify main.html", "inject_addons --srcfile /srv/www/init.at/icsw/main.html --modify --with-addons=yes", None),
    ]:
        print(_what)
        _success, _output = call_manage(_command.split(), output=True)
        if not _success:
            print("Something went wrong ({:d}): {}".format(_success, _output))
    # already configured; run collectstatic
    _RELOAD_FLAG = "/opt/cluster/etc/uwsgi/reload/webfrontend.touch"
    if os.path.exists("/opt/cluster/etc/uwsgi/reload"):
        print("touching reload flag {}".format(_RELOAD_FLAG))
        with open(_RELOAD_FLAG, "w") as flag_file:
            flag_file.write("")
    else:
        print("no uwsgi reload-dir found, please (re)start uwsgi-init via")
        print("")
        print("icsw service restart uwsgi-init")
        print("")
def _interpret(self, dom_dict, cur_ns):
    """Interpret a libvirt domain-info dict into a monitoring state.

    cur_ns.arguments holds the requested domain name(s); for a running
    domain dom_dict['desc'] carries the libvirt XML description.
    Returns (ret_state, info_string).
    """
    ret_state, out_f = (limits.mon_STATE_OK, [])
    if cur_ns and cur_ns.arguments:
        if "desc" in dom_dict and dom_dict["desc"]:
            xml_doc = etree.fromstring(dom_dict["desc"])
            # print etree.tostring(xml_doc, pretty_print=True)
            # memory is stored in KiB in the libvirt XML; VNC display = port - 5900
            out_f.append("{}, memory {}, {}, {}, VNC port is {:d}".format(
                xml_doc.find(".//name").text,
                logging_tools.get_size_str(
                    int(xml_doc.find(".//memory").text) * 1024),
                logging_tools.get_plural("disk", len(xml_doc.findall(".//disk"))),
                logging_tools.get_plural(
                    "iface", len(xml_doc.findall(".//interface"))),
                int(xml_doc.find(".//graphics").attrib["port"]) - 5900))
        else:
            if "cm" in dom_dict and dom_dict["cm"]:
                ret_state = limits.mon_STATE_CRITICAL
                out_f.append("domain '{}' not running".format(
                    dom_dict["cm"]))
            else:
                ret_state = limits.mon_STATE_WARNING
                out_f.append(
                    "no domain-info in result (domain {} not running)".
                    format(", ".join(cur_ns.arguments)))
    else:
        ret_state = limits.mon_STATE_WARNING
        # fixed message typo ("give" -> "given")
        out_f.append("no domain-name given")
    return ret_state, ", ".join(out_f)
def write(self):
    """Write the cached content to self.name (only when self.modify is set)."""
    dest = self.name
    self.debug("would wrote {} to {}".format(
        logging_tools.get_size_str(len(self._content)),
        dest,
    ))
    if self.modify:
        # open() in a context manager instead of the leaking file() call
        with open(dest, "w") as dest_file:
            dest_file.write(self._content)
def _start_logging(self, **kwargs):
    """Emit the configured log line 'repeat' times and report the byte count."""
    self.log("start logging")
    total_bytes = 0
    repeat = self.__options.repeat
    for round_num in range(repeat):
        self.log("{:d}/{:d}: {}".format(round_num + 1, repeat, self.__log_str))
        total_bytes += len(self.__log_str)
    self.log("bytes emitted: {}".format(logging_tools.get_size_str(total_bytes)))
    self.send_pool_message("stop_logging", total_bytes)
def _stop_logging(self, p_name, p_pid, num_bytes, **kwargs):
    """Collect byte counts from finished logger processes; request exit once all are done."""
    self.__bytes_total += num_bytes
    self.__processes_running -= 1
    if not self.__processes_running:
        # last process finished: report the grand total and shut down
        self.log("bytes emitted: {}".format(logging_tools.get_size_str(self.__bytes_total)))
        self["exit_requested"] = True
    else:
        self.log("{} still logging".format(logging_tools.get_plural("process", self.__processes_running)))
def handle_local_sync_slave(self, srv_com):
    """Build the send commands for a local sync slave and dispatch them directly."""
    # create send commands
    to_send, num_files, size_data = self._get_send_commands()
    self.log(
        "local sync, handling {} ({})".format(
            logging_tools.get_plural("file", num_files),
            logging_tools.get_size_str(size_data),
        )
    )
    # dispatch each generated command (loop variable no longer shadows srv_com)
    for send_com in to_send:
        self.struct.handle_direct_action(send_com["*action"], send_com)
def get_quota_str(uqs):
    # Build a one-line usage summary (violations, grace time, used/soft/hard
    # sizes) for a quota-stat object *uqs*.
    _info_f = []
    if uqs.bytes_used > uqs.bytes_hard:
        _info_f.append("hard quota violated")
    elif uqs.bytes_used > uqs.bytes_soft:
        _info_f.append("soft quota violated")
        # NOTE(review): original indentation was lost; grace time is assumed
        # to be reported only while over the soft (not the hard) limit, which
        # matches usual quota semantics -- confirm against version control
        if uqs.bytes_gracetime:
            # seconds
            # bytes_gracetime appears to be an absolute timestamp -- TODO confirm
            grace_time = uqs.bytes_gracetime
            cur_time = int(time.time())
            _info_f.append("grace time left is {}".format(logging_tools.get_diff_time_str(grace_time - cur_time)))
        else:
            pass
    return " {}{} used ({} soft, {} hard)".format(
        "{}; ".format(", ".join(_info_f)) if _info_f else "",
        logging_tools.get_size_str(uqs.bytes_used, True, 1024, True),
        logging_tools.get_size_str(uqs.bytes_soft, True, 1024, True),
        logging_tools.get_size_str(uqs.bytes_hard, True, 1024, True),
    )
def _check_single_peer(self, clients, vpn_device, inst_name, peer_name, res_field):
    # Check a single VPN peer: compare the configured client ip with the
    # remote address reported by the server and append a status line
    # (with rx/tx perfdata when counters are present) to res_field.
    # Returns the resulting monitoring state.
    ret_state = limits.mon_STATE_OK
    p_ip = clients.get(peer_name, {}).get("client_ip", "")
    p_ip_str = " at {}".format(p_ip) if p_ip else ""
    if peer_name in clients and clients[peer_name].get("online", True):
        peer_dict = clients[peer_name]
        if "remote" in peer_dict:
            remote_ip = peer_dict["remote"]
            if p_ip:
                # has p_ip, compare with remote_ip
                if remote_ip == p_ip:
                    # same, ok
                    pass
                else:
                    # differ, oh-oh
                    p_ip_str = "{} != {}".format(p_ip_str, remote_ip)
                    ret_state = max(ret_state, limits.mon_STATE_WARNING)
            else:
                # no p_ip, set p_ip_str according to
                p_ip_str = " at {}".format(remote_ip)
        if "rxs" in peer_dict:
            # rx/tx counters present: include them as perfdata
            res_field.append(
                "{} (Srv on {}, client {}{} ok, {}/s {}/s) | rx={:d} tx={:d}"
                .format(
                    inst_name,
                    vpn_device,
                    peer_name,
                    p_ip_str,
                    logging_tools.get_size_str(peer_dict["rxs"]),
                    logging_tools.get_size_str(peer_dict["txs"]),
                    int(peer_dict["rxs"]),
                    int(peer_dict["txs"]),
                ))
        else:
            res_field.append("{} (Srv on {}, client {}{} ok)".format(
                inst_name, vpn_device, peer_name, p_ip_str))
    else:
        # peer unknown or flagged offline
        res_field.append("{} (Srv via {}, client {}{} not found)".format(
            inst_name, vpn_device, peer_name, p_ip_str))
        ret_state = max(ret_state, limits.mon_STATE_CRITICAL)
    return ret_state
def get_core_info(self, package_idx, core_idx):
    """Return a compact cache-size signature (one entry per cache level)
    followed by the package id, e.g. '32k256k4M_<id>'."""
    pack = self.packages[package_idx]
    core = pack["cores"][core_idx]
    size_parts = []
    for depth in sorted(core.keys()):
        level_total = sum(sum(core[depth].values(), []))
        # drop the trailing unit character and inner spaces from the size string
        size_parts.append(
            logging_tools.get_size_str(level_total, strip_spaces=True, to_int=True)[:-1].replace(" ", "")
        )
    return "{}_{}".format("".join(size_parts), pack["id"])
def _check_size(self, cur_img):
    """
    check size of target directory; raises ValueError when the free space
    at the image destination is below 120 % of the image size
    """
    # call statvfs only once (the original queried the filesystem twice)
    _vfs = os.statvfs(self.__image_dir)
    target_free_size = _vfs[statvfs.F_BFREE] * _vfs[statvfs.F_BSIZE]
    orig_size = int(self._call(cur_img, "du -sb {}".format(cur_img.source)).split()[0])
    self.log(
        "size of image is {}, free space is {} (at {})".format(
            logging_tools.get_size_str(orig_size),
            logging_tools.get_size_str(target_free_size),
            self.__image_dir,
        )
    )
    cur_img.size = orig_size
    # size_string is automatically set in pre_save handler
    cur_img.save()
    # require 20 % headroom on the target filesystem
    if orig_size * 1.2 > target_free_size:
        raise ValueError(
            "not enough free space ({}, image has {})".format(
                logging_tools.get_size_str(target_free_size),
                logging_tools.get_size_str(orig_size)
            )
        )
def _file_watch_content(self, *args, **kwargs):
    # Cache the content of a watched job file: metadata is kept in
    # self.__job_content_dict, the content itself goes into the cache
    # backend (5 hour timeout), keyed by a per-file uuid.
    srv_com = server_command.srv_command(source=args[0])
    job_id = srv_com["*send_id"].split(":")[0]
    file_name = srv_com["*name"]
    # in case of empty file
    content = srv_com["content"].text or ""
    last_update = int(float(srv_com["*update"]))
    self.log(
        "got content for '{}' (job {}), len {:d} bytes, update_ts {:d}".
        format(
            file_name,
            job_id,
            len(content),
            last_update,
        ))
    if len(job_id) and job_id[0].isdigit():
        # job_id is ok
        try:
            if file_name not in self.__job_content_dict.get(job_id, {}):
                # first sighting: create the metadata element with a fresh cache uuid
                self.__job_content_dict.setdefault(
                    job_id, {})[file_name] = E.file_content(
                        name=file_name,
                        last_update="{:d}".format(int(last_update)),
                        cache_uuid="rms_fc_{}".format(uuid.uuid4()),
                        size="{:d}".format(len(content)),
                    )
            # already present, replace file
            _cur_struct = self.__job_content_dict[job_id][file_name]
            # timeout: 5 hours
            cache.set(_cur_struct.attrib["cache_uuid"], content, 5 * 3600)
        except:
            self.log(
                "error settings content of file {}: {}".format(
                    file_name, process_tools.get_except_info()),
                logging_tools.LOG_LEVEL_ERROR)
        else:
            # success: log aggregate cache statistics (file and byte totals)
            tot_files = sum(
                [len(value) for value in self.__job_content_dict.values()], 0)
            tot_length = sum([
                sum([
                    int(cur_el.attrib["size"])
                    for _name, cur_el in _dict.items()
                ], 0) for job_id, _dict in self.__job_content_dict.items()
            ])
            self.log("cached: {:d} files, {} ({:d} bytes)".format(
                tot_files, logging_tools.get_size_str(tot_length),
                tot_length))
    else:
        self.log("job_id {} is suspicious, ignoring".format(job_id),
                 logging_tools.LOG_LEVEL_WARN)
def __init__(self, in_dict):
    # Logical-drive (LD) sensor of an EonStor enclosure; state key 7,
    # opmode from key 5, size from keys 3/4, drive counts from keys 8-11
    # of the SNMP result dict.
    eonstor_object.__init__(self, "ld", in_dict, state_key=7)
    self.name = "LD"
    # lower three bits of key 6 select the basic state
    state_str, state_val = self.lu_dict[int(in_dict[6]) & 7]
    if state_val == limits.mon_STATE_WARNING:
        self.set_warn(state_str)
    elif state_val == limits.mon_STATE_CRITICAL:
        self.set_error(state_str)
    # individual bit flags in the state word
    if self.state & 1:
        self.set_warn("rebuilding")
    if self.state & 2:
        self.set_warn("expanding")
    if self.state & 4:
        self.set_warn("adding drive(s)")
    if self.state & 64:
        self.set_warn("SCSI drives operation paused")
    # opmode
    op_mode = int(in_dict[5]) & 15
    op_mode_str = {
        0: "Single Drive",
        1: "NON-RAID",
        2: "RAID 0",
        3: "RAID 1",
        4: "RAID 3",
        5: "RAID 4",
        6: "RAID 5",
        7: "RAID 6"
    }.get(op_mode, "NOT DEFINED")
    op_mode_extra_bits = int(in_dict[5]) - op_mode
    if isinstance(in_dict[3], str) and in_dict[3].lower().startswith("0x"):
        # size given as hex string of 512-byte blocks
        ld_size = int(in_dict[3][2:], 16) * 512
        vers_str = "id %s" % (in_dict[2])
    else:
        # size = 2 ** key4 (blocksize exponent) * key3 (block count)
        # -- TODO confirm key semantics against the EonStor MIB
        ld_size = (2 ** int(in_dict[4])) * (int(in_dict[3]))
        vers_str = "id %d" % (int(in_dict[2]))
    drv_total, drv_online, drv_spare, drv_failed = (
        int(in_dict[8]),
        int(in_dict[9]),
        int(in_dict[10]),
        int(in_dict[11]),
    )
    if drv_failed:
        self.set_error("%s failed" % (logging_tools.get_plural("drive", drv_failed)))
    drv_info = "%d total, %d online%s" % (
        drv_total,
        drv_online,
        ", %d spare" % (drv_spare) if drv_spare else "")
    self.long_string = "%s (0x%x) %s, %s, %s" % (
        op_mode_str,
        op_mode_extra_bits,
        logging_tools.get_size_str(ld_size, divider=1000),
        drv_info,
        vers_str)
def handle_local_sync_slave(self, srv_com):
    """Local sync: mirror the config versions from srv_com, build the send
    commands from the given file tuples and dispatch them directly."""
    # copy config versions from srv_com (see handle_remote_sync_slave)
    self.config_version_build = int(srv_com["*config_version_build"])
    self.config_version_send = self.config_version_build
    # create send commands
    to_send, num_files, size_data = self._get_send_commands(
        json.loads(srv_com["*file_tuples"])
    )
    self.log(
        "local sync, handling {} ({})".format(
            logging_tools.get_plural("file", num_files),
            logging_tools.get_size_str(size_data),
        )
    )
    # dispatch each generated command (loop variable no longer shadows srv_com)
    for send_com in to_send:
        self.struct.handle_direct_action(send_com["*action"], send_com)
def salt_dict(self, in_dict: dict) -> dict:
    """Return physical memory usage in percent as cmp_value plus display strings."""
    keys = self.Meta.main_keys
    if not self.has_keys(in_dict, keys):
        # required keys missing: fall back to the dummy result
        return self.dummy_parsed
    values = self.val_dict(in_dict, keys)
    used_phys = values["avail.phys"] - values["free.phys"]
    return {
        "cmp_value": 100. * used_phys / values["avail.phys"],
        "display": {
            key.split(".")[0]: logging_tools.get_size_str(values[key])
            for key in keys
        },
    }
def interpret(self, srv_com, cur_ns):
    """Check memcache usage per server against the warn/crit ceilings."""
    if "memcache_stats" not in srv_com:
        return limits.mon_STATE_CRITICAL, "no stats found"
    mc_stats = srv_com["*memcache_stats"]
    ret_state = limits.mon_STATE_OK
    info_list = []
    for srv_name, srv_stats in mc_stats:
        used_bytes = int(srv_stats["bytes"])
        max_bytes = int(srv_stats["limit_maxbytes"])
        usage_perc = used_bytes * 100. / max_bytes
        info_list.append(
            "{}: {} of {} used ({:.2f} %)".format(
                srv_name.strip(),
                logging_tools.get_size_str(used_bytes),
                logging_tools.get_size_str(max_bytes),
                usage_perc,
            )
        )
        # highest per-server state wins
        ret_state = max(ret_state, limits.check_ceiling(usage_perc, cur_ns.warn, cur_ns.crit))
    return ret_state, ", ".join(info_list)
def salt_dict(self, in_dict: dict) -> dict:
    """Return the maximum per-key rate as cmp_value plus per-key display strings."""
    keys = self.Meta.main_keys
    if not self.has_keys(in_dict, keys):
        # required keys missing: fall back to the dummy result
        return self.dummy_parsed
    values = self.val_dict(in_dict, keys)
    return {
        "cmp_value": max(values.values()),
        "display": {
            key: logging_tools.get_size_str(
                values[key],
                per_second=True,
                strip_spaces=True,
            )
            for key in keys
        },
    }
def _interpret(self, f_dict, cur_ns):
    # Check a single file: report its size and raise CRITICAL when the
    # last modification lies more than cur_ns.mod_diff_time seconds back.
    ret_state = limits.mon_STATE_OK
    file_stat = f_dict["stat"]
    if isinstance(file_stat, dict):
        file_size = file_stat["st_size"]
        file_mtime = file_stat["st_mtime"]
    else:
        # stat result given as tuple
        file_size = file_stat[stat.ST_SIZE]
        file_mtime = file_stat[stat.ST_MTIME]
    add_array = ["size %s" % (logging_tools.get_size_str(file_size))]
    # encode 'now' as minutes since start of week:
    # min + 60 * (hour + 24 * weekday)
    act_time = time.localtime()
    act_time = (act_time.tm_wday + 1, act_time.tm_hour, act_time.tm_min)
    act_time = act_time[2] + 60 * (act_time[1] + 24 * act_time[0])
    in_exclude_range = False
    # exclude-range handling is currently disabled via 'if False'
    if False and cur_ns.exclude_checkdate:
        for s_time, e_time in cur_ns.exclude_checkdate:
            if s_time:
                s_time = s_time[2] + 60 * (s_time[1] + 24 * s_time[0])
            if e_time:
                e_time = e_time[2] + 60 * (e_time[1] + 24 * e_time[0])
            # NOTE(review): original indentation was lost; the three range
            # checks are reconstructed as independent ifs exactly as the
            # tokens suggest -- confirm against version control
            if s_time and e_time:
                if s_time <= act_time and act_time <= e_time:
                    in_exclude_range = True
            if s_time:
                if s_time <= act_time:
                    in_exclude_range = True
            if e_time:
                if act_time <= e_time:
                    in_exclude_range = True
    if in_exclude_range:
        add_array.append("in exclude_range")
    else:
        if cur_ns.mod_diff_time:
            md_time = abs(file_mtime - f_dict["local_time"])
            if md_time > cur_ns.mod_diff_time:
                # file too old: escalate to CRITICAL
                ret_state = max(ret_state, limits.mon_STATE_CRITICAL)
                add_array.append("changed %s ago > %s" %
                                 (logging_tools.get_diff_time_str(md_time),
                                  logging_tools.get_diff_time_str(
                                      cur_ns.mod_diff_time)))
            else:
                add_array.append("changed %s ago < %s" %
                                 (logging_tools.get_diff_time_str(md_time),
                                  logging_tools.get_diff_time_str(
                                      cur_ns.mod_diff_time)))
    return ret_state, "file %s %s" % (f_dict["file"], ", ".join(add_array))
def bg_send_to_server(self, conn_str, srv_uuid, srv_com, **kwargs):
    """Send srv_com to another server via 0MQ or execute it locally.

    Returns True on success.  On a fresh connection up to 10 send attempts
    are made (with 0.2 s pauses, the router socket needs a moment to
    establish the peer), otherwise a single attempt.
    """
    _success = True
    # only for local calls
    local = kwargs.get("local", False)
    if local:
        self._execute_command(srv_com)
        self.bg_notify_handle_result(srv_com)
    else:
        if conn_str not in self.__other_server_dict:
            self.log("connecting to {} (uuid {})".format(
                conn_str, srv_uuid))
            # bugfix: register the uuid under conn_str instead of replacing
            # the whole dict with the uuid string (the original assignment
            # 'self.__other_server_dict = srv_uuid' broke the membership
            # test on every following call)
            self.__other_server_dict[conn_str] = srv_uuid
            self.main_socket.connect(conn_str)
            num_iters = 10
        else:
            num_iters = 1
        _cur_iter = 0
        while True:
            _cur_iter += 1
            try:
                self.main_socket.send_unicode(
                    srv_uuid, zmq.SNDMORE)  # @UndefinedVariable
                self.main_socket.send_unicode(unicode(srv_com))
            except:
                self.log(
                    "cannot send to {} [{:d}/{:d}]: {}".format(
                        conn_str, _cur_iter, num_iters,
                        process_tools.get_except_info()),
                    logging_tools.LOG_LEVEL_CRITICAL)
                _success = False
            else:
                _success = True
            if _success:
                self.log("send {} to {} [{:d}/{:d}]".format(
                    logging_tools.get_size_str(len(unicode(srv_com))),
                    conn_str,
                    _cur_iter,
                    num_iters,
                ))
                break
            else:
                if _cur_iter < num_iters:
                    # short pause before the next attempt
                    time.sleep(0.2)
                else:
                    break
    return _success
def stress_system():
    """Stress-test SGEInfo updates (1000 rounds), printing memory usage every 20."""
    from initat.tools import process_tools
    # stress sge info
    sge_info = sge_tools.SGEInfo(
        server="localhost",
        default_pref=["server"],
        never_direct=True,
        run_initial_update=False,
        log_command=log_com,
    )
    for round_num in range(1000):
        if round_num % 20 == 0:
            print("iteration: {:3d}, memory usage: {}".format(
                round_num,
                logging_tools.get_size_str(process_tools.get_mem_info())))
        sge_info.update()
    sys.exit(0)
def _store_file(self, t_file, new_vers, content):
    # Write a monitoring config file below MD_BASEDIR when new_vers is
    # newer than the on-disk version; creates missing directories and
    # records the outcome in the per-file file_info object.
    MON_TOP_DIR = global_config["MD_BASEDIR"]
    if not t_file.startswith(MON_TOP_DIR):
        # safety belt: never write outside the monitoring base directory
        self.log("refuse to operate outside '{}'".format(MON_TOP_DIR, ),
                 logging_tools.LOG_LEVEL_CRITICAL)
    else:
        renew, log_str, file_info = self._check_version(t_file, new_vers)
        if renew:
            t_dir = os.path.dirname(t_file)
            if not os.path.exists(t_dir):
                try:
                    os.makedirs(t_dir)
                except:
                    self.log(
                        "error creating directory {}: {}".format(
                            t_dir, process_tools.get_except_info()),
                        logging_tools.LOG_LEVEL_ERROR)
                else:
                    self.log("created directory {}".format(t_dir))
            if os.path.exists(t_dir):
                try:
                    codecs.open(t_file, "w", "utf-8").write(content)
                    # we no longer chown because we are not running as root
                except:
                    self.log(
                        "error creating file {}: {}".format(
                            t_file, process_tools.get_except_info()),
                        logging_tools.LOG_LEVEL_ERROR)
                    file_info.log_error(process_tools.get_except_info())
                else:
                    self.log("created {} [{}, {}]".format(
                        t_file,
                        logging_tools.get_size_str(len(content)).strip(),
                        log_str,
                    ))
                    file_info.log_success()
        else:
            self.log("file {} not newer [{}]".format(t_file, log_str),
                     logging_tools.LOG_LEVEL_WARN)
def _update_xml(self):
    # Refresh the cached libvirt state of this instance: name, XML
    # description, memory, vcpus, VNC port and the disk / network device maps.
    self.name = self.dom_handle.name()
    self.log("Instance name is '{}', ID is {}".format(
        self.name, self.inst_id))
    self.xml_desc = etree.fromstring(
        self.dom_handle.XMLDesc(0))  # @UndefinedVariable
    # currentMemory is given in KiB by libvirt
    self.memory = int(
        self.xml_desc.xpath(".//currentMemory",
                            smart_strings=False)[0].text) * 1024
    self.vcpus = int(
        self.xml_desc.xpath(".//vcpu", smart_strings=False)[0].text)
    self.log("memory is {}, {}".format(
        logging_tools.get_size_str(self.memory),
        logging_tools.get_plural("CPU", self.vcpus)))
    self.disk_dict, self.net_dict = ({}, {})
    self.vnc_port = None
    vnc_entry = self.xml_desc.xpath(".//graphics[@type='vnc']",
                                    smart_strings=False)
    if vnc_entry:
        # VNC display number = port - 5900
        self.vnc_port = int(vnc_entry[0].attrib["port"]) - 5900
        self.log("VNC port is {:d}".format(self.vnc_port))
    else:
        self.log("no VNC-port defined", logging_tools.LOG_LEVEL_WARN)
    # print etree.tostring(self.xml_desc, pretty_print=True)
    for disk_entry in self.xml_desc.findall(".//disk[@device='disk']"):
        cur_disk_info = disk_info(disk_entry)
        # map target device name (e.g. 'vda') to its disk_info
        self.disk_dict[disk_entry.xpath(
            ".//target", smart_strings=False)[0].attrib["dev"]] = cur_disk_info
        self.log(cur_disk_info.get_info())
    for net_entry in self.xml_desc.findall(".//interface[@type='bridge']"):
        cur_net_info = net_info(net_entry)
        # map target device name to its net_info
        self.net_dict[net_entry.xpath(
            ".//target", smart_strings=False)[0].attrib["dev"]] = cur_net_info
        self.log(cur_net_info.get_info())
    self.base_info = base_stats()