def _get_sys_info(self, *args, **kwargs):
    # pass the slave info to the frontend
    srv_com = server_command.srv_command(source=args[0])
    _inst_list = self.__slave_configs.values()
    _info_list = [_slave.info for _slave in _inst_list]
    srv_com.set_result(
        "ok set info for {}".format(
            logging_tools.get_plural("system", len(_inst_list))
        )
    )
    srv_com["sys_info"] = server_command.compress(_info_list, json=True)
    self.send_pool_message("remote_call_async_result", unicode(srv_com))
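# logging_tools.get_plural() is used throughout this module; a minimal
# stand-in with the assumed behaviour (assumption: it renders as
# "<count> <name>" with a plain "s" plural, the real implementation
# may handle irregular forms differently):

def get_plural(name, count):
    # "system", 1 -> "1 system"; "system", 3 -> "3 systems"
    return "{:d} {}{}".format(count, name, "" if count == 1 else "s")

assert get_plural("system", 1) == "1 system"
assert get_plural("system", 3) == "3 systems"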
def clear_pending_scans(self):
    pending_locks = DeviceScanLock.objects.filter(
        Q(server=global_config["SERVER_IDX"]) & Q(active=True)
    )
    if pending_locks.count():
        self.log(
            "clearing {}".format(
                logging_tools.get_plural("active scan", pending_locks.count())
            )
        )
        for _lock in pending_locks:
            # close() yields (message, log_level) tuples
            for _what, _level in _lock.close():
                self.log(_what, _level)
def _init_capabilities(self):
    self.__cap_list = []
    if global_config["BACKUP_DATABASE"]:
        self.log("doing database backup, ignoring capabilities", logging_tools.LOG_LEVEL_WARN)
    else:
        # read caps
        _dir = os.path.dirname(__file__)
        self.log("init server capabilities from directory {}".format(_dir))
        SRV_CAPS = []
        for entry in os.listdir(_dir):
            if entry.endswith(".py") and entry not in ["__init__.py"]:
                _imp_name = "initat.cluster_server.capabilities.{}".format(
                    entry.split(".")[0]
                )
                _mod = importlib.import_module(_imp_name)
                for _key in dir(_mod):
                    _value = getattr(_mod, _key)
                    if inspect.isclass(_value) and issubclass(
                        _value, base.BackgroundBase
                    ) and _value != base.BackgroundBase:
                        SRV_CAPS.append(_value)
        self.log(
            "checking {}".format(
                logging_tools.get_plural("capability", len(SRV_CAPS))
            )
        )
        self.__server_cap_dict = {}
        self.__cap_list = []
        try:
            sys_cc = config_catalog.objects.get(Q(system_catalog=True))
        except config_catalog.DoesNotExist:
            sys_cc = factories.ConfigCatalog(name="local", system_catalog=True)
        for _srv_cap in SRV_CAPS:
            cap_name = _srv_cap.Meta.name
            try:
                cap_descr = _srv_cap.Meta.description
            except AttributeError:
                self.log(
                    "capability {} has no description set, ignoring...".format(cap_name),
                    logging_tools.LOG_LEVEL_ERROR
                )
            else:
                _new_c = factories.Config(
                    name=cap_name,
                    description=cap_descr,
                    config_catalog=sys_cc,
                    server_config=True,
                    # system_config=True,
                )
                _sql_info = config_tools.server_check(server_type=cap_name)
                if _sql_info.effective_device:
                    self.__cap_list.append(cap_name)
                    self.__server_cap_dict[cap_name] = _srv_cap(self, _sql_info)
                    self.log(
                        "capability {} is enabled on {}".format(
                            cap_name,
                            unicode(_sql_info.effective_device),
                        )
                    )
                else:
                    self.log("capability {} is disabled".format(cap_name))
def check(self, start=True):
    # return True if the process is still running
    if self.running:
        self.result = self.__ec.finished()
        if self.result is None:
            if self.check_for_timeout():
                self.log("terminating")
                self.terminate()
                self.running = False
        else:
            self.running = False
            stdout, stderr = self.__ec.communicate()
            if BackgroundJob.debug or self.result:
                self.log(
                    "done (RC={:d}) in {} (stdout: {}{})".format(
                        self.result,
                        logging_tools.get_diff_time_str(self.__ec.end_time - self.__ec.start_time),
                        logging_tools.get_plural("byte", len(stdout)),
                        ", stderr: {}".format(
                            logging_tools.get_plural("byte", len(stderr))
                        ) if stderr else "",
                    )
                )
            if stdout and self.result == 0:
                if self.builder is not None:
                    _tree, _mon_info = self.builder.build(
                        stdout,
                        name=self.device_name,
                        uuid=self.uuid,
                        time="{:d}".format(int(self.last_start)),
                    )
                    # graphing
                    BackgroundJob.bg_proc.process_data_xml(_tree, len(etree.tostring(_tree)))  # @UndefinedVariable
                    # monitoring
                    BackgroundJob.bg_proc.send_to_remote_server(
                        icswServiceEnum.monitor_server,
                        unicode(server_command.srv_command(command="monitoring_info", mon_info=_mon_info))
                    )
                else:
                    BackgroundJob.log("no builder set", logging_tools.LOG_LEVEL_ERROR)
            if stderr:
                self.log("error output follows, cmdline was '{}'".format(self.comline))
                for line_num, line in enumerate(stderr.strip().split("\n")):
                    self.log("  {:3d} {}".format(line_num + 1, line), logging_tools.LOG_LEVEL_ERROR)
    else:
        # restart when never started or the interval has elapsed,
        # but never for jobs flagged for removal
        if (self.last_start is None or abs(int(time.time() - self.last_start)) >= self.run_every) and not self.to_remove:
            if start:
                self._start_ext_com()
    return self.running
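# A minimal, self-contained sketch of the restart condition above (helper
# name and all values are hypothetical). Note that "or" binds looser than
# "and" in Python, so the parentheses around the time check matter:
import time


def should_restart(last_start, run_every, to_remove, now=None):
    # restart if never started or overdue, but never when flagged for removal
    now = now or time.time()
    overdue = last_start is None or abs(int(now - last_start)) >= run_every
    return overdue and not to_remove


# a job flagged for removal is never restarted, even when overdue
assert should_restart(last_start=None, run_every=300, to_remove=True) is False
assert should_restart(last_start=time.time() - 600, run_every=300, to_remove=False) is True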
def _check_nfs_exports(self):
    if global_config["MODIFY_NFS_CONFIG"]:
        exp_file = "/etc/exports"
        if os.path.isfile(exp_file):
            act_exports = {
                part[0]: " ".join(part[1:]) for part in [
                    line.strip().split() for line in open(exp_file, "r").read().split("\n")
                ] if len(part) > 1 and part[0].startswith("/")
            }
            self.log(
                "found /etc/exports file with {}:".format(
                    logging_tools.get_plural("export entry", len(act_exports))
                )
            )
            exp_keys = sorted(act_exports.keys())
            my_fm = logging_tools.form_list()
            for act_exp in exp_keys:
                where = act_exports[act_exp]
                my_fm.add_line([act_exp, where])
            if my_fm:
                for line in str(my_fm).split("\n"):
                    self.log("    - {}".format(line))
        else:
            self.log("found no /etc/exports file, creating new one ...")
            act_exports = {}
        valid_nt_ids = ["p", "b"]
        valid_nets = network.objects.filter(
            Q(network_type__identifier__in=valid_nt_ids)
        )
        exp_dict = {
            "etherboot": "ro",
            "kernels": "ro",
            "images": "ro",
            "config": "rw",
        }
        new_exports = {}
        exp_nets = [
            "{}/{}".format(cur_net.network, cur_net.netmask) for cur_net in valid_nets
        ]
        if exp_nets:
            for exp_dir, rws in exp_dict.items():
                act_exp_dir = os.path.join(global_config["TFTP_DIR"], exp_dir)
                if act_exp_dir not in act_exports:
                    new_exports[act_exp_dir] = " ".join(
                        [
                            "{}({},no_root_squash,async,no_subtree_check)".format(exp_net, rws) for exp_net in exp_nets
                        ]
                    )
        if new_exports:
            open(exp_file, "a").write(
                "\n".join(
                    [
                        "{:<30s} {}".format(x, y) for x, y in new_exports.items()
                    ] + [""]
                )
            )
            # hm, dangerous, FIXME
            for _srv_name in self.srv_helper.find_services(".*nfs.*serv.*"):
                self.srv_helper.service_command(_srv_name, "restart")
def main():
    my_parser = argparse.ArgumentParser()
    my_parser.add_argument("-d", dest="detail", default=False, action="store_true", help="detailed mode [%(default)s]")
    my_parser.add_argument("hosts", nargs=2, help="Devices to check [%(default)s]")
    opts = my_parser.parse_args()
    host_1, host_2 = opts.hosts
    if host_1.count(":"):
        host_1, dir_1 = host_1.split(":", 1)
    else:
        dir_1 = "/"
    if host_2.count(":"):
        host_2, dir_2 = host_2.split(":", 1)
    else:
        dir_2 = "/"
    print(
        "Comparing rpm_lists of {} (dir {}) and {} (dir {})".format(host_1, dir_1, host_2, dir_2)
    )
    result_1, result_2 = (None, None)
    _ns1 = net_tools.SendCommandDefaults(host=host_1, arguments=["rpmlist", dir_1])
    my_com = net_tools.SendCommand(_ns1)
    my_com.init_connection()
    if my_com.connect():
        result_1 = my_com.send_and_receive()
    my_com.close()
    _ns2 = net_tools.SendCommandDefaults(host=host_2, arguments=["rpmlist", dir_2])
    my_com = net_tools.SendCommand(_ns2)
    my_com.init_connection()
    if my_com.connect():
        result_2 = my_com.send_and_receive()
    my_com.close()
    if result_1 is None or result_2 is None:
        # bail out instead of crashing on an unbound result
        print("error contacting one of the hosts")
        sys.exit(1)
    rpm_dict_1 = server_command.decompress(result_1["*pkg_list"], pickle=True)
    rpm_dict_2 = server_command.decompress(result_2["*pkg_list"], pickle=True)
    keys_1 = sorted(rpm_dict_1.keys())
    keys_2 = sorted(rpm_dict_2.keys())
    missing_in_1 = [x for x in keys_2 if x not in keys_1]
    missing_in_2 = [x for x in keys_1 if x not in keys_2]
    for missing_in, host, _dir in [
        (missing_in_1, host_1, dir_1),
        (missing_in_2, host_2, dir_2),
    ]:
        if missing_in:
            print(
                "{} missing on {} (dir {}):".format(
                    logging_tools.get_plural("package", len(missing_in)), host, _dir
                )
            )
            if opts.detail:
                print("\n".join(missing_in))
            else:
                print(" ".join(missing_in))
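# Hypothetical invocation (script name, host names and directories are
# examples only):
#
#   ./compare_rpm_lists.py -d nodeA:/ nodeB:/opt
#
# prints the packages present on one host (below the given directory) but
# missing on the other; -d lists one package per line.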
def main(opts):
    print("Has to be rewritten to use new SQLite-based backend")
    sys.exit(5)
    # everything below is currently unreachable
    store = ConfigStore(CS_NAME)
    sink = HRSink(opts)
    for _key, _value in store.get_dict().items():
        sink.feed(_key, _value)
    _addr_list = [
        _entry.strip().lower() for _entry in opts.address.split(",") if _entry.strip()
    ]
    sink.filter([])
    _changed = False
    if opts.mode == "dump":
        sink.filter(_addr_list)
        sink.dump()
    elif opts.mode == "remove":
        if not _addr_list:
            print("no addresses given to remove")
            sys.exit(-1)
        print(
            "{}: {}".format(
                logging_tools.get_plural("remove address", len(_addr_list)),
                ", ".join(_addr_list),
            )
        )
        _del_keys = []
        for _key in list(store.keys()):
            _proto, _addr, _port = parse_key(_key)
            if opts.port and _port != opts.port:
                continue
            if _addr.lower() in _addr_list:
                print("removing {} [UUID: {}]".format(_key, store[_key]))
                _del_keys.append(_key)
        if _del_keys:
            _changed = True
            print(
                "{} to delete".format(
                    logging_tools.get_plural("entry", len(_del_keys))
                )
            )
            for _key in _del_keys:
                del store[_key]
            store.write()
        else:
            print("nothing changed")
    if _changed:
        reload_relay()
def _check_for_finished_runs(self):
    # remove PlannedRuns which should be deleted
    _removed = 0
    for _dev_idx, pdrf_list in self.__device_planned_runs.iteritems():
        _keep = [entry for entry in pdrf_list if not entry.to_delete]
        _removed += len(pdrf_list) - len(_keep)
        self.__device_planned_runs[_dev_idx] = _keep
    if _removed:
        self.log(
            "Removed {}".format(
                logging_tools.get_plural("PlannedRunsForDevice", _removed)
            )
        )
def restore(self, idx=None):
    if idx is not None:
        __move_files = [
            _entry for _entry in self.__move_files if int(_entry[0:4]) == idx
        ]
    else:
        __move_files = self.__move_files
    self.__move_files = [
        _entry for _entry in self.__move_files if _entry not in __move_files
    ]
    print(
        "moving back {} above {:04d}_* ({})".format(
            logging_tools.get_plural("migration", len(__move_files)),
            self.__min_idx,
            logging_tools.get_plural("file", len(__move_files)),
        )
    )
    for _move_file in __move_files:
        shutil.move(
            os.path.join(self.__tmp_dir, _move_file),
            os.path.join(self.__dir_name, _move_file),
        )
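# The idx selection above relies on Django's migration naming convention:
# the file name starts with a four-digit, zero-padded index. A minimal
# sketch (file name is an example only):

def migration_index(file_name):
    # "0004_auto_20160101.py" -> 4
    return int(file_name[0:4])


assert migration_index("0004_auto_20160101.py") == 4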
def interpret(self, srv_com, cur_ns):
    modules = server_command.decompress(srv_com["*modules"], json=True)
    if cur_ns.required:
        _required = set(cur_ns.required.split(","))
        _found = set([_part[0] for _part in modules])
        if _required & _found == _required:
            return limits.mon_STATE_OK, "{} found: {}".format(
                logging_tools.get_plural("required module", len(_required)),
                ", ".join(sorted(list(_required)))
            )
        else:
            _missing = _required - _found
            return limits.mon_STATE_CRITICAL, "{} required, {} missing: {}".format(
                logging_tools.get_plural("module", len(_required)),
                logging_tools.get_plural("module", len(_missing)),
                ", ".join(sorted(list(_missing)))
            )
    else:
        return limits.mon_STATE_OK, "loaded {}".format(
            logging_tools.get_plural("module", len(modules))
        )
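# The required-module check above is plain set logic; a minimal,
# self-contained sketch with hypothetical module names:

required = {"dm_mod", "bonding"}
found = {"dm_mod", "e1000", "bonding", "xfs"}

# "required & found == required" holds exactly when required is a subset of found
assert (required & found == required) == (required <= found)

missing = required - found  # empty set -> OK, otherwise CRITICAL
assert missing == set()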
def post(self, request):
    _post = request.POST
    num_ok, num_error = (0, 0)
    for pdc_pk in json.loads(_post["remove_list"]):
        try:
            cur_pdc = package_device_connection.objects.get(Q(pk=pdc_pk))
        except package_device_connection.DoesNotExist:
            num_error += 1
        else:
            cur_pdc.delete()
            num_ok += 1
    if num_ok:
        request.xml_response.info(
            "{} removed".format(
                logging_tools.get_plural("connection", num_ok)
            ),
            logger
        )
    if num_error:
        request.xml_response.error(
            "{} not there".format(
                logging_tools.get_plural("connection", num_error)
            ),
            logger
        )
def send_passive_results_to_master(self, result_list):
    self.log(
        "sending {} to master".format(
            logging_tools.get_plural("passive result", len(result_list))
        )
    )
    srv_com = server_command.srv_command(command="passive_check_results")
    _bldr = srv_com.builder()
    srv_com["results"] = _bldr.passive_results(
        *[
            # FIXME, TODO
            _bldr.passive_result("d")
        ]
    )
    self.send_to_syncer(srv_com)
def _state_overview(opt_ns, result):
    _instances = result.xpath(".//ns:instances/ns:instance")
    print(
        "instances reported: {}".format(
            logging_tools.get_plural("instance", len(_instances))
        )
    )
    for _inst in _instances:
        _states = []
        last_states = None
        for _src_state in result.xpath(".//ns:state", start_el=_inst):
            # todo: remove duplicates
            _states.append(_src_state)
        _actions = result.xpath(".//ns:action", start_el=_inst)
        print(
            "{:<30s}, target state is {:<20s} [{}, {}], {} / {} in the last 24 hours".format(
                _inst.get("name"),
                {
                    0: "stopped",
                    1: "started",
                }[int(_inst.attrib["target_state"])],
                "active" if int(_inst.attrib["active"]) else "inactive",
                "ignored" if int(_inst.attrib["ignore"]) else "watched",
                logging_tools.get_plural("state", len(_states)),
                logging_tools.get_plural("action", len(_actions)),
            )
        )
        if opt_ns.state:
            for _cur_s in _states:
                print(
                    "    {} pstate={}, cstate={}, license_state={} [{}]".format(
                        time.ctime(int(_cur_s.attrib["created"])),
                        STATE_DICT[int(_cur_s.attrib["pstate"])],
                        CONF_STATE_DICT[int(_cur_s.attrib["cstate"])],
                        LIC_STATE_DICT[int(_cur_s.attrib["license_state"])],
                        _cur_s.attrib["proc_info_str"],
                    )
                )
        if opt_ns.action:
            for _cur_a in _actions:
                print(
                    "    {} action={}, runtime={} [{} / {}]".format(
                        time.ctime(int(_cur_a.attrib["created"])),
                        _cur_a.attrib["action"],
                        _cur_a.attrib["runtime"],
                        _cur_a.attrib["finished"],
                        _cur_a.attrib["success"],
                    )
                )
def process_init(self):
    self.__log_template = logging_tools.get_logger(
        self.__options.log_name,
        get_log_path(icswLogHandleTypes(self.__options.handle)),
        context=self.zmq_context,
    )
    self.__log_template.log_command("set_max_line_length {:d}".format(256))
    self.__log_str = self.__options.mult * (" ".join(self.__options.args))
    self.log(
        "log_str has {}".format(
            logging_tools.get_plural("byte", len(self.__log_str))
        )
    )
    self.register_func("start_logging", self._start_logging)
def handle_local_sync_slave(self, srv_com):
    # create send commands
    _to_send, _num_files, _size_data = self._get_send_commands()
    self.log(
        "local sync, handling {} ({})".format(
            logging_tools.get_plural("file", _num_files),
            logging_tools.get_size_str(_size_data),
        )
    )
    # and process them (do not shadow the srv_com argument)
    for _send_com in _to_send:
        self.struct.handle_direct_action(_send_com["*action"], _send_com)
def delete_entry(self, request, **kwargs):
    dvs_entry = dvs_allowed_name.objects.get(Q(pk=kwargs["pk"]))
    can_delete_answer = can_delete_obj(dvs_entry, logger)
    if can_delete_answer:
        dvs_entry.delete()
    else:
        raise ValidationError(
            "cannot delete: {}".format(
                logging_tools.get_plural(
                    "reference", len(can_delete_answer.related_objects)
                )
            )
        )
    return Response(status=status.HTTP_204_NO_CONTENT)
def _show_cache_info(self):
    if self.__cache:
        self.log(
            "cache is present ({}, age is {}, timeout {}, {})".format(
                logging_tools.get_plural("entry", len(self.__cache)),
                logging_tools.get_diff_time_str(self.__cache_age),
                logging_tools.get_diff_time_str(self.Meta.cache_timeout),
                "valid" if self.__cache_valid else "invalid",
            )
        )
    else:
        self.log("no cache set")
def parse(self, srv_com):
    if "arg_list" in srv_com:
        args = srv_com["*arg_list"].strip().split()
    else:
        args = []
    self.log(
        "got {}: '{}'".format(
            logging_tools.get_plural("argument", len(args)),
            " ".join(args),
        )
    )
    return self.parser.parse_args(args)
def ext_command(self, srv_com, **kwargs):
    _lines = srv_com["*lines"]
    if self._icinga_pc:
        self.log(
            "sending {} as external command".format(
                logging_tools.get_plural("line", len(_lines))
            )
        )
        self._icinga_pc.write_external_cmd_file(_lines)
    else:
        self.log("no icinga_pc defined", logging_tools.LOG_LEVEL_ERROR)
    srv_com.set_result("Ok got command")
    return None
def __repr__(self):
    return "share_map for level {:d} cache, {}: {}".format(
        self.cache_level,
        logging_tools.get_plural("cache", self.num_caches),
        ", ".join(
            [
                "{:d} [{}]".format(
                    c_num,
                    ":".join(
                        [
                            "{:d}".format(core_num) for core_num in self.__cache_lut[c_num]
                        ]
                    )
                ) for c_num in xrange(self.num_caches)
            ]
        )
    )
def interpret(self, srv_com, cur_ns):
    if cur_ns.arguments:
        re_list = [re.compile(_arg) for _arg in cur_ns.arguments]
    else:
        re_list = []
    cur_vector = srv_com["data:machine_vector"]
    if cur_ns.raw:
        return limits.mon_STATE_OK, etree.tostring(cur_vector)  # @UndefinedVariable
    else:
        vector_keys = sorted(
            srv_com.xpath(
                ".//ns:mve/@name",
                start_el=cur_vector,
                smart_strings=False,
            )
        )
        used_keys = [
            key for key in vector_keys if any([cur_re.search(key) for cur_re in re_list]) or not re_list
        ]
        ret_array = [
            "Machinevector id {}, {}, {} shown:".format(
                cur_vector.attrib["version"],
                logging_tools.get_plural("key", len(vector_keys)),
                logging_tools.get_plural("key", len(used_keys)),
            )
        ]
        out_list = logging_tools.new_form_list()
        max_num_keys = 0
        _list = []
        for mv_num, mv_key in enumerate(vector_keys):
            if mv_key in used_keys:
                cur_xml = srv_com.xpath(
                    "//ns:mve[@name='{}']".format(mv_key),
                    start_el=cur_vector,
                    smart_strings=False,
                )[0]
                _mv = hm_classes.mvect_entry(cur_xml.attrib.pop("name"), **cur_xml.attrib)
                _list.append((mv_num, _mv))
                max_num_keys = max(max_num_keys, _mv.num_keys)
        for mv_num, entry in _list:
            out_list.append(entry.get_form_entry(mv_num, max_num_keys))
        ret_array.extend(unicode(out_list).split("\n"))
        return limits.mon_STATE_OK, "\n".join(ret_array)
def post(self, request):
    _post = request.POST
    c_dict = json.loads(_post["change_dict"])
    # import pprint
    # pprint.pprint(c_dict)
    edit_obj = c_dict["edit_obj"]
    changed = 0
    for cur_pdc in package_device_connection.objects.filter(
        Q(pk__in=c_dict["pdc_list"])
    ).prefetch_related(
        "kernel_list",
        "image_list",
    ):
        change = False
        # flags
        for f_name in ["force_flag", "nodeps_flag"]:
            if f_name in edit_obj and edit_obj[f_name]:
                # print "**", f_name, edit_obj[f_name], int(edit_obj[f_name])
                t_flag = True if int(edit_obj[f_name]) else False
                if t_flag != getattr(cur_pdc, f_name):
                    setattr(cur_pdc, f_name, t_flag)
                    change = True
        # target state
        if edit_obj["target_state"] and edit_obj["target_state"] != cur_pdc.target_state:
            change = True
            cur_pdc.target_state = edit_obj["target_state"]
        # dependencies
        for dep, dep_obj in [("image", image), ("kernel", kernel)]:
            f_name = "{}_dep".format(dep)
            if edit_obj[f_name]:
                _set = True if int(edit_obj[f_name]) else False
                if _set != getattr(cur_pdc, f_name):
                    setattr(cur_pdc, f_name, _set)
                    change = True
            if edit_obj["{}_change".format(dep)]:
                l_name = "{}_list".format(dep)
                new_list = dep_obj.objects.filter(Q(pk__in=edit_obj[l_name]))
                setattr(cur_pdc, l_name, new_list)
                change = True
        if change:
            changed += 1
            cur_pdc.save()
    request.xml_response.info(
        "{} updated".format(logging_tools.get_plural("PDC", changed)),
        logger
    )
    srv_com = server_command.srv_command(command="new_config")
    result = contact_server(request, icswServiceEnum.package_server, srv_com, timeout=10, log_result=False)
    if result:
        # print result.pretty_print()
        request.xml_response.info("sent sync to server", logger)
def check_md5_sums(self):
    self.__checks.append("md5")
    files_to_check = sorted(
        [
            os.path.normpath(os.path.join(self.path, f_name)) for f_name in [
                "bzImage", "initrd.gz", "xen.gz", "modules.tar.bz2"
            ] + [
                "initrd_{}.gz".format(key) for key in KNOWN_INITRD_FLAVOURS
            ]
        ]
    )
    md5s_to_check = {
        p_name: os.path.normpath(
            os.path.join(self.path, ".{}_md5".format(os.path.basename(p_name)))
        ) for p_name in files_to_check if os.path.exists(p_name)
    }
    md5s_to_remove = sorted(
        [
            md5_file for md5_file in [
                os.path.normpath(
                    os.path.join(self.path, ".{}_md5".format(os.path.basename(p_name)))
                ) for p_name in files_to_check if not os.path.exists(p_name)
            ] if os.path.exists(md5_file)
        ]
    )
    if md5s_to_remove:
        self.log(
            "removing {}: {}".format(
                logging_tools.get_plural("MD5 file", len(md5s_to_remove)),
                ", ".join(md5s_to_remove)
            ),
            logging_tools.LOG_LEVEL_WARN
        )
        for md5_to_remove in md5s_to_remove:
            md5_name = os.path.basename(md5_to_remove)[1:]
            if md5_name in self.__option_dict:
                del self.__option_dict[md5_name]
            try:
                os.unlink(md5_to_remove)
            except:
                self.log(
                    "error removing {}: {}".format(
                        md5_to_remove,
                        process_tools.get_except_info()
                    ),
                    logging_tools.LOG_LEVEL_ERROR
                )
    if md5s_to_check:
        for src_file, md5_file in md5s_to_check.items():
            md5_name = os.path.basename(md5_file)[1:]
            new_md5 = True
            if os.path.exists(md5_file):
                if os.stat(src_file)[stat.ST_MTIME] < os.stat(md5_file)[stat.ST_MTIME]:
                    # checksum file is newer than the source, no need to recalculate
                    new_md5 = False
            if new_md5:
                self.log(
                    "doing MD5-sum for {} (stored in {})".format(
                        os.path.basename(src_file),
                        os.path.basename(md5_file)
                    ),
                )
                self.__option_dict[md5_name] = hashlib.md5(open(src_file, "rb").read()).hexdigest()
                open(md5_file, "w").write(self.__option_dict[md5_name])
            else:
                self.__option_dict[md5_name] = open(md5_file, "r").read()
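# The regeneration rule above boils down to an mtime comparison; a minimal,
# self-contained sketch (helper name is hypothetical):
import os


def md5_is_stale(src_file, md5_file):
    # recalculate when the checksum file is missing or not newer than the source
    if not os.path.exists(md5_file):
        return True
    return os.stat(src_file).st_mtime >= os.stat(md5_file).st_mtime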
def _copy_image(self, cur_img):
    """ copy image """
    self.log("copying {}".format(logging_tools.get_plural("directory", len(self.__dir_list))))
    for dir_num, cur_dir in enumerate(self.__dir_list, 1):
        self.log(
            "[{:2d} of {:2d}] copying directory {}".format(
                dir_num,
                len(self.__dir_list),
                cur_dir,
            )
        )
        s_time = time.time()
        self._call(
            cur_img,
            "cp -a {} {}".format(
                os.path.join(cur_img.source, cur_dir),
                os.path.join(self.__system_dir, cur_dir)
            )
        )
        e_time = time.time()
        self.log(
            "copied directory {} in {}".format(
                cur_dir,
                logging_tools.get_diff_time_str(e_time - s_time)
            )
        )
    for cur_file in self.__file_list:
        s_time = time.time()
        shutil.copy2(
            os.path.join(cur_img.source, cur_file),
            os.path.join(self.__system_dir, cur_file),
        )
        e_time = time.time()
        self.log(
            "copied file {} in {}".format(
                cur_file,
                logging_tools.get_diff_time_str(e_time - s_time)
            )
        )
    for cur_link in self.__link_list:
        s_time = time.time()
        self._call(
            cur_img,
            "cp -a {} {}".format(
                os.path.join(cur_img.source, cur_link),
                os.path.join(self.__system_dir, cur_link),
            )
        )
        e_time = time.time()
        self.log(
            "copied link {} in {}".format(
                cur_link,
                logging_tools.get_diff_time_str(e_time - s_time)
            )
        )
def post(self, request):
    from initat.cluster.backbone.server_enums import icswServiceEnum
    _post = request.POST
    dev_pk_list = json.loads(_post["dev_pk_list"])
    cur_devs = {
        _dev.pk: _dev for _dev in device.objects.filter(Q(pk__in=dev_pk_list))
    }
    soft_state = _post["command"]
    logger.info(
        "sending soft_control '{}' to {}: {}".format(
            soft_state,
            logging_tools.get_plural("device", len(dev_pk_list)),
            logging_tools.reduce_list(
                sorted([str(cur_dev) for cur_dev in cur_devs.values()])
            ),
        )
    )
    srv_com = server_command.srv_command(command="soft_control")
    srv_com["devices"] = srv_com.builder(
        "devices",
        *[
            srv_com.builder(
                "device",
                soft_command=soft_state,
                pk="{:d}".format(cur_dev.pk)
            ) for cur_dev in cur_devs.values()
        ]
    )
    result = contact_server(request, icswServiceEnum.mother_server, srv_com, timeout=10, log_result=False)
    if result:
        # evaluate per-device feedback only when the server actually answered
        _ok_list, _error_list = (
            result.xpath(".//ns:device[@command_sent='1']/@pk"),
            result.xpath(".//ns:device[@command_sent='0']/@pk"),
        )
        if _ok_list:
            request.xml_response.info(
                "sent {} to {}".format(
                    soft_state,
                    logging_tools.reduce_list(
                        sorted([cur_devs[int(_pk)].full_name for _pk in _ok_list])
                    ),
                ),
                logger
            )
        if _error_list:
            request.xml_response.warn(
                "unable to send {} to {}".format(
                    soft_state,
                    logging_tools.reduce_list(
                        sorted([cur_devs[int(_pk)].full_name for _pk in _error_list])
                    ),
                ),
                logger
            )
        if not _ok_list and not _error_list:
            request.xml_response.warn("nothing to do")
def handle_snmp_basic_scan(self, errors, found, res_dict):
    _all_schemes = Schemes(self.log)
    if found:
        # any found, delete all present schemes
        self.device.snmp_schemes.clear()
        _added_pks = set()
        for _oid in found:
            _add_scheme = _all_schemes.get_scheme_by_oid(_oid)
            if _add_scheme is not None and _add_scheme.pk not in _added_pks:
                _added_pks.add(_add_scheme.pk)
                self.device.snmp_schemes.add(_add_scheme)
        if _added_pks:
            _scan_schemes = [
                _all_schemes.get_scheme(_pk) for _pk in _added_pks if _all_schemes.get_scheme(_pk).initial
            ]
            if _scan_schemes:
                self.log(
                    "doing initial run with {}".format(
                        logging_tools.get_plural("scheme", len(_scan_schemes)),
                    )
                )
                for _scan_scheme in _scan_schemes:
                    self.log("    {}".format(str(_scan_scheme)))
                self.init_run("snmp_initial_scan")
                for _scheme in _scan_schemes:
                    self.new_run(
                        True,
                        20,
                        *[
                            ("T", [simple_snmp_oid(_tl_oid.oid)]) for _tl_oid in _scheme.snmp_scheme_tl_oid_set.all()
                        ]
                    )
            else:
                self.log(
                    "found {}".format(
                        logging_tools.get_plural("scheme", len(_added_pks))
                    ),
                    result=True
                )
                self.finish()
    else:
        if errors:
            self.log(", ".join(errors), logging_tools.LOG_LEVEL_ERROR, result=True)
        else:
            self.log("initial scan was ok, but no schemes found", logging_tools.LOG_LEVEL_WARN, result=True)
        self.finish()
def license_info(self):
    _lic_info = self.raw_license_info
    _cluster_ids = set()
    # print("-" * 50)
    # import pprint
    # for _li in _lic_info:
    #     print("*")
    #     pprint.pprint(_li["date"])  # not unique
    _num = {
        "lics": 0,
        # global parameters
        "gparas": 0,
        # local parameters
        "lparas": 0,
        "packs": len(_lic_info),
    }
    for _list in [_lic_info]:
        for _lic_entry in _list:
            _cl_info = list(_lic_entry["lic_info"].keys())
            for _cl_name in _cl_info:
                _cluster_ids.add(_cl_name)
                _cl_struct = _lic_entry["lic_info"][_cl_name]
                for _skey, _dkey in [("licenses", "lics"), ("parameters", "gparas")]:
                    for _entry in _cl_struct.get(_skey, []):
                        _num[_dkey] += 1
                for _lic in _cl_struct.get("licenses", []):
                    _num["lparas"] += len(_lic["parameters"].keys())
    _num["cluster_ids"] = len(_cluster_ids)
    return "{} in {} for {}".format(
        ", ".join(
            [
                logging_tools.get_plural(_long, _num[_short]) for _long, _short in [
                    ("License", "lics"),
                    ("Global Parameter", "gparas"),
                    ("Local Parameter", "lparas"),
                ] if _num[_short]
            ]
        ) or "nothing",
        logging_tools.get_plural("Package", _num["packs"]),
        logging_tools.get_plural("Cluster", _num["cluster_ids"]),
    )
def save(self):
    self.__move_files = [
        _entry for _entry in os.listdir(self.__dir_name) if _entry.endswith(".py") and self._match(_entry)
    ]
    _del_files = [
        _entry for _entry in os.listdir(self.__dir_name) if _entry.endswith(".pyc") or _entry.endswith(".pyo")
    ]
    print(
        "moving away migrations above {:04d}_* ({}) to {}, removing {}".format(
            self.__min_idx,
            logging_tools.get_plural("file", len(self.__move_files)),
            self.__tmp_dir,
            logging_tools.get_plural("file", len(_del_files)),
        )
    )
    if _del_files:
        for _del_file in _del_files:
            _path = os.path.join(self.__dir_name, _del_file)
            try:
                os.unlink(_path)
            except:
                print(
                    "error removing {}: {}".format(
                        _path,
                        process_tools.get_except_info(),
                    )
                )
    for _move_file in self.__move_files:
        shutil.move(
            os.path.join(self.__dir_name, _move_file),
            os.path.join(self.__tmp_dir, _move_file),
        )
def get_content(self):
    cur_hash = hashlib.new("md5")
    act_list = self.object_list
    self._content = []
    _types = {}
    if act_list:
        for act_le in act_list:
            if self.ignore_content(act_le):
                continue
            _types.setdefault(act_le.obj_type, []).append(True)
            self._content.extend(act_le.emit_content())
        # feed the emitted content into the hash line by line
        for _line in self._content:
            cur_hash.update(_line)
        _info_str = "created {} for {}: {}".format(
            logging_tools.get_plural("entry", len(act_list)),
            logging_tools.get_plural("object_type", len(_types)),
            ", ".join(sorted(_types.keys())),
        )
    else:
        _info_str = ""
    return cur_hash.hexdigest(), _info_str
def show_content(self):
    # print "Passt"
    # return
    file_dict = {
        _v[1]: _v[2] for _v in self.__content_list if _v[0] == "f"
    }
    dir_dict = {
        _v[1]: _v[2] for _v in self.__content_list if _v[0] == "d"
    }
    file_keys, dir_keys = (sorted(file_dict.keys()), sorted(dir_dict.keys()))
    if file_keys:
        print(
            "Content of file-list (source -> dest, {}):".format(
                logging_tools.get_plural("entry", len(file_keys))
            )
        )
        for sf in file_keys:
            print("    {:<40s} -> {}".format(sf, file_dict[sf]))
    if dir_keys:
        print(
            "Content of dir-list (source -> dest, {}):".format(
                logging_tools.get_plural("entry", len(dir_keys))
            )
        )
        for sd in dir_keys:
            print("    {:<40s} -> {}".format(sd, dir_dict[sd]))