def _resolve_gids(self, gid_list):
    if gid_list:
        for db_rec in group.objects.filter(Q(gid__in=gid_list)):
            if db_rec.gid in self.__group_dict:
                # check for new settings
                for key, value in [
                    ("source", "SQL"),
                    ("uid", db_rec.gid),
                    ("groupname", db_rec.groupname),
                    ("email", db_rec.email),
                    ("firstname", db_rec.first_name),
                    ("lastname", db_rec.last_name),
                    ("db_rec", db_rec),
                ]:
                    self.__group_dict[db_rec.gid][key] = value
            else:
                # new record
                self.__group_dict[db_rec.gid] = {
                    "source": "SQL",
                    "uid": db_rec.gid,
                    "groupname": db_rec.groupname,
                    "email": db_rec.email,
                    "firstname": db_rec.first_name,
                    "lastname": db_rec.last_name,
                    "db_rec": db_rec,
                }
            act_dict = self.__group_dict[db_rec.gid]
            act_dict["info"] = u"gid {:d}, groupname {} (from SQL), ({} {}, {})".format(
                act_dict["uid"],
                act_dict["groupname"],
                act_dict["firstname"] or "<first_name not set>",
                act_dict["lastname"] or "<last_name not set>",
                act_dict["email"] or "<email not set>",
            )
        missing_gids = set(gid_list) - set(self.__group_dict.keys())
        for missing_gid in missing_gids:
            try:
                grp_stuff = grp.getgrgid(missing_gid)
            except:
                self.log(
                    "Cannot get information for gid {:d}: {}".format(
                        missing_gid,
                        process_tools.get_except_info(),
                    ),
                    logging_tools.LOG_LEVEL_ERROR
                )
                self.__group_dict[missing_gid] = {
                    "info": "group not found in SQL or grp"
                }
            else:
                self.__group_dict[missing_gid] = {
                    "source": "grp",
                    "groupname": grp_stuff[0],
                    "info": "gid {:d}, groupname {} (from grp)".format(
                        missing_gid,
                        grp_stuff[0],
                    ),
                }
        # add missing keys
        for _gid, g_stuff in self.__group_dict.iteritems():
            g_stuff.setdefault("last_mail_sent", None)
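
# The method above merges group records from two sources: SQL is authoritative,
# the local grp database is the fallback for gids unknown to SQL. A minimal
# standalone sketch of that two-stage lookup (sql_lookup stands in for the
# Django group queryset and is purely hypothetical):
import grp


def resolve_gid(gid, sql_lookup):
    # first stage: authoritative SQL data
    if gid in sql_lookup:
        return {"source": "SQL", "groupname": sql_lookup[gid]}
    # second stage: local group database via grp
    try:
        return {"source": "grp", "groupname": grp.getgrgid(gid).gr_name}
    except KeyError:
        # mirror the "not found anywhere" record of the method above
        return {"info": "group not found in SQL or grp"}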
def write_node_config(self, cur_c, cur_bc):
    cur_c.log("creating tree")
    ConfigTreeNode.objects.filter(
        Q(device=cur_bc.conf_dict["device"])
    ).delete()
    write_list = self.write_node(cur_c, cur_bc)
    nodes_written = len(write_list)
    # print write_list
    active_identifier = cur_bc.conf_dict["net"].identifier.replace(" ", "_")
    cur_c.log(
        "writing config files for {} to {}".format(
            active_identifier,
            cur_c.node_dir,
        )
    )
    config_dir = os.path.join(cur_c.node_dir, "content_{}".format(active_identifier))
    if not os.path.isdir(config_dir):
        cur_c.log("creating directory {}".format(config_dir))
        os.mkdir(config_dir)
    config_dict = {
        "f": os.path.join(cur_c.node_dir, "config_files_{}".format(active_identifier)),
        "l": os.path.join(cur_c.node_dir, "config_links_{}".format(active_identifier)),
        "d": os.path.join(cur_c.node_dir, "config_dirs_{}".format(active_identifier)),
        "e": os.path.join(cur_c.node_dir, "config_delete_{}".format(active_identifier)),
    }
    _line_dict = {}
    num_dict = dict([(key, 0) for key in config_dict.iterkeys()])
    for cur_tn, cur_wc in write_list:
        if cur_wc.dest_type not in ["i", "?"] and not cur_tn.intermediate:
            eff_type = cur_tn.node.content_node.get_effective_type()
            _lines = _line_dict.setdefault(eff_type, [])
            num_dict[eff_type] += 1
            out_name = os.path.join(config_dir, "{:d}".format(num_dict[eff_type]))
            try:
                add_line = cur_tn.node.content_node.write_object(out_name)
            except:
                cur_c.log(
                    "error creating node {}: {}".format(
                        cur_tn.node.content_node.dest,
                        process_tools.get_except_info(),
                    ),
                    logging_tools.LOG_LEVEL_CRITICAL
                )
            else:
                _lines.append("{:d} {}".format(num_dict[eff_type], add_line))
    for _key, _lines in _line_dict.iteritems():
        file(config_dict[_key], "w").write("\n".join(_lines + [""]))
    cur_c.log("wrote {}".format(logging_tools.get_plural("file", len(_line_dict))))
    # print cur_c.node_dir, dir(cur_c)
    # print cur_bc.conf_dict["net"]
    # pprint.pprint(cur_bc.conf_dict)
    cur_c.log("wrote {}".format(logging_tools.get_plural("node", nodes_written)))
def _scan_dir(self, _scan_user, _home_dir):
    _s_time = time.time()
    _prev_runs = list(user_scan_run.objects.filter(Q(user=_scan_user)))
    new_run = user_scan_run.objects.create(
        user=_scan_user,
        running=True,
        scan_depth=_scan_user.scan_depth,
    )
    new_run.save()
    _size_dict = ScanSubDir(_home_dir)
    _start_dir = _home_dir
    _top_depth = _start_dir.count("/")
    try:
        nfs_mounts, nfs_ignore = (set(), [])
        _last_dir = ""
        for _main, _dirs, _files in os.walk(_start_dir):
            if os.path.ismount(_main):
                nfs_mounts.add(_main)
                continue
            elif any([_main.startswith(_nfs) for _nfs in nfs_mounts]):
                nfs_ignore.append(_main)
                continue
            _last_dir = _main
            _cur_depth = _main.count("/")
            _parts = _main.split("/")
            _max_depth = min(_top_depth + _scan_user.scan_depth, _cur_depth)
            _key = "/".join(_parts[:_max_depth + 1])
            # print _parts, _key
            cur_dict = _size_dict
            for _skey in _parts[_top_depth:_max_depth + 1]:
                cur_dict = cur_dict.add_sub_dir(_skey)
            cur_dict.dirs += 1
            for _file in _files:
                try:
                    cur_dict.files += 1
                    cur_dict.size += os.stat(os.path.join(_main, _file))[stat.ST_SIZE]
                except:
                    pass
        if nfs_mounts:
            self.log(
                "ignored {} on {}".format(
                    logging_tools.get_plural("NFS directory", len(nfs_ignore)),
                    logging_tools.get_plural("NFS mount", len(nfs_mounts)),
                )
            )
    except UnicodeDecodeError:
        self.log(
            "UnicodeDecode: {}, _last_dir is '{}'".format(
                process_tools.get_except_info(),
                _last_dir,
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
        if global_config["DEBUG"]:
            raise
    # store current
    _size_dict.create_db_entries(new_run)
    _e_time = time.time()
    new_run.current = True
    new_run.running = False
    new_run.run_time = abs((_e_time - _s_time) * 1000)
    new_run.save()
    [_prev_run.delete() for _prev_run in _prev_runs]
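
# _scan_dir caps the aggregation depth: files deeper than scan_depth levels are
# accounted to the ancestor directory at the cut-off level. A self-contained
# sketch of that bucketing (no DB writes, no NFS handling), using only the stdlib:
import os
import stat


def scan_sizes(start_dir, scan_depth):
    top_depth = start_dir.count("/")
    size_dict = {}
    for main, _dirs, files in os.walk(start_dir):
        # truncate the path at scan_depth levels below start_dir
        parts = main.split("/")
        max_depth = min(top_depth + scan_depth, main.count("/"))
        key = "/".join(parts[:max_depth + 1])
        entry = size_dict.setdefault(key, {"files": 0, "size": 0})
        for f_name in files:
            try:
                entry["size"] += os.stat(os.path.join(main, f_name))[stat.ST_SIZE]
                entry["files"] += 1
            except OSError:
                # broken symlinks / vanished files are simply skipped
                pass
    return size_dict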
def get_partition(self, *args):
    part_name = args[0]
    loc_tree = GeneratedTree()
    loc_dev = device.objects.get(Q(pk=self.pk))
    self.log("set act_partition_table and partdev to %s" % (part_name))
    loc_dev.act_partition_table = loc_dev.partition_table
    loc_dev.partdev = part_name
    loc_dev.save()
    success = False
    dummy_cont = BuildContainer(self, {}, {"device": loc_dev}, loc_tree, None)
    try:
        loc_ps = icswPartitionSetup(dummy_cont, self.log)
    except:
        self.log(
            "cannot generate partition info: {}".format(
                process_tools.get_except_info()
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
        for _line in process_tools.icswExceptionInfo().log_lines:
            self.log("  {}".format(_line), logging_tools.LOG_LEVEL_ERROR)
    else:
        base_dir = os.path.join(global_config["CONFIG_DIR"], loc_dev.name)
        pinfo_dir = os.path.join(base_dir, "pinfo")
        if not os.path.isdir(pinfo_dir):
            try:
                os.mkdir(pinfo_dir)
            except OSError:
                self.log(
                    "cannot create pinfo_directory %s: %s" % (
                        pinfo_dir,
                        process_tools.get_except_info(),
                    ),
                    logging_tools.LOG_LEVEL_ERROR
                )
            else:
                self.log("created pinfo directory %s" % (pinfo_dir))
        if os.path.isdir(pinfo_dir):
            for file_name in os.listdir(pinfo_dir):
                try:
                    os.unlink("%s/%s" % (pinfo_dir, file_name))
                except:
                    self.log(
                        "error removing %s in %s: %s" % (
                            file_name,
                            pinfo_dir,
                            process_tools.get_except_info(),
                        ),
                        logging_tools.LOG_LEVEL_ERROR
                    )
            loc_ps.create_part_files(pinfo_dir)
            success = True
    return success
def get_connection(self):
    try:
        import sqlite3
        conn = sqlite3.connect(database=self["database"])
    except:
        print("cannot connect: {}".format(process_tools.get_except_info()))
        conn = None
    return conn
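
# The factory above returns None instead of raising, so callers have to branch
# on the result. A minimal standalone sketch of the same pattern (":memory:"
# is a stand-in for the configured database path):
import sqlite3


def get_connection_or_none(db_path=":memory:"):
    try:
        return sqlite3.connect(database=db_path)
    except sqlite3.Error as exc:
        print("cannot connect: {}".format(exc))
        return None


conn = get_connection_or_none()
if conn is not None:
    conn.execute("SELECT 1")
    conn.close()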
def check_version_file(self):
    self.__checks.append("versionfile")
    kernel_version, k_ver, k_rel = (self.name.split("_")[0], 1, 1)
    if kernel_version == self.name:
        config_name = ""
    else:
        config_name = self.name[len(kernel_version) + 1:]
    build_mach = ""
    version_file = os.path.join(self.path, ".version")
    if os.path.isfile(version_file):
        try:
            version_dict = {
                _part.split("=", 1)[0]: _part.split("=", 1)[1] for _part in [
                    _line.strip() for _line in file(version_file, "r").read().split("\n") if _line.count("=")
                ]
            }
        except:
            self.log(
                "error parsing version-file '{}': {}".format(
                    version_file,
                    process_tools.get_except_info(),
                ),
                logging_tools.LOG_LEVEL_ERROR
            )
        else:
            version_dict = {
                _key: _value for _key, _value in version_dict.iteritems()
            }
            if version_dict.get("kernelversion", kernel_version) != kernel_version:
                self.log(
                    "warning: parsed kernel_version '{}' != version_file version '{}', using info from version_file".format(
                        kernel_version,
                        version_dict["kernelversion"],
                    ),
                    logging_tools.LOG_LEVEL_WARN
                )
                kernel_version = version_dict["kernelversion"]
            if version_dict.get("configname", config_name) != config_name:
                self.log(
                    "warning: parsed config_name '{}' != version_file config_name '{}', using info from version_file".format(
                        config_name,
                        version_dict["configname"],
                    ),
                    logging_tools.LOG_LEVEL_WARN
                )
                config_name = version_dict["configname"]
            if "version" in version_dict:
                k_ver, k_rel = [int(x) for x in version_dict["version"].split(".", 1)]
            if "buildmachine" in version_dict:
                build_mach = version_dict["buildmachine"].split(".")[0]
                self.__option_dict["kernel_is_local"] = build_mach == self.__config["SERVER_SHORT_NAME"]
    if config_name:
        config_name = "/usr/src/configs/.config_{}".format(config_name)
    self.__values["kernel_version"] = kernel_version
    self.__values["version"] = k_ver
    self.__values["release"] = k_rel
    self.__values["config_name"] = config_name
    return build_mach
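
# The .version parser above reduces to "split each line containing '=' on the
# first '='". A self-contained sketch of that parsing step, fed with a literal
# string instead of the file (the sample content is hypothetical):
SAMPLE = "kernelversion=3.10.0\nconfigname=default\nversion=1.4\nbuildmachine=build01.example.com"

version_dict = {
    part.split("=", 1)[0]: part.split("=", 1)[1]
    for part in [
        line.strip() for line in SAMPLE.split("\n") if line.count("=")
    ]
}
# -> {'kernelversion': '3.10.0', 'configname': 'default',
#     'version': '1.4', 'buildmachine': 'build01.example.com'}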
def __call__(self, srv_com, cur_ns):
    sge_dict = self.module.sge_dict
    if not cur_ns.sge_host:
        srv_com.set_result(
            "need queue and host value",
            server_command.SRV_REPLY_STATE_ERROR
        )
    else:
        cur_stat, cur_out = commands.getstatusoutput(
            os.path.join(sge_dict["SGE_ROOT"], "bin", sge_dict["SGE_ARCH"], "qhost -q -xml")
        )
        if cur_stat:
            srv_com.set_result(
                "error getting qhost info ({:d}): {}".format(cur_stat, cur_out),
                server_command.SRV_REPLY_STATE_ERROR
            )
        else:
            try:
                cur_xml = etree.fromstring(cur_out)  # @UndefinedVariable
            except:
                srv_com.set_result(
                    "error building xml: {}".format(process_tools.get_except_info()),
                    server_command.SRV_REPLY_STATE_ERROR
                )
            else:
                q_el = cur_xml.xpath(
                    ".//host[@name='{}']".format(cur_ns.sge_host),
                    smart_strings=False
                )
                if not q_el:
                    if not cur_ns.sge_host.count("."):
                        # try with short name if no FQDN is given
                        q_el = cur_xml.xpath(
                            ".//host[starts-with(@name, '{}.')]".format(cur_ns.sge_host),
                            smart_strings=False
                        )
                        if not q_el:
                            # last try, only with short name
                            q_el = cur_xml.xpath(
                                ".//host[starts-with(@name, '{}')]".format(cur_ns.sge_host),
                                smart_strings=False
                            )
                    else:
                        # try short name of FQDN
                        q_el = cur_xml.xpath(
                            ".//host[starts-with(@name, '{}.')]".format(cur_ns.sge_host.split(".")[0]),
                            smart_strings=False
                        )
                if q_el:
                    q_el = q_el[0]
                    q_el.attrib["sge_host"] = cur_ns.sge_host
                    if cur_ns.sge_queue:
                        q_el.attrib["sge_queue"] = cur_ns.sge_queue
                    srv_com["queue_result"] = q_el
                else:
                    srv_com.set_result(
                        "no host/queue element found for '{}'/'{}'".format(
                            cur_ns.sge_host,
                            cur_ns.sge_queue
                        ),
                        server_command.SRV_REPLY_STATE_ERROR
                    )
    return
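
# The host lookup above degrades gracefully: exact name first, then a
# starts-with match on "<short_name>.". A standalone sketch of that XPath
# fallback chain against a tiny inline qhost-style document (lxml assumed, as
# in the handler above; the XML sample is hypothetical):
from lxml import etree

doc = etree.fromstring('<qhost><host name="node01.example.com"/></qhost>')


def find_host(tree, name):
    hits = tree.xpath(".//host[@name='{}']".format(name))
    if not hits and "." not in name:
        # no FQDN given: accept any host whose name starts with "<name>."
        hits = tree.xpath(".//host[starts-with(@name, '{}.')]".format(name))
    return hits[0] if hits else None


print(find_host(doc, "node01").get("name"))  # -> node01.example.com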
def _parse(self, logger):
    logger.info("building is_allowed struct from {}".format(self.in_str))
    self.__parts = []
    for part in self.in_str.split():
        try:
            self._feed_part(part, logger)
        except:
            logger.error(
                "error parsing in_str '{}': {}".format(
                    self.in_str,
                    process_tools.get_except_info()
                )
            )
def _call(self, cur_inst):
    source_dir = global_config["IMAGE_SOURCE_DIR"]
    if os.path.isdir(source_dir):
        t_dirs = [
            os.path.join(source_dir, sub_dir) for sub_dir in os.listdir(source_dir) if os.path.isdir(os.path.join(source_dir, sub_dir))
        ]
        valid_sys = {}
        for t_dir in t_dirs:
            dirs_found = os.listdir(t_dir)
            if len([x for x in dirs_found if x in NEEDED_IMAGE_DIRS]) == len(NEEDED_IMAGE_DIRS):
                try:
                    _log_lines, sys_dict = process_tools.fetch_sysinfo(root_dir=t_dir)
                except:
                    self.log(
                        "error fetching sysinfo from {}: {}".format(
                            t_dir,
                            process_tools.get_except_info()
                        ),
                        logging_tools.LOG_LEVEL_ERROR
                    )
                else:
                    sys_dict["bitcount"] = {
                        "i386": 32,
                        "i486": 32,
                        "i586": 32,
                        "i686": 32,
                        "x86_64": 64,
                        "alpha": 64,
                        "ia64": 64,
                    }.get(sys_dict.get("arch", "???"), 64)
                    valid_sys[os.path.basename(t_dir)] = sys_dict
            else:
                dirs_missing = [
                    x for x in NEEDED_IMAGE_DIRS if x not in dirs_found
                ]
                self.log(
                    " ... skipping {} ({} [{}] missing)".format(
                        t_dir,
                        logging_tools.get_plural("subdirectory", len(dirs_missing)),
                        ", ".join(dirs_missing)
                    )
                )
        cur_inst.srv_com.set_result(
            "found {}".format(
                logging_tools.get_plural("image", len(list(valid_sys.keys())))
            )
        )
        if valid_sys:
            image_list = cur_inst.srv_com.builder("image_list", image_dir=source_dir)
            cur_inst.srv_com["result"] = image_list
            for image_name, sys_dict in valid_sys.items():
                sys_dict["bitcount"] = "{:d}".format(sys_dict["bitcount"])
                image_list.append(
                    cur_inst.srv_com.builder("image", image_name, **sys_dict)
                )
    else:
        cur_inst.srv_com.set_result(
            "error image-source-dir '{}' not found".format(source_dir),
            server_command.SRV_REPLY_STATE_ERROR
        )
def raw_license_info(opts):
    if opts.delete:
        print(
            "Deleting LicenseFile Entry from database with idx {:d}".format(opts.delete)
        )
        try:
            License.objects.get(Q(idx=opts.delete)).delete()
        except License.DoesNotExist:
            # ignore
            pass
    out_list = logging_tools.new_form_list()
    _to_save = []
    _query = License.objects.all()
    if opts.only_valid:
        _query = _query.filter(Q(valid=True))
    for lic in _query:
        try:
            _info = License.objects.get_license_info(lic)
        except:
            _info = process_tools.get_except_info()
            _raw_info = None
            _error = True
        else:
            _raw_info = License.objects.get_raw_license_info(lic)
            _error = False
        if _error:
            if opts.mark_error:
                _valid = False
            elif opts.unmark_all:
                _valid = True
            else:
                _valid = lic.valid
        else:
            _valid = True
        if lic.valid != _valid:
            lic.valid = _valid
            _to_save.append(lic)
        # todo, extract fingerprint info from raw_license_info
        # import pprint
        # pprint.pprint(_raw_info)
        out_list.append(
            [
                logging_tools.form_entry(lic.file_name, header="Filename"),
                logging_tools.form_entry(lic.date.isoformat(), header="created"),
                logging_tools.form_entry_right(lic.idx, header="idx"),
                logging_tools.form_entry_center("valid" if lic.valid else "invalid", header="validity"),
                logging_tools.form_entry_center("error" if _error else "ok", header="error"),
                logging_tools.form_entry(_info, header="Info"),
            ]
        )
    print(str(out_list))
    if len(_to_save):
        print("")
        print("Updating LicenseFile states ({:d})".format(len(_to_save)))
        for lic_to_save in _to_save:
            lic_to_save.save(update_fields=["valid"])
        print("...done")
def send_reply(self, t_uid, srv_com):
    send_sock = self.main_socket
    _ok = True
    try:
        send_sock.send_unicode(t_uid, zmq.SNDMORE | zmq.NOBLOCK)
        send_sock.send_unicode(str(srv_com), zmq.NOBLOCK)
    except:
        self.log(
            "error sending to {}: {}".format(
                t_uid,
                process_tools.get_except_info()
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
        _ok = False
    return _ok
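
# send_reply pushes a two-frame message (peer identity, payload) through what
# is presumably a ROUTER socket; SNDMORE chains the frames, NOBLOCK turns a
# full send buffer into an exception instead of a stall. A minimal sketch of
# the same framing (socket type and endpoint are assumptions):
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.ROUTER)
sock.bind("tcp://127.0.0.1:5555")
try:
    # frame 1: addressee identity, frame 2: payload
    sock.send_unicode("peer-uuid", zmq.SNDMORE | zmq.NOBLOCK)
    sock.send_unicode("<srv_com/>", zmq.NOBLOCK)
except zmq.ZMQError as exc:
    print("send failed: {}".format(exc))
sock.close()
ctx.term()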
def _create_base_dir(opts):
    if not os.path.isdir(opts.base):
        try:
            os.makedirs(opts.base)
        except OSError:
            print(
                "Error creating base_dir '{}': {}".format(
                    opts.base,
                    process_tools.get_except_info()
                )
            )
            sys.exit(1)
        else:
            print("Successfully created base_dir '{}'".format(opts.base))
def _handle_module_command(self, srv_com):
    try:
        self.local_mc[srv_com["command"].text](srv_com)
    except:
        for log_line in process_tools.icswExceptionInfo().log_lines:
            self.log(log_line, logging_tools.LOG_LEVEL_ERROR)
        srv_com.set_result(
            "caught server exception '{}'".format(process_tools.get_except_info()),
            server_command.SRV_REPLY_STATE_CRITICAL,
        )
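
# The dispatcher above is a lookup table keyed by the command name, with a
# catch-all that converts any handler exception into an error reply. The same
# pattern in miniature (the handler names and reply tuples are hypothetical):
def _status(args):
    return (0, "up and running")


HANDLERS = {"status": _status}


def handle_command(name, args):
    try:
        return HANDLERS[name](args)
    except Exception as exc:
        # unknown command (KeyError) and handler crashes end up here alike
        return (2, "caught server exception '{}'".format(exc))


print(handle_command("status", []))
print(handle_command("nosuchcommand", []))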
def _prepare_directories(self):
    for cur_dir in [global_config["SYSLOG_DIR"]]:
        if not os.path.isdir(cur_dir):
            try:
                os.mkdir(cur_dir)
            except:
                self.log(
                    "error creating {}: {}".format(
                        cur_dir,
                        process_tools.get_except_info()
                    ),
                    logging_tools.LOG_LEVEL_ERROR
                )
def send_to_syncer(self, srv_com):
    try:
        self.main_socket.send_unicode(self.__local_syncer_uuid, zmq.SNDMORE)
        self.main_socket.send_unicode(unicode(srv_com))
    except:
        self.log(
            "cannot send to local syncer: {}".format(
                process_tools.get_except_info()
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
def update_status(self):
    if self.__last_status_update is None or abs(self.__last_status_update - time.time()) > 5:
        # update
        self.__last_status_update = time.time()
        found_inst = []
        if os.path.isdir(OPENVPN_DIR):
            for entry in os.listdir(OPENVPN_DIR):
                if entry.endswith(".conf") and not entry.startswith("."):
                    e_key = entry[:-5]
                    if e_key in self.__inst_dict:
                        try:
                            self.__inst_dict[e_key].update()
                        except:
                            self.log(
                                "unable to update instance {}: {}".format(
                                    entry,
                                    process_tools.get_except_info()
                                ),
                                logging_tools.LOG_LEVEL_ERROR
                            )
                        else:
                            found_inst.append(e_key)
                    else:
                        try:
                            new_inst = OpenVPNInstance(self.log, entry)
                        except:
                            self.log(
                                "unable to create new OpenVPNInstance for {}: {}".format(
                                    entry,
                                    process_tools.get_except_info()
                                ),
                                logging_tools.LOG_LEVEL_ERROR
                            )
                        else:
                            self.__inst_dict[e_key] = new_inst
                            found_inst.append(e_key)
        old_inst = [
            key for key in list(self.__inst_dict.keys()) if key not in found_inst
        ]
        if old_inst:
            self.log(
                "removing {}: {}".format(
                    logging_tools.get_plural("instance", len(old_inst)),
                    ", ".join(sorted(old_inst))
                )
            )
            for inst in old_inst:
                del self.__inst_dict[inst]
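
# update_status reconciles the instance cache against the *.conf files found
# on disk: known keys are updated, new ones are instantiated, keys without a
# backing file are dropped. The same reconcile loop with plain dicts (the
# directory layout and the make_inst factory are stand-ins):
import os


def reconcile(inst_dict, conf_dir, make_inst):
    found = []
    if os.path.isdir(conf_dir):
        for entry in os.listdir(conf_dir):
            if entry.endswith(".conf") and not entry.startswith("."):
                key = entry[:-5]
                if key not in inst_dict:
                    inst_dict[key] = make_inst(entry)
                found.append(key)
    # prune instances whose config file has disappeared
    for stale in [key for key in list(inst_dict.keys()) if key not in found]:
        del inst_dict[stale]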
def do_import(cur_opts, log_com):
    from initat.cluster.backbone.serializers import user_flat_serializer  # , group_flat_serializer
    from initat.cluster.backbone.models import group, home_export_list
    from django.db.models import Q
    if not os.path.exists(cur_opts.export):
        print("import file '{}' not found".format(cur_opts.export))
        return
    _imp = json.loads(open(cur_opts.export, "r").read())
    if "version" in _imp:
        pass
    else:
        _imp = {
            "version": 0,
            "groups": [],
            "users": _imp,
        }
    if cur_opts.default_group:
        default_group = group.objects.get(Q(groupname=cur_opts.default_group))
    else:
        default_group = None
    hel = home_export_list()
    exp_dict = hel.exp_dict
    # todo, import groups
    for _user in _imp["users"]:
        data = user_flat_serializer(data=_user)
        if not data.is_valid():
            if "group" in data.errors and default_group:
                _user["group"] = default_group.pk
                data = user_flat_serializer(data=_user)
        if not data.is_valid():
            if "export" in data.errors and len(list(exp_dict.keys())) == 1:
                _user["export"] = list(exp_dict.keys())[0]
                data = user_flat_serializer(data=_user)
        if not data.is_valid():
            log_com("")
            log_com("-" * 50)
            log_com("Cannot import user")
            log_com(str(_user))
            log_com("errors:")
            log_com(str(data.errors))
            log_com("-" * 50)
            log_com("")
        else:
            try:
                data.object.save()
            except:
                log_com(
                    "Cannot create user '{}': {}".format(
                        str(data.object),
                        process_tools.get_except_info(),
                    )
                )
            else:
                log_com(
                    "created user '{}'".format(str(data.object))
                )
def check_md5_sums(self):
    self.__checks.append("md5")
    files_to_check = sorted(
        [
            os.path.normpath(os.path.join(self.path, f_name)) for f_name in [
                "bzImage", "initrd.gz", "xen.gz", "modules.tar.bz2"
            ] + [
                "initrd_{}.gz".format(key) for key in KNOWN_INITRD_FLAVOURS
            ]
        ]
    )
    md5s_to_check = {
        p_name: os.path.normpath(
            os.path.join(self.path, ".{}_md5".format(os.path.basename(p_name)))
        ) for p_name in files_to_check if os.path.exists(p_name)
    }
    md5s_to_remove = sorted(
        [
            md5_file for md5_file in [
                os.path.normpath(
                    os.path.join(self.path, ".{}_md5".format(os.path.basename(p_name)))
                ) for p_name in files_to_check if not os.path.exists(p_name)
            ] if os.path.exists(md5_file)
        ]
    )
    if md5s_to_remove:
        self.log(
            "removing {}: {}".format(
                logging_tools.get_plural("MD5 file", len(md5s_to_remove)),
                ", ".join(md5s_to_remove)
            ),
            logging_tools.LOG_LEVEL_WARN
        )
        for md5_to_remove in md5s_to_remove:
            md5_name = os.path.basename(md5_to_remove)[1:]
            if md5_name in self.__option_dict:
                del self.__option_dict[md5_name]
            try:
                os.unlink(md5_to_remove)
            except:
                self.log(
                    "error removing {}: {}".format(
                        md5_to_remove,
                        process_tools.get_except_info()
                    ),
                    logging_tools.LOG_LEVEL_ERROR
                )
    if md5s_to_check:
        for src_file, md5_file in md5s_to_check.items():
            md5_name = os.path.basename(md5_file)[1:]
            new_bz5 = True
            if os.path.exists(md5_file):
                if os.stat(src_file)[stat.ST_MTIME] < os.stat(md5_file)[stat.ST_MTIME]:
                    new_bz5 = False
            if new_bz5:
                self.log(
                    "doing MD5-sum for {} (stored in {})".format(
                        os.path.basename(src_file),
                        os.path.basename(md5_file)
                    ),
                )
                self.__option_dict[md5_name] = hashlib.md5(open(src_file, "rb").read()).hexdigest()
                open(md5_file, "w").write(self.__option_dict[md5_name])
            else:
                self.__option_dict[md5_name] = open(md5_file, "r").read()
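
# check_md5_sums caches digests in dot-files and only rehashes when the source
# is newer than its cache, i.e. a plain mtime comparison. A self-contained
# sketch of that caching rule (the file naming follows the method above):
import hashlib
import os


def cached_md5(src_file):
    md5_file = os.path.join(
        os.path.dirname(src_file),
        ".{}_md5".format(os.path.basename(src_file)),
    )
    if os.path.exists(md5_file) and os.stat(src_file).st_mtime < os.stat(md5_file).st_mtime:
        # cache is at least as new as the source: reuse it
        return open(md5_file, "r").read()
    digest = hashlib.md5(open(src_file, "rb").read()).hexdigest()
    open(md5_file, "w").write(digest)
    return digest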
def handle_result(self, mes_id, result):
    cur_mes = self.messages[mes_id]
    # default: no reuse (detection not possible or not important)
    _reuse = False
    if self.zmq_id != DUMMY_0MQ_ID:
        if "machine_uuid" in result:
            mach_uuid, dyn_uuid = (
                result["*machine_uuid"],
                result["*dynamic_uuid"],
            )
        else:
            mach_uuid, dyn_uuid = (
                self.zmq_id,
                ""
            )
        # reuse detected ?
        _reuse = HostConnection.zmq_discovery.update_mapping(
            self.__conn_str,
            self.zmq_id,
            mach_uuid,
            dyn_uuid
        )
    if cur_mes.sent:
        # ???
        cur_mes.sent = False
    if len(result.xpath(".//ns:raw", smart_strings=False)):
        # raw response, no interpret
        cur_mes.srv_com = result
        self.send_result(cur_mes, None)
        # self.send_result(cur_mes, None)
    else:
        try:
            if _reuse:
                _map = HostConnection.zmq_discovery.get_mapping(self.__conn_str)
                # print(id(_map))
                ret = ExtReturn(
                    limits.mon_STATE_CRITICAL,
                    "0MQ-ID reuse detected ({})".format(
                        _map.reuse_info,
                    )
                )
                # _map.clear_reuse()
            else:
                ret = ExtReturn.get_ext_return(cur_mes.interpret(result))
        except:
            ret = ExtReturn(
                limits.mon_STATE_CRITICAL,
                "error interpreting result: {}".format(
                    process_tools.get_except_info()
                )
            )
            exc_info = process_tools.icswExceptionInfo()
            for line in exc_info.log_lines:
                HostConnection.relayer_process.log(line, logging_tools.LOG_LEVEL_CRITICAL)
        self.send_result(cur_mes, ret)
def send(self, host_mes, com_struct):
    try:
        host_mes.set_com_struct(com_struct)
    except:
        self.return_error(
            host_mes,
            "error parsing arguments: {}".format(process_tools.get_except_info())
        )
    else:
        if not self.tcp_con:
            try:
                self._open()
            except:
                self.return_error(
                    host_mes,
                    "error connecting to {}: {}".format(
                        self.__conn_str,
                        process_tools.get_except_info()
                    )
                )
            else:
                send_str = unicode(host_mes.srv_com)
                try:
                    HostConnection.zmq_socket.send_unicode(self.zmq_id, zmq.DONTWAIT | zmq.SNDMORE)  # @UndefinedVariable
                    HostConnection.zmq_socket.send_unicode(send_str, zmq.DONTWAIT)  # @UndefinedVariable
                except:
                    self.return_error(
                        host_mes,
                        "connection error ({})".format(process_tools.get_except_info()),
                    )
                else:
                    self.sr_probe.send = len(send_str)
                    host_mes.sr_probe = self.sr_probe
                    host_mes.sent = True
        else:
            # send to socket-thread for old clients
            HostConnection.relayer_process.send_to_process(
                "socket",
                "connection",
                host_mes.src_id,
                unicode(host_mes.srv_com)
            )
def _parse_dict(self, ret_dict):
    self.lv_dict = {}
    for name in ["lv", "pv", "vg"]:
        for stuff in ret_dict.get(name, []):
            try:
                new_lv_obj = lvm_object(name, stuff)
            except:
                print(process_tools.get_except_info())
            else:
                self.lv_dict.setdefault(name, {})[new_lv_obj["name"]] = new_lv_obj
def process(self):
    try:
        self.__ct_struct.process(self)
    except:
        exc_info = process_tools.icswExceptionInfo()
        for _line in exc_info.log_lines:
            self.log(_line, logging_tools.LOG_LEVEL_ERROR)
        self.srv_com.set_result(
            "error in process() call: {}".format(
                process_tools.get_except_info()
            ),
            server_command.SRV_REPLY_STATE_CRITICAL
        )
def open(self):
    try:
        self.fd = codecs.open(self.path, "r", "utf-8")
    except:
        if self.opt_ns.verbose:
            print(
                "Cannot open {}: {}".format(
                    self.path,
                    process_tools.get_except_info()
                )
            )
        self.valid = False
        self.fd = None
    else:
        self.inode_num = os.stat(self.path)[stat.ST_INO]
def _close_con(self):
    if self.__conn:
        try:
            self.__conn.close()
        except:
            self.log(
                "error closing connection: {}".format(
                    process_tools.get_except_info()
                ),
                logging_tools.LOG_LEVEL_ERROR
            )
        del self.__conn
        self.__conn = None
def _inotify_check(self, *args, **kwargs):
    try:
        self.__watcher.process()
    except:
        self.log(
            "exception occurred in watcher.process(): {}".format(
                process_tools.get_except_info()
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
        raise
    else:
        pass
def wait(self):
    try:
        pid = self.popen.pid
        self.popen.wait()
    except (OSError, ValueError):
        self.log(
            "error in popen.wait: {}".format(
                process_tools.get_except_info()
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
    else:
        self.log("called wait (pid={:d})".format(pid))
def _send(self, sock):
    try:
        self.socket.send(self._send_str(self.srv_com).encode("utf-8"))
    except:
        _err_str = "error sending TCP: {}".format(process_tools.get_except_info())
        self.log(_err_str, logging_tools.LOG_LEVEL_ERROR)
        self.__process.send_result(self.src_id, str(self.srv_com), _err_str, True)
        self.close()
    else:
        self.__process.unregister_socket(self.socket)
        self.__process.register_socket(self.socket, select.POLLIN, self._recv)
def inotify_event(*args, **kwargs):
    try:
        Machine.inotify_watcher.process()
    except:
        Machine.g_log(
            "exception occurred in Machine.inotify_event(): {}".format(
                process_tools.get_except_info()
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
        # _e = process_tools.exception_info()
        # print _e.log_lines
    else:
        pass
def _fetch_hosts(self, mc):
    try:
        h_dict = json.loads(mc.get(CollectdMCKeyEnum.main_key.value))
    except:
        self.log(
            "error fetching host_list: {}".format(
                process_tools.get_except_info()
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
        h_dict = {}
    build_list = self.struct.match(h_dict)
    self.struct.set_last_update(h_dict)
    return build_list
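
# _fetch_hosts treats the cached value as best-effort: any error (cache miss,
# connection problem, broken JSON) degrades to an empty host dict. The same
# guard in isolation (mc_get stands in for the memcache client call):
import json


def fetch_hosts(mc_get, key):
    try:
        return json.loads(mc_get(key))
    except Exception:
        # missing key (None), connection errors and bad JSON all end up here
        return {}


print(fetch_hosts(lambda key: '{"node01": {}}', "host_list"))  # -> {'node01': {}}
print(fetch_hosts(lambda key: None, "host_list"))              # -> {}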
def communicate(self):
    if self.popen:
        try:
            return self.popen.communicate()
        except OSError:
            self.log(
                "error in communicate: {}".format(
                    process_tools.get_except_info()
                ),
                logging_tools.LOG_LEVEL_ERROR
            )
            return ("", "")
    else:
        return ("", "")
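
# wait() and communicate() above wrap Popen calls so a dead or never-started
# process yields empty output instead of a traceback. A runnable sketch of
# the same guard around a real subprocess:
import subprocess


def safe_communicate(popen):
    if popen:
        try:
            return popen.communicate()
        except (OSError, ValueError):
            return ("", "")
    return ("", "")


proc = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE, universal_newlines=True)
print(safe_communicate(proc)[0].strip())  # -> hello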