def rsync(self):
    log_lines = []
    self["start_time"] = time.time()
    self["rsync_com"] = "rsync --stats -a --delete {} {}".format(
        self["source_path"],
        self["dest_path"])
    if self["verbose"]:
        self.log("rsync target is {}".format(self["dest_path"]))
    self.log("starting rsync-command '{}' ...".format(
        self["rsync_com"]))
    sync_stat, sync_out = subprocess.getstatusoutput(self["rsync_com"])
    self["call_stat"] = sync_stat
    self["call_log"] = sync_out.split("\n")
    e_time = time.time()
    self["run_time"] = e_time - self["start_time"]
    if self["verbose"]:
        self.log("syncing took {}".format(
            logging_tools.get_diff_time_str(self["run_time"])))
    self._interpret_output()
    log_str = "rsync state is {}, {} of output, took {}".format(
        self._interpret_call_stat(self["call_stat"]),
        logging_tools.get_plural("line", len(self["call_log"])),
        logging_tools.get_diff_time_str(self["run_time"]))
    log_lines.append(log_str)
    self.log(
        log_str,
        logging_tools.LOG_LEVEL_ERROR if self["call_stat"] else logging_tools.LOG_LEVEL_OK)
    if self["verbose"]:
        for line in self["call_log"]:
            log_lines.append(line)
            self.log(" - {}".format(line))
    # show it
    # pprint.pprint(self.__v_dict)
    return log_lines
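
# _interpret_call_stat() used above is not part of this listing; the helper
# below is a hypothetical sketch of what it might do. It assumes the status
# is a plain rsync exit code (true for subprocess.getstatusoutput() on
# Python 3.5+, which returns the exit code instead of the raw wait status).
_RSYNC_EXIT_CODES = {
    0: "ok",
    23: "partial transfer due to error",
    24: "partial transfer, some source files vanished",
}


def _interpret_call_stat_sketch(sync_stat):
    # map well-known rsync exit codes to a readable string
    return _RSYNC_EXIT_CODES.get(sync_stat, "error (exit code {:d})".format(sync_stat))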
def _hello_timeout(self):
    act_time = time.time()
    act_to = abs(act_time - self.__last_update)
    if act_to > self.__timeout:
        if os.path.isdir(self.dir_name):
            self.log(
                "timeout of {} > {} reached".format(
                    logging_tools.get_diff_time_str(act_to),
                    logging_tools.get_diff_time_str(self.__timeout)))
            if os.path.isfile(self.__action.split()[0]):
                _start_ok, log_lines = process_tools.submit_at_command(self.__action)
                for line in log_lines:
                    self.log(line)
            else:
                self.log(
                    "cannot submit '{}' (command not found)".format(
                        self.__action.split()[0]
                    ),
                    logging_tools.LOG_LEVEL_WARN
                )
            self.__last_update = act_time
        else:
            self.log(
                "watch_directory {} no longer present, exiting".format(self.dir_name),
                logging_tools.LOG_LEVEL_WARN)
            self.__exit_flag = True
def g_rotate_logs():
    Machine.g_log("starting log rotation")
    s_time = time.time()
    g_res = LogRotateResult()
    for dev in Machine.devname_dict.values():
        g_res.feed(dev.rotate_logs())
    Machine.g_log("rotated in {}, {} to compress".format(
        logging_tools.get_diff_time_str(time.time() - s_time),
        logging_tools.get_plural("file", len(g_res.compress_list)),
    ))
    g_res.stop()
    Machine.g_log(g_res.info_str())
    if g_res.compress_list and Machine.c_binary:
        start_time = time.time()
        for _c_file in g_res.compress_list:
            # escape certain strings (only '(' is handled; with shell=True
            # this is fragile for other shell metacharacters)
            _bin = "{} {}".format(Machine.c_binary, _c_file.replace("(", r"\("))
            retcode = subprocess.call(_bin, shell=True)
            if retcode:
                Machine.g_log(
                    "'{}' returned {:d}".format(_bin, retcode),
                    logging_tools.LOG_LEVEL_WARN)
        end_time = time.time()
        Machine.g_log("compressed {} in {} (per item: {})".format(
            logging_tools.get_plural("file", len(g_res.compress_list)),
            logging_tools.get_diff_time_str(end_time - start_time),
            logging_tools.get_diff_time_str(
                (end_time - start_time) / len(g_res.compress_list)),
        ))
def receive(self):
    r_client = self.recv_sock or self.send_sock
    if r_client.poll(self.args.timeout * 1000):
        recv_str = r_client.recv()
        if r_client.getsockopt(zmq.RCVMORE):  # @UndefinedVariable
            recv_id = recv_str
            recv_str = r_client.recv()
        else:
            recv_id = None
        timeout = False
    else:
        print("error timeout in receive() from {} after {}".format(
            self.recv_conn_str or self.conn_str,
            logging_tools.get_plural("second", self.args.timeout)))
        timeout = True
        recv_id, recv_str = (None, None)
    self.e_time = time.time()
    if timeout:
        self.verbose("communication took {}".format(
            logging_tools.get_diff_time_str(self.e_time - self.s_time),
        ))
    else:
        self.verbose("communication took {}, received {:d} bytes".format(
            logging_tools.get_diff_time_str(self.e_time - self.s_time),
            len(recv_str),
        ))
    return timeout, recv_id, recv_str
def show_info(self):
    return "{:<30s} : CF {:<10s}, {:6d} rows, {:4d} pdp ({:12s}), length is {:18s}".format(
        self.name,
        self.cf,
        self.rows,
        self.pdp_per_row,
        logging_tools.get_diff_time_str(self.pdp_per_row * self.step),
        logging_tools.get_diff_time_str(self.length),
    )
def _show_cache_info(self):
    if self.__cache:
        self.log("cache is present ({}, age is {}, timeout {}, {})".format(
            logging_tools.get_plural("entry", len(self.__cache)),
            logging_tools.get_diff_time_str(self.__cache_age),
            logging_tools.get_diff_time_str(self.Meta.cache_timeout),
            "valid" if self.__cache_valid else "invalid",
        ))
    else:
        self.log("no cache set")
def _copy_image(self, cur_img):
    """ copy image """
    self.log("copying {}".format(logging_tools.get_plural("directory", len(self.__dir_list))))
    for dir_num, cur_dir in enumerate(self.__dir_list, 1):
        self.log(
            "[{:2d} of {:2d}] copying directory {}".format(
                dir_num,
                len(self.__dir_list),
                cur_dir,
            )
        )
        s_time = time.time()
        self._call(
            cur_img,
            "cp -a {} {}".format(
                os.path.join(cur_img.source, cur_dir),
                os.path.join(self.__system_dir, cur_dir)
            )
        )
        e_time = time.time()
        self.log(
            "copied directory {} in {}".format(
                cur_dir,
                logging_tools.get_diff_time_str(e_time - s_time)
            )
        )
    for cur_file in self.__file_list:
        s_time = time.time()
        shutil.copy2(
            os.path.join(cur_img.source, cur_file),
            os.path.join(self.__system_dir, cur_file),
        )
        e_time = time.time()
        self.log(
            "copied file {} in {}".format(
                cur_file,
                logging_tools.get_diff_time_str(e_time - s_time)
            )
        )
    for cur_link in self.__link_list:
        s_time = time.time()
        self._call(
            cur_img,
            "cp -a {} {}".format(
                os.path.join(cur_img.source, cur_link),
                os.path.join(self.__system_dir, cur_link),
            )
        )
        e_time = time.time()
        self.log(
            "copied link {} in {}".format(
                cur_link,
                logging_tools.get_diff_time_str(e_time - s_time)
            )
        )
def check_for_timeout(self):
    diff_time = abs(time.time() - self.last_start)
    if diff_time > self.max_runtime:
        self.log(
            "timeout ({} > {})".format(
                logging_tools.get_diff_time_str(diff_time),
                logging_tools.get_diff_time_str(self.max_runtime)),
            logging_tools.LOG_LEVEL_WARN)
        self.bg_proc.spc.trigger_timeout(self.waiting_for)
        return True
    else:
        return False
def call_ext_programm(args, **kwargs):
    prog = kwargs.pop("prog")
    _output = kwargs.get("output", False)
    _show_output = kwargs.get("show_output", False)
    if prog == "manage":
        command = [
            os.path.join(ICSW_ROOT, "initat", "cluster", "manage.py")
        ] + args
    else:
        command = [
            os.path.join(ICSW_ROOT, "initat", "icsw", "main.py")
        ] + args
    com_str = " ".join(command)
    s_time = time.time()
    c_stat = 0
    try:
        c_out = subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        c_stat = e.returncode
        c_out = e.output
    c_out = c_out.decode("utf-8")
    e_time = time.time()
    # note: 256 looks like a leftover 16-bit wait status (exit code 1) from a
    # former getstatusoutput()-based implementation; CalledProcessError
    # delivers plain exit codes, so this branch is most likely dead
    if c_stat == 256 and c_out.lower().count("nothing seems to have changed"):
        c_stat = 0
    if c_stat:
        print(
            "something went wrong calling '{}' in {} ({:d}):".format(
                com_str,
                logging_tools.get_diff_time_str(e_time - s_time),
                c_stat
            )
        )
        for _line in c_out.split("\n"):
            print(" {}".format(_line))
        if _output:
            return False, c_out
        else:
            return False
    else:
        print(
            "success calling '{}' in {}".format(
                com_str,
                logging_tools.get_diff_time_str(e_time - s_time),
            )
        )
        if _show_output:
            print(c_out)
        if _output:
            return True, c_out
        else:
            return True
def get_stream_info(in_dict):
    return ", ".join([
        "{:.2f} lines/s [{}]".format(
            in_dict[_key],
            logging_tools.get_diff_time_str(_key, int=False),
        ) for _key in sorted(in_dict.keys())
    ])
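
# Hedged usage note for get_stream_info(): in_dict is assumed to map a
# timeframe in seconds to a line rate; the exact rendering of
# get_diff_time_str() is not shown in this listing, so the output below is
# indicative only:
#
#     get_stream_info({60.0: 12.5, 300.0: 11.2})
#     # -> "12.50 lines/s [<1 min>], 11.20 lines/s [<5 min>]"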
def _process_assets(self, asset_run_id, **kwargs):
    s_time = time.time()
    asset_run = AssetRun.objects.get(pk=asset_run_id)
    asset_batch_id = asset_run.asset_batch.idx
    self.log("start processing of assetrun [AssetBatch.idx={:d}]".format(
        asset_batch_id))
    try:
        asset_run.generate_assets()
    except:
        _err = process_tools.get_except_info()
        self.log(
            "error in asset_run.generate_assets: {}".format(_err),
            logging_tools.LOG_LEVEL_ERROR)
        asset_run.interpret_error_string = _err
        asset_run.save()
    finally:
        e_time = time.time()
        self.log(
            "generate_asset_run for [AssetBatch.idx={:d}] took {}".format(
                asset_batch_id,
                logging_tools.get_diff_time_str(e_time - s_time),
            ))
        asset_run.generate_duration = e_time - s_time
        asset_run.save(update_fields=["generate_duration"])
    self.send_pool_message(
        "process_assets_finished",
        asset_run.idx,
    )
def __call__(self, *args, **kwargs):
    s_time = time.time()
    display_name = getattr(args[0], "display_name", None)
    # get head.im_class.__name__ (contains the class name for django class
    # views); im_class is a Python-2 bound-method attribute, under Python 3
    # the getattr chain silently falls through to None
    view_class_name = getattr(
        getattr(getattr(args[0], 'head', None), 'im_class', None),
        '__name__',
        None)
    if hasattr(args[0], "model") and args[0].model is not None:
        self.__obj_name = args[0].model._meta.object_name
    elif display_name is not None:
        self.__obj_name = display_name
    elif view_class_name is not None:
        self.__obj_name = view_class_name
    else:
        self.__obj_name = "unknown"
    try:
        result = self._func(*args, **kwargs)
    except:
        exc_info = process_tools.exception_info()
        _err_str = process_tools.get_except_info()
        self.log(
            "exception: {}".format(_err_str),
            logging_tools.LOG_LEVEL_ERROR)
        for line in exc_info.log_lines:
            self.log(" {}".format(line))
        result = Response(_err_str, status=status.HTTP_406_NOT_ACCEPTABLE)
        # raise
    e_time = time.time()
    self.log("call took {}".format(
        logging_tools.get_diff_time_str(e_time - s_time)))
    return result
def _check_error_dict(self, force=False):
    c_name = process_tools.get_cluster_name()
    mails_sent = 0
    s_time = time.time()
    ep_dels = []
    for ep, es in self.__eg_dict.items():
        t_diff = s_time - es["last_update"]
        if force or (t_diff < 0 or t_diff > 60):
            subject = "Python error for pid {:d} on {}@{} ({}, {})".format(
                ep,
                process_tools.get_fqdn()[0],
                c_name,
                process_tools.get_machine_name(),
                clusterid.get_cluster_id() or "N/A",
            )
            err_lines = "".join(es["error_str"]).split("\n")
            msg_body = "\n".join(
                ["Processinfo {}".format(self._get_process_info(es))] + [
                    "{:3d} {}".format(line_num + 1, line)
                    for line_num, line in enumerate(err_lines)
                ])
            if self.CC.CS["log.send.errormails"]:
                self._send_mail(subject, msg_body)
                mails_sent += 1
            ep_dels.append(ep)
    for epd in ep_dels:
        del self.__eg_dict[epd]
    e_time = time.time()
    if mails_sent:
        self.log("Sent {} in {}".format(
            logging_tools.get_plural("mail", mails_sent),
            logging_tools.get_diff_time_str(e_time - s_time)))
def _start_backup(self, *args, **kwargs):
    self.log("starting backup")
    bu_dir = global_config["DATABASE_DUMP_DIR"]
    if not os.path.isdir(bu_dir):
        self.log("creating bu_dir {}".format(bu_dir))
        os.mkdir(bu_dir)
    # delete old files
    for entry in os.listdir(bu_dir):
        if entry.count(".") and entry.split(".")[-1] in ["zip", "bz2", "psql"]:
            f_name = os.path.join(bu_dir, entry)
            # _stat = os.stat(f_name)
            diff_dt = datetime.datetime.now() - datetime.datetime.fromtimestamp(
                os.stat(f_name)[stat.ST_CTIME])
            if diff_dt.days > global_config["DATABASE_KEEP_DAYS"]:
                self.log("removing backup {}".format(f_name))
                os.unlink(f_name)
    for bu_type, bu_call in [
        ("database", self._database_backup),
        ("normal", self._normal_backup),
    ]:
        self.log("--------- backup type {} -------------".format(bu_type))
        s_time = time.time()
        bu_call(bu_dir)
        e_time = time.time()
        self.log("{} backup finished in {}".format(
            bu_type,
            logging_tools.get_diff_time_str(e_time - s_time)))
    self._exit_process()
def call_command(command, log_com=None):
    start_time = time.time()
    stat, out = subprocess.getstatusoutput(command)
    end_time = time.time()
    log_lines = [
        "calling '{}' took {}, result (stat {:d}) is {} ({})".format(
            command,
            logging_tools.get_diff_time_str(end_time - start_time),
            stat,
            logging_tools.get_plural("byte", len(out)),
            logging_tools.get_plural("line", len(out.split("\n")))
        )
    ]
    if log_com:
        for log_line in log_lines:
            log_com(" - {}".format(log_line))
        if stat:
            for log_line in out.split("\n"):
                log_com(" - {}".format(log_line))
        return stat, out
    else:
        if stat:
            # append output to log_lines if error
            log_lines.extend([" - {}".format(line) for line in out.split("\n")])
        return stat, out, log_lines
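
# Note on the asymmetric return above: with a log_com callable the function
# logs immediately and returns (stat, out); without one it returns
# (stat, out, log_lines) so the caller can log later. A hedged usage sketch
# (my_log is a placeholder for any log callable):
#
#     stat, out, log_lines = call_command("/bin/true")
#     stat, out = call_command("/bin/true", log_com=my_log)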
def flush_rrdcached(self, f_names):
    if f_names:
        f_names -= self.__flush_cache
    if f_names:
        self.__flush_cache |= f_names
        if self.__rrdcached_socket:
            _s_time = time.time()
            self.log("sending flush() to rrdcached for {}".format(
                logging_tools.get_plural("file", len(f_names))))
            # rrdcached wire protocol: open a BATCH, one FLUSH per file,
            # terminate with a single dot on its own line
            _lines = ["BATCH"] + [
                "FLUSH {}".format(_f_name) for _f_name in f_names
            ] + [
                ".",
                "",
            ]
            self.__rrdcached_socket.send(
                ("\n".join(_lines)).encode("utf-8"))
            # note: the select timeout is in seconds, so 5000 looks like a
            # leftover milliseconds value (roughly 83 minutes)
            _read, _write, _exc = select.select(
                [self.__rrdcached_socket.fileno()], [], [], 5000)
            _e_time = time.time()
            if not _read:
                self.log(
                    "read list is empty after {}".format(
                        logging_tools.get_diff_time_str(_e_time - _s_time)),
                    logging_tools.LOG_LEVEL_ERROR)
            else:
                _recv = self.__rrdcached_socket.recv(16384)
        else:
            self.log(
                "no valid rrdcached_socket, skipping flush()",
                logging_tools.LOG_LEVEL_ERROR)
    else:
        self.log(
            "no file names given, skipping flush()",
            logging_tools.LOG_LEVEL_WARN)
def _parse_actual_license_usage(self, actual_licenses, act_conf):
    # build the different license-server calls, see loadsensor.py
    all_server_addrs = set([
        "{:d}@{}".format(act_lic.get_port(), act_lic.get_host())
        for act_lic in actual_licenses.values()
        if act_lic.license_type == "simple"
    ])
    # print("asa:", all_server_addrs)
    q_s_time = time.time()
    for server_addr in all_server_addrs:
        if server_addr not in self.__lc_dict:
            self.log("init new license_check object for server {}".format(
                server_addr))
            self.__lc_dict[server_addr] = sge_license_tools.license_check(
                lmutil_path=os.path.join(act_conf["LMUTIL_PATH"]),
                port=int(server_addr.split("@")[0]),
                server=server_addr.split("@")[1],
                log_com=self.log)
        lic_xml = self.__lc_dict[server_addr].check(
            license_names=actual_licenses)
        # FIXME, srv_result should be stored in a list and merged; as it
        # stands, each iteration overwrites lic_xml, and lic_xml stays
        # unbound if all_server_addrs is empty
    q_e_time = time.time()
    self.log("{} to query, took {}: {}".format(
        logging_tools.get_plural("license server", len(all_server_addrs)),
        logging_tools.get_diff_time_str(q_e_time - q_s_time),
        ", ".join(all_server_addrs)))
    return lic_xml
def _send_return(self, zmq_sock, src_id, srv_com):
    c_time = time.time()
    srv_com["result"].attrib["end_time"] = TIME_FORMAT.format(c_time)
    info_str = "got command '{}' from '{}', took {}".format(
        srv_com["command"].text,
        srv_com["source"].attrib["host"],
        logging_tools.get_diff_time_str(
            abs(c_time - float(srv_com["result"].attrib["start_time"]))))
    if int(srv_com["result"].attrib["state"]) != server_command.SRV_REPLY_STATE_OK:
        info_str = "{}, result is {} ({})".format(
            info_str,
            srv_com["result"].attrib["reply"],
            srv_com["result"].attrib["state"])
        log_level = logging_tools.LOG_LEVEL_WARN
    else:
        log_level = logging_tools.LOG_LEVEL_OK
    if self.__debug:
        self.log(info_str, log_level)
    srv_com.update_source()
    try:
        zmq_sock.send_unicode(src_id, zmq.SNDMORE)
        zmq_sock.send_string(str(srv_com))
    except:
        self.log(
            "error sending return to {}".format(src_id),
            logging_tools.LOG_LEVEL_ERROR)
        self.log("srv_com: {}".format(str(srv_com)))
    del srv_com
def get_info_line(self):
    _consumers = self.get_num_consumers()
    return [
        logging_tools.form_entry(self.action, header="action"),
        logging_tools.form_entry(str(self.config_service_enum), header="ConfigService"),
        logging_tools.form_entry_right(self.multiplier, header="Weight"),
        logging_tools.form_entry_center("yes" if self.ghost else "no", header="Ghost"),
        logging_tools.form_entry_center(str(self.content_type), header="ContentType"),
        logging_tools.form_entry_center(self.license_id_name or "global", header="License"),
        logging_tools.form_entry_right(
            logging_tools.get_diff_time_str(self.timeframe_secs) if self.timeframe_secs else "---",
            header="timeframe",
        ),
        logging_tools.form_entry_right(_consumers, header="entries"),
        logging_tools.form_entry_right(self.consumed, header="consumed"),
        logging_tools.form_entry_right(
            "{:.2f}".format(float(self.consumed) / float(_consumers)) if _consumers else "-",
            header="mean"),
    ]
def result(self, srv_com):
    if "async_helper_id" not in srv_com:
        self.log(
            "no async_helper_id found in srv_com, discarding message",
            logging_tools.LOG_LEVEL_ERROR
        )
        # keep the arity in sync with the successful return below
        return None, None, None, None, None
    else:
        async_id = int(srv_com["*async_helper_id"])
        if async_id not in self.__lut:
            self.log(
                "async_id {:d} not defined in lut, discarding message".format(
                    async_id
                ),
                logging_tools.LOG_LEVEL_ERROR
            )
            return None, None, None, None, None
        else:
            func_name, src_id, zmq_sock, msg_type, s_time = self.__lut[async_id]
            e_time = time.time()
            del self.__lut[async_id]
            del srv_com["async_helper_id"]
            _log_str = "finished async call {} ({:d}) in {}".format(
                func_name,
                async_id,
                logging_tools.get_diff_time_str(e_time - s_time),
            )
            if zmq_sock is None:
                self.log(_log_str)
            return zmq_sock, src_id, srv_com, msg_type, _log_str
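
# Hedged note: the registering counterpart of this lookup is not part of the
# listing. From the unpacking above it must store a 5-tuple per call id and
# stamp that id into the outgoing srv_com, roughly:
#
#     self.__lut[new_id] = (func_name, src_id, zmq_sock, msg_type, time.time())
#     srv_com["async_helper_id"] = "{:d}".format(new_id)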
def _scan_users(self):
    _hel = home_export_list()
    _scanned_ok, _scanned_error = (0, 0)
    for _key, _value in _hel.exp_dict.items():
        if _value["entry"].device.pk == self.sql_info.effective_device.pk:
            for _scan_user in user.objects.filter(
                    Q(export=_value["entry"]) & Q(scan_user_home=True)):  # @UndefinedVariable
                _h_dir = os.path.join(_value["createdir"], _scan_user.home or _scan_user.login)
                if os.path.isdir(_h_dir):
                    s_time = time.time()
                    self.log("scanning user '{}' in {}".format(
                        _scan_user, _h_dir))
                    self.step(blocking=False, handle_timer=True)
                    self._scan_dir(_scan_user, _h_dir)
                    e_time = time.time()
                    self.log("... took {}".format(
                        logging_tools.get_diff_time_str(e_time - s_time)))
                    _scanned_ok += 1
                else:
                    self.log(
                        "homedir {} does not exist for user '{}'".format(
                            _h_dir, _scan_user),
                        logging_tools.LOG_LEVEL_ERROR)
                    _scanned_error += 1
    self.log("scan info: {:d} ok, {:d} with errors".format(
        _scanned_ok, _scanned_error))
def _check_error_dict(self, force=False):
    mails_sent = 0
    s_time = time.time()
    ep_dels = []
    for ep, es in list(self.__eg_dict.items()):
        t_diff = s_time - es.last_update
        if force or (t_diff < 0 or t_diff > 60):
            subject = "An error occurred, PID={:d} on {}@{} ({})".format(
                es.pid,
                process_tools.get_fqdn()[0],
                clusterid.get_safe_cluster_id("N/A"),
                clusterid.get_safe_cluster_name("N/A"),
            )
            err_lines = es.error_str.split("\n")
            msg_body = "\n".join(
                ["Processinfo {}".format(es.get_process_info())] + [
                    "{:3d} {}".format(line_num, line)
                    for line_num, line in enumerate(err_lines, 1)
                ])
            if self.CC.CS["log.send.errormails"]:
                self._send_mail(subject, msg_body)
                mails_sent += 1
            ep_dels.append(ep)
    for epd in ep_dels:
        del self.__eg_dict[epd]
    e_time = time.time()
    if mails_sent:
        self.log("Sent {} in {}".format(
            logging_tools.get_plural("mail", mails_sent),
            logging_tools.get_diff_time_str(e_time - s_time)))
def __call__(self, *args, **kwargs):
    SetupLogger.nest_level += 1
    _pf = " " * SetupLogger.nest_level
    self.debug(
        "{}[{:d}] Entering {} ({}{}, {}{})".format(
            _pf,
            SetupLogger.nest_level,
            self.__name__,
            logging_tools.get_plural("arg", len(args)),
            " [{}]".format(", ".join([str(_val) for _val in args])) if args else "",
            logging_tools.get_plural("kwarg", len(kwargs)),
            " [{}]".format(", ".join(list(kwargs.keys()))) if kwargs else "",
        )
    )
    s_time = time.time()
    try:
        ret_value = self._func(*args, **kwargs)
    finally:
        # restore the nesting level even if the wrapped call raises
        e_time = time.time()
        self.debug(
            "{}[{:d}] Leaving {}, call took {}".format(
                _pf,
                SetupLogger.nest_level,
                self.__name__,
                logging_tools.get_diff_time_str(e_time - s_time),
            )
        )
        SetupLogger.nest_level -= 1
    return ret_value
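
# Hedged usage sketch for the decorator above (SetupLogger wrapping a plain
# function via self._func is an assumption based on this listing;
# create_database is a hypothetical example):
#
#     @SetupLogger
#     def create_database(name):
#         ...
#
#     create_database("test")
#
# each call then emits an indented "Entering"/"Leaving" pair whose depth
# follows the class-wide nest_level counter.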
def handle_nodeinfo(self, src_id, node_text):
    s_time = time.time()
    s_req = simple_request(self, src_id, node_text)
    com_call = self.__com_dict.get(s_req.command, None)
    if com_call:
        config_control.update_router()
        try:
            ret_str = com_call(s_req)
        except:
            exc_info = process_tools.exception_info()
            ret_str = "error interpreting command {}: {}".format(
                node_text,
                process_tools.get_except_info(),
            )
            for _line in exc_info.log_lines:
                self.log(" {}".format(_line), logging_tools.LOG_LEVEL_ERROR)
    else:
        ret_str = "error unknown command '{}'".format(node_text)
    if ret_str is None:
        self.log("waiting for answer")
    else:
        e_time = time.time()
        self.log(
            "handled nodeinfo '{}' (src_ip {}) in {}, returning {}".format(
                s_req.node_text,
                s_req.src_ip,
                logging_tools.get_diff_time_str(e_time - s_time),
                ret_str))
        config_control.srv_process._send_simple_return(
            s_req.zmq_id, ret_str)
        del s_req
def _show_pending_info(self, dist_master):
    cur_time = time.time()
    pend_keys = [
        key for key, value in self.__file_dict.items() if value.is_pending
    ]
    error_keys = [
        key for key, value in self.__file_dict.items() if value.is_error
    ]
    self.log(
        "{:d} total, {} pending, {} error".format(
            len(self.__file_dict),
            logging_tools.get_plural("remote file", len(pend_keys)),
            logging_tools.get_plural("remote file", len(error_keys))),
    )
    if not pend_keys and not error_keys:
        _dist_time = abs(cur_time - self.send_time)
        self.log(
            "actual distribution_set {:d} is OK (in {}, {:.2f} / sec)".format(
                int(self.config_version_send),
                logging_tools.get_diff_time_str(_dist_time),
                self.num_send[self.config_version_send] / _dist_time,
            ))
        self.config_version_installed = self.config_version_send
        self.dist_ok = True
        # self.__md_struct.sync_end = cluster_timezone.localize(datetime.datetime.now())
        # self.__md_struct.save()
        # this only makes sense on the slave
        # self._check_for_ras()
        dist_master.send_to_config_server(
            self._get_config_srv_command("sync_end"))
        # clear file_dict
        self.__file_dict = {}
def get_node_rrd(self, srv_com, **kwargs):
    # print("got", unicode(srv_com))
    # database debug
    self._db_debug.start_call("node_rrd")
    node_results = []
    s_time = time.time()
    dev_list = srv_com.xpath(".//device_list", smart_strings=False)[0]
    pk_list = [int(cur_pk) for cur_pk in dev_list.xpath(".//device/@pk", smart_strings=False)]
    for dev_pk in pk_list:
        cur_res = {"pk": dev_pk}
        if DataStore.has_machine_vector(dev_pk):
            # web mode (sorts entries)
            _struct = DataStore.get_instance(dev_pk).vector_struct()
            _struct.extend(DataStore.compound_struct(_struct))
            cur_res["struct"] = _struct
        else:
            self.log(
                "no MachineVector found for device {:d}".format(dev_pk),
                logging_tools.LOG_LEVEL_WARN)
        node_results.append(cur_res)
    e_time = time.time()
    self.log(
        "node_rrd for {} took {}".format(
            logging_tools.get_plural("device", len(pk_list)),
            logging_tools.get_diff_time_str(e_time - s_time),
        )
    )
    # _json = self._to_json(node_results, set(["info", "active", "key", "name", "part", "pk"]))
    # pprint.pprint(node_results, depth=5)
    self._db_debug.end_call()
    srv_com["result"] = json.dumps(node_results)
    srv_com.set_result("set results for {}".format(logging_tools.get_plural("node", len(node_results))))
    return srv_com
def _call(self, cur_time, builder):
    sep_str = "-" * 64
    # vector to report
    my_vector = None
    _quota_bin = process_tools.find_file("repquota")
    if _quota_bin is None:
        self.log("No repquota binary found", logging_tools.LOG_LEVEL_ERROR)
    else:
        self.log(sep_str)
        self.log("starting quotacheck")
        q_cmd = "{} -aniugp".format(_quota_bin)
        q_stat, q_out = subprocess.getstatusoutput(q_cmd)
        if q_stat:
            self.log(
                "Cannot call '{}' (stat={:d}): {}".format(
                    q_cmd, q_stat, str(q_out)),
                logging_tools.LOG_LEVEL_ERROR)
        else:
            q_dict, dev_dict = self._scan_repquota_output(q_out)
            qcb_dict = self._create_base_db_entries(dev_dict)
            prob_devs, prob_objs, quota_cache = self._check_for_violations(q_dict)
            self._write_quota_usage(qcb_dict, quota_cache)
            if prob_devs:
                self._send_quota_mails(prob_devs, prob_objs, dev_dict)
            if self.Meta.creates_machvector:
                my_vector = self._create_machvector(builder, cur_time, quota_cache)
        qc_etime = time.time()
        self.log("quotacheck took {}".format(
            logging_tools.get_diff_time_str(qc_etime - cur_time)))
        self.log(sep_str)
    return my_vector
def handle_action(self, action, srv_com, src, dst):
    s_time = time.time()
    # signature
    _sig = "{}{}".format(src, dst)
    _type_map = {
        # slave to remote (dist slave to dist master)
        "SR": "remote",
        # mon server to remote
        "MR": "remote",
        # mon server to dist master
        "MD": "local",
        # dist master to slave
        "DS": "direct",
    }
    if _sig in _type_map:
        _type = _type_map[_sig]
        getattr(self, "handle_{}_action".format(_type))(action, srv_com)
        e_time = time.time()
        self.log("{} action {} took {}".format(
            action,
            _type,
            logging_tools.get_diff_time_str(e_time - s_time),
        ))
    else:
        self.log(
            "Unknown signature '{}' (action is {})".format(
                _sig,
                action,
            ),
            logging_tools.LOG_LEVEL_CRITICAL)
def process_init(self):
    global_config.close()
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        zmq=True,
        context=self.zmq_context,
        init_logger=True)
    db_tools.close_connection()
    self.rrd_cache_socket = global_config["RRD_CACHED_SOCKET"]
    self.rrd_root = global_config["RRD_DIR"]
    cov_keys = [
        _key for _key in global_config.keys()
        if _key.startswith("RRD_COVERAGE")
    ]
    self.rrd_coverage = [global_config[_key] for _key in cov_keys]
    self.log("RRD coverage: {}".format(", ".join(self.rrd_coverage)))
    self.register_timer(self.check_size, 6 * 3600, first_timeout=1)
    self.__verbose = global_config["VERBOSE"]
    self._setup_rsync()
    if self.do_rsync:
        self.log(
            "enabling periodic RAM-to-disk sync from {} to {} every {}".format(
                global_config["RRD_DIR"],
                global_config["RRD_DISK_CACHE"],
                logging_tools.get_diff_time_str(
                    global_config["RRD_DISK_CACHE_SYNC"]),
            ))
        self.register_timer(
            self.sync_from_ram_to_disk,
            global_config["RRD_DISK_CACHE_SYNC"])
def check_for_result(self):
    if all(self.__snmp_results.values()):
        self.__end_time = time.time()
        # unify results
        # pprint.pprint(self.__snmp_results)
        # unify dict
        _errors, _found, _res_dict = ([], set(), {})
        for _key, _value in self.__snmp_results.items():
            _errors.extend(_value[0])
            _found |= _value[1]
            _res_dict.update(_value[2])
        self.log("finished batch in {} ({}, {})".format(
            logging_tools.get_diff_time_str(self.__end_time - self.__start_time),
            logging_tools.get_plural("run", len(self.__snmp_results)),
            logging_tools.get_plural("error", len(_errors)),
        ))
        attr_name = "handle_{}".format(self.command)
        if hasattr(self, attr_name):
            getattr(self, attr_name)(_errors, _found, _res_dict)
        else:
            self.log(
                "don't know how to handle {}".format(self.command),
                logging_tools.LOG_LEVEL_ERROR,
                result=True)
        self.finish()