def _exec(self):
    """Handle cleanup on exiting the ``with`` block."""
    try:
        self._args_set()
    except ValueError as e:
        log_format(self, error_info=e)
        msg = "参数错误,请检查参数后再请求! {}".format(e)
        return self.send_obj({"status": 1000, "msg": msg})
    try:
        res = self.response()
    except OperationalError as e:
        # MySQL connection error
        log.warning("Mysql OperationalError: %s", e)
        reconnect()
        # if the connection failed, reload the current request page
        return self.redirect(self.request.uri)
    except Exception:
        import traceback
        e = traceback.format_exc()
        log.error("Internal Error: %s", e)
        return self.send_obj({"status": 10001, "msg": str(e)})
    if not self._finished:
        return self.send_obj(res)
def distribute_request(self, http_req_handler):
    '''Route the request to the matching handler according to the URL patterns.'''
    path = urlparse(http_req_handler.path).path
    handled = False
    # proxy support
    if C('enable_proxy') and utils.isDict(C('proxy')):
        for reg, target in C('proxy').items():
            target_path = get_proxy_url(http_req_handler.path, reg, target)
            if target_path:
                log.info('[proxy](%s) to (%s)' % (http_req_handler.path, target_path))
                return proxy(target_path, Request(http_req_handler), Response(http_req_handler))
    for h in self.handlers:
        # parenthesized so the method check and the pattern match are both required
        if ('ALL' == h.get('method') or h.get('method') == http_req_handler.command) \
                and re.findall(h.get('pattern'), path):
            handled = True
            ret = (h.get('handler'))(Request(http_req_handler), Response(http_req_handler))
            if True == ret:
                continue
            else:
                break
    # if not handled by any handler, answer 405
    if not handled:
        log.error('%s is not handled' % path)
        http_req_handler.send_response(405, '%s not supported' % path)
        http_req_handler.end_headers()
        http_req_handler.wfile.close()
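The handler table consumed above is a list of dicts with method, pattern, and handler keys; a hypothetical registration sketch (server, hello_handler, and the fall-through lambda are illustrative, not part of the source):

# Hypothetical handler table for distribute_request. A handler that
# returns True lets matching continue to the next entry; any other
# return value stops the dispatch loop.
def hello_handler(req, res):
    res.send(content='hello')
    return False  # handled; stop matching

server.handlers = [
    {'method': 'GET', 'pattern': r'^/hello$', 'handler': hello_handler},
    {'method': 'ALL', 'pattern': r'.*', 'handler': lambda req, res: True},
]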
def get_zoo_client(cluster_name="qconf"):
    """Get zoo client by cluster_name."""
    global ZOO_CLIENTS
    if cluster_name not in ZOO_CLIENTS:
        # get zookeeper hosts info
        zookeeper = ZdZookeeper.one(cluster_name=cluster_name, deleted="0")
        if not zookeeper:
            raise ZookeeperConfError(
                "Zookeeper not configured for cluster: {}!".format(cluster_name))
        # connect to zookeeper
        try:
            client = KazooClient(hosts=zookeeper.hosts,
                                 connection_retry={"max_tries": 3, "backoff": 2})
            client.start(timeout=3)
            ZOO_CLIENTS[cluster_name] = client
        except KazooTimeoutError as exc:
            log.error('Failed to connect zookeeper, %s', str(exc))
            return
    # check the connection's state; if not connected, reconnect
    zoo_client = ZOO_CLIENTS[cluster_name]
    if not zoo_client.connected:
        zoo_client.start()
    return zoo_client
def _mark_task_start(self, taskName: str) -> int:
    """Create a record in the datastore.

    :return: taskId
    """
    startTaskTime = dtime.datetime.now().strftime("%d.%m.%Y %H:%M:%S")
    taskId = None
    try:
        resMem = ds.execute(
            f"INSERT INTO tasks values(NULL,'{taskName}', "
            f"'{startTaskTime}',NULL,'run',0,0,0,0,0,0,0,NULL)")
        if resMem != 0:
            raise Exception(f'Failed to create an entry in taskstore: {resMem}')
        for i in ds.select(
                f"SELECT id FROM tasks WHERE name='{taskName}' AND start_time='{startTaskTime}'"):
            taskId = i[0]
            break
        if taskId is None:
            raise Exception('Fail to create taskId for this task')
        return taskId
    except Exception as e:
        if log.level == 10:  # logging.DEBUG
            e = f'Task start failed: {traceback.format_exc()}'
        log.error(e)
        raise Exception(e)
def delay_calc(taskStartTime):
    startTime = dtime.datetime.now()
    if taskStartTime.lower() != 'now':
        now = dtime.datetime.now()
        now = now.hour * 3600 + now.minute * 60 + now.second
        try:
            nextStart = dtime.datetime.strptime(taskStartTime, '%H:%M:%S')
            nextStart = nextStart.hour * 3600 + nextStart.minute * 60 + nextStart.second
            if now > nextStart:
                # the rest of today plus tomorrow's seconds until the start time
                delay = 86400 - now + nextStart
                startTime += dtime.timedelta(seconds=delay)
                if onStart:
                    log.info(f"Tasks will start at {taskStartTime}")
            else:
                delay = nextStart - now
                startTime += dtime.timedelta(seconds=delay)
                if onStart:
                    log.info(f"Tasks will start today at {taskStartTime}")
        except Exception as e:
            log.error(f'Check parameter taskStartTime: {e}. Correct format is HH:MM:SS')
            time.sleep(2)
            shutdown_me(1, '')
    return startTime
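A quick worked check of the delay arithmetic above, assuming the clock reads 20:00:00 and taskStartTime is '06:30:00' (the numbers are illustrative):

# now = 20:00:00 -> 72000 s since midnight; nextStart = 06:30:00 -> 23400 s.
# now > nextStart, so delay = 86400 - 72000 + 23400 = 37800 s:
# the task starts 10.5 hours from now, at 06:30:00 tomorrow.
assert 86400 - 72000 + 23400 == 37800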
def _html(self):
    '''
    generate html
    HTML is rendered directly from the built template files.
    Because of data issues, some child templates fail when rendered
    standalone; the {html_force_output} option can be used to skip
    such templates.
    TODO: consider supporting require_html_modules
    '''
    fs = utils.FileSearcher(r'\.%s$' % C('template_ext'), self._build_tpl_dir)
    tpls = fs.search()
    for tpl in tpls:
        if C('ignore_parents') and tpl.endswith('parent.' + C('template_ext')):
            continue
        try:
            tr = TokenRender(re.sub(r'\.%s$' % C('template_ext'), '', tpl))
            html = tr.render(build=True)
            target_dir = os.path.join(self._build_html_dir, os.path.dirname(tpl))
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)
            dst_file = re.sub(r'\.%s$' % C('template_ext'), '.html',
                              os.path.join(self._build_html_dir, tpl))
            utils.writefile(dst_file, html)
        except Exception as e:
            if not C('html_force_output') and not self._force:
                raise e
            else:
                log.error(e)
def _js(self):
    '''
    handle js
    Unlike CSS, a JS file cannot itself reference other static resources
    by relative path, so we can merge first and then replace variables
    and add timestamps, without preprocessing every js file.
    '''
    js_modules = C('require_js_modules')
    if not utils.isList(js_modules):
        js_modules = C('require_modules')
    if not utils.isList(js_modules):
        js_modules = ['main']
    for js in js_modules:
        try:
            if not utils.isStr(js):
                continue
            js = re.sub(r'^\/+', '', js)
            if not js.endswith('.js'):
                js += '.js'
            js_realpath = os.path.join(self._build_js_dir, js)
            self.build_js(js_realpath, js_realpath, self._build_js_dir)
        except Exception as e:
            if self._force:
                log.error('[js]%s' % e)
            else:
                raise e
def _tpl(self):
    '''
    handle templates
    Templates only need timestamping and variable replacement.
    Text files under the extra {compile_dir} folder are included here.
    '''
    fs = utils.FileSearcher(r'\.%s$' % C('template_ext'), self._build_tpl_dir, relative=False)
    tpls = fs.search()
    if self._compile_dir:
        nfs = utils.FileSearcher(r'.+', self._build_compile_dir, relative=False)
        compile_files = nfs.search()
        for f in compile_files:
            if not utils.isBinary(f):
                tpls.insert(0, f)
    for tpl in tpls:
        try:
            content = utils.readfile(tpl)
            # the template's static-resource base is fixed to cwd,
            # i.e. resource paths should always be absolute
            content = allt(content, self._build_dir, force_abspath=False)
            content = replace(content, self._target)
            content = removeCssDepsDeclaration(content)
            utils.writefile(tpl, content)
        except Exception as e:
            if self._force:
                log.error('[tpl]%s' % e)
            else:
                raise e
def _exec(self):
    '''Handle cleanup on exiting the ``with`` block.'''
    try:
        self._args_set()
    except ValueError as e:
        log_format(self, error_info=e)
        msg = "参数错误,请检查参数后再请求! {}".format(e)
        return self.send_obj({'status': 1000, 'msg': msg})
    try:
        clz = self.__class__.__name__
        if clz in ('LoginMainHandler', 'LogoutMainHandler'):
            res = self.response()
        elif self.current_user is None:
            return self.render('login.html', message='')
        else:
            res = self.response()
    except Exception:
        import traceback
        e = traceback.format_exc()
        log.error('Internal Error: %s', e)
        return self.send_obj({'status': 10001, 'msg': str(e)})
    if not self._finished:
        return self.send_obj(res)
def _css(self):
    '''
    handle css
    When r.js merges CSS from different directories it rewrites URLs, so
    paths starting with @something@ are treated as relative paths and get
    rewritten incorrectly. The workaround is to run variable replacement
    and timestamping on every CSS file first, and only then let r.js merge.
    This slows processing down but avoids the problem.
    For speed this step is only supported at build time; the dev server
    does not use it.
    All static resource paths should be absolute; avoid referencing images
    by relative path inside CSS.
    '''
    # search all CSS files
    all_css_files = utils.FileSearcher(r'\.css$', self._build_css_dir, relative=False).search()
    # replace variables and add timestamps
    for dst in all_css_files:
        try:
            content = utils.readfile(dst)
            content = all_url(content, os.path.dirname(dst))
            content = replace(content, self._target)
            utils.writefile(dst, content)
        except Exception as e:
            if self._force:
                log.error('[css]%s' % e)
            else:
                raise e
def log_format(instance, func_name=None, params=None, error_info=None):
    """Format a log message.

    Args:
        instance: a class instance; in this codebase a Handler instance
        func_name: the method in the class whose call returned empty
        params: str, parameters worth noting in the log
        error_info: error message for error-level logs
    """
    if inspect.isclass(type(instance)):
        module_name = instance.__module__
        class_name = instance.__class__.__name__
        if not params:
            params = instance.request.uri
        end_time = time.time()
        spend_time = round((end_time - instance._start_time) * 1000, 2)
        if error_info:
            log.error('%s.%s failed spend_time:%sms params:(%s) error info:%s',
                      module_name, class_name, spend_time, params, error_info)
            return
        if func_name:
            log.warning('%s.%s call %s failed spend_time:%sms params:(%s)',
                        module_name, class_name, func_name, spend_time, params)
        else:
            log.warning('%s.%s failed spend_time:%sms params:(%s)',
                        module_name, class_name, spend_time, params)
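A sketch of how log_format is intended to be called from a handler; BaseHandler, get_user, and do_work are hypothetical, and the handler is assumed to set self._start_time when the request begins:

class DemoHandler(BaseHandler):  # hypothetical handler
    def response(self):
        user = self.get_user()  # hypothetical lookup
        if user is None:
            # warning-level entry: a method call came back empty
            log_format(self, func_name="get_user")
            return
        try:
            return self.do_work(user)  # hypothetical work
        except ValueError as e:
            # error-level entry: pass the exception as error_info
            log_format(self, error_info=e)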
def send_notify(taskName: str, event: str, body: str):
    try:
        now = dtime.datetime.now()
        if taskName not in sendedNotify:
            sendedNotify[taskName] = {}
        if event not in sendedNotify[taskName]:
            sendedNotify[taskName][event] = {"dtm": now, "body": body}
        else:
            delta = now - sendedNotify[taskName][event]['dtm']
            if delta < resendTime:
                log.info(f"Rejected report of event {event}: already sent.")
                return
            if cfg['notify']['onlyChanges'] and sendedNotify[taskName][event]['body'] == body:
                log.info(f"Rejected report of event {event}: unchanged since last send")
                return
        log.debug(f"New report of an event {taskName}: {event}")
        if notify.send_notify(taskName, event, body):
            # update
            sendedNotify[taskName][event] = {"dtm": now, "body": body}
        else:
            del sendedNotify[taskName][event]
    except Exception as e:
        log.error(f"Failed to send notify: {e}")
        # the entry may not exist yet, so drop it defensively
        sendedNotify.get(taskName, {}).pop(event, None)
def load_config(self, config: configparser.ConfigParser, proxy: dict = None) -> dict:
    self.cfg['proxy'] = proxy
    log.info(f"Connecting to {self.name} webhook")
    try:
        self.cfg["url"] = config.get(self.name, "url")
    except Exception as e:
        e = f"Bad {self.name} configuration: {e}"
        log.error(e)
        raise Exception(e)
    try:
        r = requests.get(self.cfg["url"], timeout=10, proxies=proxy)
        if r.status_code != 200:
            raise ConnectionError
        j = r.json()
        self.cfg['token'] = j['token']
        self.cfg['name'] = j['name']
        log.info(f"Connected to Discord webhook: {self.cfg['name']}")
    except ConnectionError:
        raise Exception("Bad answer from Discord. Check WEBHOOK URL")
    except KeyError:
        raise Exception("WEBHOOK doesn't return token")
    if 'token' not in self.cfg:
        e = "Discord WEBHOOK didn't return token"
        log.error(e)
        raise Exception(e)
    return self.cfg
def get_zoo_client(cluster_name="qconf"):
    """Get zoo client by cluster_name."""
    global ZOO_CLIENTS
    if cluster_name not in ZOO_CLIENTS:
        # get zookeeper hosts info
        zookeeper = ZdZookeeper.one(cluster_name=cluster_name, deleted="0")
        if not zookeeper:
            raise ZookeeperConfError(
                "Zookeeper not configured for cluster: {}!".format(cluster_name))
        # connect to zookeeper
        try:
            client = KazooClient(hosts=zookeeper.hosts,
                                 connection_retry={"max_tries": 3, "backoff": 2})
            client.start(timeout=3)
            ZOO_CLIENTS[cluster_name] = client
        except KazooTimeoutError as exc:
            log.error('Failed to connect zookeeper, %s', str(exc))
            return
    # check the connection's state; if not connected, reconnect
    zoo_client = ZOO_CLIENTS[cluster_name]
    if not zoo_client.connected:
        zoo_client.restart()
    return zoo_client
def get_zoo_client(cluster_name="qconf"):
    """Get zoo client by cluster_name."""
    global ZOO_CLIENTS
    if cluster_name not in ZOO_CLIENTS:
        # get zookeeper hosts info
        zookeeper = ZdZookeeper.one(cluster_name=cluster_name, deleted="0")
        if not zookeeper:
            raise ZookeeperConfError(
                "Zookeeper not configured for cluster: {}!".format(cluster_name))
        # connect to zookeeper with digest auth
        try:
            username = ZK_AUTH.get('user')
            pwd = ZK_AUTH.get('pwd')
            client = KazooClient(
                hosts=zookeeper.hosts,
                connection_retry={"max_tries": 3, "backoff": 2},
                default_acl=make_digest_acl(username, pwd, read=True, write=True,
                                            create=True, delete=True, admin=True),
                auth_data=[("digest", '%s:%s' % (username, pwd))])
            client.start(timeout=3)
            ZOO_CLIENTS[cluster_name] = client
        except KazooTimeoutError as exc:
            log.error('Failed to connect zookeeper, %s', str(exc))
            return
    # check the connection's state; if not connected, reconnect
    zoo_client = ZOO_CLIENTS[cluster_name]
    if not zoo_client.connected:
        zoo_client.start()
    return zoo_client
def execute_event_script(log: logging.Logger, script: FunctionType, taskName: str,
                         event: str, body: str) -> Tuple[bool, str]:
    try:
        return script(log, taskName, event, body)
    except Exception:
        log.error(f"Event script: {traceback.format_exc()}")
        return True, body
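execute_event_script treats script as a callable with the signature script(log, taskName, event, body) -> Tuple[bool, str]; a minimal hypothetical script showing that contract:

# Hypothetical event script: suppress the "heartbeat" event and tag
# every other body. Returning (False, body) blocks the notification;
# (True, body) lets it through.
def demo_event_script(log, taskName: str, event: str, body: str):
    if event == "heartbeat":
        log.debug(f"{taskName}: heartbeat suppressed")
        return False, body
    return True, f"[{taskName}] {body}"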
def response(self):
    """delete"""
    try:
        ZdSnapshot.one(id=self.id).delete_instance()
        return self.ajax_ok(close_current=True)
    except Exception as exc:
        log.error("error occurred while deleting snapshot, id: %s\n%s",
                  self.id, str(exc))
        return self.ajax_popup(code=300, msg="删除快照出错啦!")
def shutdown_me(signum=1, frame=1):
    """Stop the modules in the required order."""
    log.warning(f'Lootnika stopping on {cfg["rest"]["host"]}:{cfg["rest"]["port"]}')
    if selfControl.exit:
        return
    selfControl.exit = True
    selfControl.rate = 0.3
    n = 0
    try:
        while True:
            time.sleep(0.3)
            if not bool(selfControl.myThreads):
                break
            if selfControl.myThreads['RestServer']:
                if n < 1:
                    log.debug("Stopping REST server")
                    try:
                        if cfg["rest"]["host"] in ['::1', '0.0.0.0']:
                            host = '127.0.0.1'
                        else:
                            host = cfg["rest"]["host"]
                        cnx = httpClient.HTTPConnection(host, cfg["rest"]["port"], timeout=12)
                        cnx.request(method="GET", url='/a=stop?stop')
                        cnx.getresponse()
                    except Exception:
                        pass
                    n = 1
                continue
            elif selfControl.myThreads['Scheduler']:
                if n < 2:
                    log.debug("Stopping Scheduler thread")
                    scheduler.cmd = 'stop'
                    n = 2
            elif selfControl.myThreads['Datastore']:
                if n < 3:
                    log.debug("Stopping Datastore thread")
                    ds.close()
                    n = 3
            else:
                break
    except Exception:
        log.error(f'Shutdown failed: {traceback.format_exc()}')
    finally:
        selfControl.stop = True
        log.info("Lootnika stopped")
        if not stillWork:
            os._exit(1)
def get_split_DNSList(search_result):
    # clean the data returned by ES
    split_DNSList = []
    if 'aggregations' not in search_result:
        log.error('[mal_dns] Index {0} does not exist.'.format(ES_config["dns_index"]))
        return []
    for item in search_result[u'aggregations'][u'domain'][u'buckets']:
        split_DNSList.append(item[u'key'].encode('unicode-escape').split('.'))
    return split_DNSList
def store_run(storeDate):
    try:
        log.info("[mal_dns] Download started.")
        merge_blacklist.main(storeDate)
        log.info("[mal_dns] Download done.")
    except Exception as e:
        log.error("[mal_dns] Download failed.\n{0}".format(e))
def get_blacklist_module():
    parse_blacklist = moudle_name
    for file_name in parse_blacklist:
        module = __import__('get_blacklist.{0}'.format(file_name), fromlist=True)
        log.debug('Downloading {0}.'.format(file_name))
        try:
            module.main()
            log.debug('Download {0} completed.'.format(file_name))
        except Exception as e:
            log.error('Download {0} failed: {1}'.format(file_name, e))
def writefile(filename, content):
    '''write content to a file'''
    try:
        f = codecs.open(filename, 'w', C('encoding'))
        f.write(content)
        f.close()
    except Exception:
        log.error("write to %s failed" % filename)
        raise
def get_stat(host):
    """Get the status of a single node in a zookeeper cluster."""
    cluster_info = dict()
    try:
        zoo_client = KazooClient(hosts=host,
                                 connection_retry={"max_tries": 1, "backoff": 1})
        zoo_client.start(timeout=3)
        cluster_info[host] = zoo_client.command("mntr")
    except KazooTimeoutError as exc:
        log.error("Failed to connect zookeeper, %s", str(exc))
        cluster_info[host] = str(exc)
    return cluster_info
def get_file_timestamp(fpath):
    '''
    Get a file's timestamp.
    fpath: absolute path
    '''
    try:
        f = readfile(fpath, 'rb')
        m = hashlib.md5()
        m.update(f)
        md5 = md5toInt(m.hexdigest())
        return md5
    except Exception as e:
        log.error('[TimeStamp]%s' % e)
def load_config(self, config: configparser.ConfigParser, proxy: dict = None) -> dict:
    self.cfg['proxy'] = proxy
    log.info(f"Connecting to {self.name} webhook")
    try:
        self.cfg["url"] = config.get(self.name, "url")
        log.info(f"Slack using WEBHOOK {self.cfg['url']}")
    except Exception as e:
        e = f"Bad {self.name} configuration: {e}"
        log.error(e)
        raise Exception(e)
    return self.cfg
def new_toast(title: str, msg: str):
    if PLATFORM != "nt":
        return
    if len(msg) > 255:
        # keep within the 255-character toast limit
        msg = msg[:255]
    try:
        notification.notify(title=title,
                            message=msg,
                            app_name='AppWatch',
                            app_icon=f'{dataDir}notifier/chat_ava.ico',
                            timeout=10)
    except Exception as e:
        log.error(f"Failed to show windows notification: {e}")
def response(self):
    '''delete'''
    if not self.info_ids:
        return self.ajax_popup(close_current=False, code=300, msg="请选择某条记录进行删除")
    id_list = self.info_ids.split(',')
    try:
        del_query = ZdQconfAgent.delete().where(ZdQconfAgent.id << id_list)
        del_query.execute()
    except OperationalError as exc:
        log.error("error occurred while deleting agents, ids: %s\n%s",
                  id_list, str(exc))
        return self.ajax_popup(close_current=False, code=300, msg="删除失败!")
    return self.ajax_ok(close_current=False)
def _less(self):
    '''handle less files to css'''
    all_less_files = utils.FileSearcher(r'\.less$', self._build_css_dir, relative=False).search()
    for less in all_less_files:
        try:
            subprocess.call('lessc %s %s' % (less, re.sub(r"\.less", ".css", less)), shell=True)
            os.remove(less)
        except Exception as e:
            if self._force:
                log.error('[less]%s' % e)
            else:
                raise e
def get_stat(hosts):
    """Get the stat of the zookeeper cluster of the given idc."""
    cluster_info = dict()
    for host in hosts.split(","):
        try:
            zoo_client = KazooClient(hosts=host,
                                     connection_retry={"max_tries": 1, "backoff": 1})
            zoo_client.start(timeout=3)
            cluster_info[host] = zoo_client.command("mntr")
        except KazooTimeoutError as exc:
            log.error('Failed to connect zookeeper, %s', str(exc))
            cluster_info[host] = str(exc)
    return cluster_info
def send_notify(self, app: str, event: str, body: str) -> bool:
    try:
        data = {"username": "******", "content": body}
        res = requests.post(self.cfg['url'], json=data, timeout=10,
                            proxies=self.cfg['proxy'])
        if res.ok:
            log.info("Report sent")
            return True
        else:
            raise Exception("Server returned status %s" % res.status_code)
    except Exception as e:
        log.error("Failed to send report by Discord: %s" % e)
        return False
def send(self, content=None, content_len=None, code=200, headers=None):
    ''' '''
    if headers is None:
        headers = {}
    self.http_req_handler.send_response(code)
    headers['connection'] = 'close'
    # fill in the default Content-Type before the headers are written out
    if not headers.get('Content-Type'):
        headers['Content-Type'] = 'text/html;charset=' + C('encoding')
    for k in headers.keys():
        self.http_req_handler.send_header(k, headers.get(k))
    if content is not None and not re.match(utils.BINARY_CONTENT_TYPE_KEYWORDS,
                                            headers['Content-Type'], re.IGNORECASE):
        try:
            content = unicode(content).encode(C('encoding'), 'ignore')
        except Exception as e:
            log.error('[send]%s' % e)
def log_inspector():
    log.debug("log_inspector started")
    selfName = 'log_inspector'
    while True:
        try:
            for taskName, task in cfg['tasks']['logTask'].items():
                log.info(f"Check log {taskName}")
                logFile = task['file']
                templates = task['tmpl']
                try:
                    # TODO: reopen only if the file has changed
                    with open(logFile, encoding='utf-8') as f:
                        cnt = f.read()
                    for tmplName in templates:
                        tmpl = templater.get_tmpl(selfName, tmplName)
                        if tmpl in cnt:
                            ev = f"Found log expression {taskName}: {tmplName}"
                            log.warning(ev)
                            body = templater.tmpl_fill(selfName, 'error').replace(
                                '{{taskName}}', taskName, -1)
                            event = 'error'
                            new_toast('log_inspector', event)
                            if 'eventScript' in task:
                                allowSend, body = execute_event_script(
                                    log, task['eventScript'], taskName, event, body)
                            else:
                                allowSend = True
                            if allowSend:
                                send_notify(taskName, event, body)
                except FileNotFoundError:
                    log.error(f"Log file not found for {taskName}")
                except Exception as e:
                    log.error(f"Failed to parse log file {taskName}: {e}")
            sleep(intervalCheckMin * 2)
        except Exception:
            e = traceback.format_exc()
            log.critical(str(e))
            break
def _replace(self):
    '''Replace variables in all text files.'''
    files = utils.FileSearcher(r'.+', self._build_dir).search()
    for f in files:
        f = os.path.join(self._build_dir, f)
        if not utils.isBinary(f):
            try:
                content = utils.readfile(f)
                content = replace(content, self._target)
                utils.writefile(f, content)
            except Exception as e:
                if self._force:
                    log.error('[replace][%s]%s' % (f, e))
                else:
                    raise e
def _exec(self):
    '''Handle cleanup on exiting the ``with`` block.'''
    try:
        self._args_set()
    except ValueError as e:
        log_format(self, error_info=e)
        msg = "参数错误,请检查参数后再请求! {}".format(e)
        return self.send_obj({'status': 1000, 'msg': msg})
    try:
        res = self.response()
    except Exception:
        import traceback
        e = traceback.format_exc()
        log.error('Internal Error: %s', e)
        return self.send_obj({'status': 10001, 'msg': str(e)})
    if not self._finished:
        return self.send_obj(res)
def get_zoo_client(cluster_name="qconf"):
    """Get zoo client by cluster_name."""
    global ZOO_CLIENTS
    zookeeper = ZdZookeeper.one(cluster_name=cluster_name, deleted="0")
    if not zookeeper:
        raise ZookeeperConfError("Zookeeper not configured for cluster {}!".format(cluster_name))
    if cluster_name not in ZOO_CLIENTS:
        try:
            client = KazooClient(hosts=zookeeper.hosts,
                                 connection_retry={"max_tries": 1, "backoff": 1})
            client.start(timeout=3)
            ZOO_CLIENTS[cluster_name] = client
        except KazooTimeoutError as exc:
            log.error('Failed to connect zookeeper, %s', str(exc))
            return
    return ZOO_CLIENTS[cluster_name]
def check_whitelist(match_DNSList, match_blacklist):
    pattern = re.compile(r'\r\n|\n')
    try:
        with open(os.path.join(data_path, "local_Whitelist.txt"), 'r') as f:
            text = pattern.split(f.read())[6:-1]
    except Exception as e:
        log.error("Get whitelist failed.\n{0}".format(e))
        raise e
    split_DNSList = []
    for domain in text:
        split_DNSList.append(domain.split('.'))
    white_Trie = create_Trie(split_DNSList)
    ret_DNSList = []
    ret_blacklist = []
    for dns, black in zip(match_DNSList, match_blacklist):
        if not isMatch(white_Trie, dns, domain=[]):
            ret_DNSList.append(dns)
            ret_blacklist.append(black)
    return ret_DNSList, ret_blacklist
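create_Trie and isMatch are not defined in this section; a minimal sketch consistent with how they are called above, matching domains label by label over the split lists (the internals are an assumption, and the unused domain parameter is kept only for call-site compatibility):

# Minimal sketch, assuming a dict-based trie keyed by domain labels,
# with '_end' marking a complete whitelist entry.
def create_Trie(split_domains):
    root = {}
    for labels in split_domains:
        node = root
        for label in labels:
            node = node.setdefault(label, {})
        node['_end'] = True
    return root

def isMatch(trie, labels, domain=None):  # domain kept for call-site compatibility
    node = trie
    for label in labels:
        if label not in node:
            return False
        node = node[label]
    return '_end' in node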
def _start_task(self, taskName: str):
    self.curTask = taskName
    log.info(f'Start task {taskName}')
    try:
        lg = create_task_logger(taskName, console)
        ts = TaskStore(taskName, lg, self.taskList[taskName]['overwriteTaskstore'])
        taskId = self._mark_task_start(taskName)
        # [total, seen, new, differ, delete, task error, export error, last doc id]
        self.syncCount[taskId] = [-1, 0, 0, 0, 0, 0, 0, '']
        cf = self.taskList[taskName]
        fc = Factory(taskName, lg, cfg['exporters'][cf['exporter']], self.syncCount[taskId])
        picker = self.Picker(taskId, taskName, cf, lg, ts, fc, self.syncCount[taskId])
        picker.run()
        tab = '\n' + '\t' * 5
        lg.info(f"Task done"
                f"{tab}Total objects: {self.syncCount[taskId][0]}"
                f"{tab}Seen: {self.syncCount[taskId][1]}"
                f"{tab}New: {self.syncCount[taskId][2]}"
                f"{tab}Differ: {self.syncCount[taskId][3]}"
                f"{tab}Deleted: {self.syncCount[taskId][4]}"
                f"{tab}Task errors: {self.syncCount[taskId][5]}"
                f"{tab}Export errors: {self.syncCount[taskId][6]}")
        if self.syncCount[taskId][5] != 0:
            lg.warning('Task done with some errors. Check logs')
        if self.syncCount[taskId][6] != 0:
            log.warning('Task had errors with sending documents. '
                        f'Documents that were not sent are saved in the folder {picker.factory.failPath}')
        self.check_point(taskId, 'complete')
    except Exception as e:
        if log.level == 10:  # logging.DEBUG
            e = traceback.format_exc()
        log.error(f"Task {taskName} failed: {e}")
def run(self):
    cnx = self._create_db()
    cur = cnx.cursor()
    self.isReady = True
    while True:
        try:
            req, arg, res, token = self.requestQ.get()
            if req == '--close--':
                if self.status[token] == -1:
                    self.status[token] = 0
                self.requestQ.task_done()
                break
            elif req == '--commit--':
                cnx.commit()
                if self.status[token] == -1:
                    self.status[token] = 0
                self.requestQ.task_done()
                continue
            cur.execute(req, arg)
            if res:
                for row in cur:
                    res.put(row)
                res.put('--no more--')
            if self.status[token] == -1:
                self.status[token] = 0
            self.requestQ.task_done()
        except Exception as e:
            e = f'Unable to access {self.name}: {e}'
            log.error(e)
            self.status[token] = e
            self.requestQ.task_done()
    cnx.close()
    log.debug("Stopped Datastore thread")
def set_znode(cluster_name, path, data, znode_type='0', business=''):
    """Update or create a znode: both its metadata stored in MySQL
    and its data stored in ZooKeeper.
    """
    path = normalize_path(path)
    ZookeeperService.set_or_create(cluster_name, path, data)
    # store the znode's metadata (node type and business description) in MySQL
    znode = ZdZnode.one(cluster_name=cluster_name, path=path, deleted="0")
    if znode is None:
        znode = ZdZnode(cluster_name=cluster_name, path=path)
    znode.type = znode_type
    znode.business = business
    znode.save()
    try:
        # automatic snapshot (no snapshot is actually taken if the data is unchanged)
        SnapshotService.make_snapshot(cluster_name, path, data)
    except MakeSnapshotError as exc:
        log.error('make snapshot error: %s', str(exc))
def proxy(target_url, req, res):
    ''' '''
    if not target_url:
        return res.send(code=500, content='Empty url not supported')
    # redirect binary resources directly
    parsed_url = urlparse(target_url)
    if utils.isBinary(parsed_url.path, strict=True):
        return res.redirect(target_url)
    if 'GET' == req.method:
        request = R.get
    elif 'POST' == req.method:
        request = R.post
    else:
        # only GET and POST are proxied; avoid using `request` unbound
        return res.send(code=405, content='%s not supported' % req.method)
    try:
        # tell the remote server not to compress
        if req.headers.get('accept-encoding'):
            del req.headers['accept-encoding']
        if req.headers.get('host'):
            del req.headers['host']
        log.info('[proxy]requesting %s' % target_url)
        r = request(target_url, headers=req.headers)
        # the local server overrides Date and Server
        if r.headers.get('date'):
            del r.headers['date']
        if r.headers.get('server'):
            del r.headers['server']
        if r.headers.get('transfer-encoding'):
            del r.headers['transfer-encoding']
        log.info('[proxy] status=%d' % r.status_code)
        return res.send(code=r.status_code, content=r.content or '', headers=r.headers)
    except Exception as e:
        log.error('[proxy]%s' % e)
        return res.send(code=500, content='%s' % e)
def send_notify(self, app: str, event: str, body: str) -> bool:
    try:
        # build the message headers
        msg = MIMEMultipart('mixed')
        msg['Subject'] = templater.tmpl_fill(self.name, 'subject')
        msg['From'] = self.cfg['fromHeader']
        msg['To'] = self.cfg['sendTo']
        msg.attach(MIMEText(body))
    except Exception as e:
        log.error(str(e))
    log.debug(f"Connecting to email server {self.cfg['server']}")
    try:
        if self.cfg["useSSL"]:
            s = smtplib.SMTP_SSL(host=self.cfg['server'], port=self.cfg['port'])
            s.ehlo()
            s.login(self.cfg['user'], self.cfg['password'])
            s.auth_plain()
        else:
            s = smtplib.SMTP(self.cfg['server'], self.cfg['port'])
            # the handshake is mandatory; ehlo() returns a tuple, so the
            # calls cannot be chained
            s.ehlo()
            s.starttls()
            s.ehlo()
            s.login(self.cfg['user'], self.cfg['password'])
        log.debug("Sending report")
        s.sendmail(self.cfg["fromHeader"], self.cfg["sendTo"], msg.as_string())
        log.info(f"Report of an event {app} sent")
        return True
    except Exception as e:
        # a generic Exception has no errno, so read it defensively
        if getattr(e, 'errno', None) == 11004:
            log.error("Failed to connect to email server")
        else:
            log.error("Failed to send report: %s" % e)
        return False
def send_notify(self, app: str, event: str, body: str) -> bool:
    try:
        data = json.dumps({"text": body})
        headers = {"Content-type": "application/json",
                   "Content-Length": str(len(data))}
        # pass data and headers by keyword: requests.post's second and third
        # positional parameters are data and json, not headers
        res = requests.post(self.cfg['url'], data=data, headers=headers,
                            timeout=10, proxies=self.cfg['proxy'])
        if res.status_code != 200:
            raise Exception("Server returned status %s" % res.status_code)
        log.info("Report sent")
        return True
    except Exception as e:
        log.error("Failed to send report by Slack: %s" % e)
        return False
def _search(self, tpl):
    '''recursive search'''
    try:
        abspath = utils.abspath(os.path.join(C('template_dir'), tpl))
        content = utils.readfile(abspath)
        iters = re.finditer(self._pattern, content)
        for i in reversed(list(iters)):
            tpl = utils.filterRelPath(i.group(3))
            if C('ignore_parents') and tpl.endswith('parent.' + C('template_ext')):
                continue
            if self._history.get(tpl) is None:
                self._result.append(tpl)
                self._history[tpl] = 1
                if 'include' == i.group(1):
                    self._include_result.append(tpl)
                self._search(tpl)
    except Exception as e:
        log.error('[deps]%s' % e)
def sync_fetch(url, method="POST", data=None, timeout=REQ_TIMEOUT):
    """Synchronous request.

    @method, str, request method
    @data, dict, request data
    """
    if data:
        if method == "GET":
            url += '?' + urllib.urlencode(data)
            data = None
        else:
            data = urllib.urlencode(data)
    content = None
    for _ in range(3):
        try:
            content = urllib2.urlopen(url, data, timeout=timeout)
            break
        except Exception:
            continue
    if not content:
        log.error("url:%s failed to fetch!" % url)
    return content
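A hypothetical call of sync_fetch (Python 2, matching the urllib2 usage above); a GET folds data into the query string, and the caller must handle a None return after three failed attempts:

resp = sync_fetch("http://example.com/api", method="GET",
                  data={"q": "zookeeper"}, timeout=5)
if resp is not None:
    body = resp.read()  # urllib2 response object supports read()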
def load_config(self, config: configparser.ConfigParser, proxy: dict = None) -> dict:
    self.cfg['proxy'] = proxy
    try:
        self.cfg["sendTo"] = config.get(self.name, "sendTo")
        self.cfg["server"] = config.get(self.name, "server")
        self.cfg["port"] = config.getint(self.name, "port")
        self.cfg["useSSL"] = config.getboolean(self.name, "useSSL")
        self.cfg["user"] = config.get(self.name, "user")
        self.cfg["password"] = config.get(self.name, "password")
        self.cfg["fromHeader"] = config.get(self.name, "fromHeader")
        log.info(f'Recipient mail address {self.cfg["sendTo"]}')
    except Exception as e:
        e = f"Bad {self.name} configuration: {e}"
        log.error(e)
        raise Exception(e)
    if re.findall(r'\w+@\w+\.\w+', self.cfg["sendTo"]):
        log.debug(f'Recipient mail address: {self.cfg["sendTo"]}')
    else:
        log.error("Wrong email sendTo.")
        raise SystemExit(1)
    return self.cfg
def main(es, gte, lte, time_zone, dip):
    # Based on the first check, collect every sip with a TCP connection to the
    # given dip in the current window, producing sip_list
    result = query_last(es=es, gte=gte, lte=lte, time_zone=time_zone, dip=dip)
    sip_list = []
    if 'aggregations' not in result:
        log.error('[mal_dns] Index {0} does not exist.'.format(ES_config["tcp_index"]))
        return []
    for sip in result["aggregations"]["sip"]["buckets"]:
        sip_list.append(sip["key"])
    # widen the check window to 72h
    temp_lte = datetime.datetime.strptime(lte, '%Y-%m-%d %H:%M:%S')
    gt = (temp_lte - datetime.timedelta(hours=72)).strftime('%Y-%m-%d %H:%M:%S')
    # filter by dip and sip_list, aggregate each sip into 5m buckets, and get
    # the traffic time series for every sip-dip pair
    res = get_date_flow(es=es, gte=gt, lte=lte, time_zone=time_zone, dip=dip, sip_list=sip_list)
    ret_siplist = []
    # analyze each sip-dip pair
    for sip_item in res["aggregations"]["sip"]["buckets"]:
        # skip pairs with too few data points
        if len(sip_item["date"]["buckets"]) < 3:
            continue
        # split the time series into a traffic list (flowlist) and a timestamp list (datelist)
        datelist = []
        flowlist = []
        for item in sip_item["date"]["buckets"]:
            datelist.append(item["key"])
            flowlist.append(item["flow"]["value"])
        # compute the gaps between adjacent timestamps in datelist, giving date_dev
        date_dev = [datelist[i + 1] - datelist[i] for i in range(len(datelist) - 1)]
        # the sip-dip communication is considered regular if and only if more than
        # half of the values in both date_dev and flowlist are identical, i.e. the
        # MAD of both lists is 0
        if (calc_MAD(date_dev) == 0) and (calc_MAD(flowlist) == 0):
            ret_siplist.append(sip_item["key"])
    return ret_siplist
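calc_MAD is not shown in this section; a sketch of a median-absolute-deviation helper consistent with the zero-MAD regularity test above (the real implementation may differ):

# Minimal MAD sketch: the median of absolute deviations from the median.
# It is 0 exactly when more than half of the values are identical, which
# is the regularity condition main() relies on.
def calc_MAD(values):
    s = sorted(values)
    median = s[len(s) // 2]  # upper median for even-length lists
    dev = sorted(abs(v - median) for v in values)
    return dev[len(dev) // 2]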
    if not utils.isList(css_modules):
        css_modules = ['main']
    for css in css_modules:
        try:
            if not utils.isStr(css):
                continue
            css = re.sub(r'^\/+', '', css)
            if not css.endswith('.css'):
                css += '.css'
            css_realpath = os.path.join(self._build_css_dir, css)
            self.build_css(css_realpath, css_realpath)
        except Exception as e:
            if self._force:
                log.error('[css]%s' % e)
            else:
                raise e

@classmethod
def build_css(self, src, dst):
    '''
    handle one css src to dst
    merge, and compress on demand
    '''
    subprocess.call('node %s -o cssIn=%s out=%s' % (RJS_PATH, src, dst), shell=True)
    if self._compress:
        subprocess.call('java -jar ' + YC_PATH + ' --type css --charset '
                        + C('encoding') + ' ' + dst + ' -o ' + dst, shell=True)

@classmethod