def load_config(self, config: configparser, proxy: dict = None) -> dict:
    """Load and validate the Discord webhook configuration.

    Reads the ``url`` option from the config section named after this
    connector, then calls the webhook once so Discord returns its
    ``token`` and ``name`` (this doubles as a connectivity check).

    :param config: parsed configparser with a section for this connector
    :param proxy: optional requests-style proxies mapping
    :return: ``self.cfg`` with 'url', 'token', 'name' and 'proxy' filled in
    :raises Exception: on a bad config section or a bad webhook answer
    """
    self.cfg['proxy'] = proxy
    log.info(f"Connecting to {self.name} webhook")
    try:
        self.cfg["url"] = config.get(self.name, "url")
    except Exception as e:
        msg = f"Bad {self.name} configuration: {e}"
        log.error(msg)
        raise Exception(msg) from e
    try:
        r = requests.get(self.cfg["url"], timeout=10, proxies=proxy)
        if r.status_code != 200:
            raise ConnectionError
        j = r.json()
        self.cfg['token'] = j['token']
        self.cfg['name'] = j['name']
        log.info(f"Connected to Discord webhook: {self.cfg['name']}")
    except ConnectionError:
        # NOTE(review): network failures from requests raise
        # requests.exceptions.ConnectionError, which is NOT a subclass of the
        # builtin ConnectionError caught here and will propagate raw — confirm
        # whether callers expect that.
        raise Exception("Bad answer from Discord. Check WEBHOOK URL") from None
    except KeyError as e:
        raise Exception("WEBHOOK doesn't return token") from e
    # BUG FIX: the old trailing `if 'token' not in self.cfg` re-check was
    # unreachable — a missing token always raises KeyError above. Removed.
    return self.cfg
def send_notify(self, app: str, event: str, body: str) -> bool:
    """Send the report body by email over SMTP or SMTP-over-SSL.

    :param app: application/task name, used only for logging
    :param event: event name (unused here; kept for the notifier interface)
    :param body: plain-text message body
    :return: True when the server accepted the mail, else False
    """
    msg = None
    try:
        # Build the message headers
        msg = MIMEMultipart('mixed')
        msg['Subject'] = templater.tmpl_fill(self.name, 'subject')
        msg['From'] = self.cfg['fromHeader']
        msg['To'] = self.cfg['sendTo']
        msg.attach(MIMEText(body))
    except Exception as e:
        log.error(str(e))
    log.debug(f"Connecting to email server {self.cfg['server']}")
    s = None
    try:
        if self.cfg["useSSL"]:
            s = smtplib.SMTP_SSL(host=self.cfg['server'], port=self.cfg['port'])
            s.ehlo()
            s.login(self.cfg['user'], self.cfg['password'])
            s.auth_plain()
        else:
            s = smtplib.SMTP(self.cfg['server'], self.cfg['port'])
            # BUG FIX: smtplib methods return (code, message) tuples, so the
            # old chained call `s.ehlo().starttls().ehlo().login(...)` raised
            # AttributeError on the first tuple. Call them one by one.
            s.ehlo()
            s.starttls()  # TLS handshake is mandatory before login
            s.ehlo()
            s.login(self.cfg['user'], self.cfg['password'])
        log.debug(f"Sending report")
        s.sendmail(self.cfg["fromHeader"], self.cfg["sendTo"], msg.as_string())
        log.info(f"Report of an event {app} sent")
        return True
    except Exception as e:
        # BUG FIX: not every exception carries .errno — the old `e.errno`
        # access itself raised AttributeError. 11004 = WSAHOST_NOT_FOUND.
        if getattr(e, 'errno', None) == 11004:
            log.error("Fail to connect to email server")
        else:
            log.error("Fail to send report: %s" % e)
        return False
    finally:
        # close the SMTP session; the old code leaked the connection
        if s is not None:
            try:
                s.quit()
            except Exception:
                pass
def delay_calc(taskStartTime):
    """Return the datetime of the next scheduled task start.

    ``taskStartTime`` is either 'now' (any case) or an 'HH:MM:SS' string;
    in the latter case the delay until that wall-clock time (today, or
    tomorrow if already passed) is added to the current time.  A bad
    format logs an error and shuts the process down via shutdown_me().
    """
    startTime = dtime.datetime.now()
    if taskStartTime.lower() != 'now':
        now = dtime.datetime.now()
        # current time of day expressed as seconds since midnight
        now = now.hour * 3600 + now.minute * 60 + now.second
        try:
            nextStart = dtime.datetime.strptime(taskStartTime, '%H:%M:%S')
            nextStart = nextStart.hour * 3600 + nextStart.minute * 60 + nextStart.second
            if now > nextStart:
                # start time already passed today -> schedule for tomorrow:
                # seconds left today + seconds until the start time tomorrow
                delay = 86400 - now + nextStart
                startTime += dtime.timedelta(seconds=delay)
                # `onStart` is a module-level flag — presumably "log only on
                # first scheduling"; TODO confirm against caller
                if onStart:
                    log.info(f"Tasks will start at {taskStartTime}")
            else:
                delay = nextStart - now
                startTime += dtime.timedelta(seconds=delay)
                if onStart:
                    log.info(f"Tasks will start today at {taskStartTime}")
        except Exception as e:
            log.error(
                f'Check parameter taskStartTime: {e}. Correct format used HH:MM:SS'
            )
            time.sleep(2)
            shutdown_me(1, '')
    return startTime
def send_notify(taskName: str, event: str, body: str):
    """Send a notification for (taskName, event), throttled by resendTime.

    Keeps the last send time and body per (task, event) in the module-level
    ``sendedNotify`` dict: repeats within ``resendTime`` are dropped, and with
    cfg['notify']['onlyChanges'] an unchanged body is dropped too.  On a
    failed send the entry is removed so the next attempt is not throttled.
    """
    try:
        now = dtime.datetime.now()
        if taskName not in sendedNotify:
            sendedNotify[taskName] = {}
        if event not in sendedNotify[taskName]:
            sendedNotify[taskName][event] = {"dtm": now, "body": body}
        else:
            delta = now - sendedNotify[taskName][event]['dtm']
            if delta < resendTime:
                log.info(
                    f"Reject report of an event {event}: is already sent.")
                return
            if cfg['notify']['onlyChanges'] and sendedNotify[taskName][event][
                    'body'] == body:
                log.info(
                    f"Reject report of an event {event}: is not changed from last sent"
                )
                return
        log.debug(f"New report of an event {taskName}: {event}")
        if notify.send_notify(taskName, event, body):
            # remember what was sent for throttling / change detection
            sendedNotify[taskName][event] = {"dtm": now, "body": body}
        else:
            del sendedNotify[taskName][event]
    except Exception as e:
        log.error(f"Fail send notify: {e}")
        # BUG FIX: the old cleanup `del sendedNotify[taskName][event]` could
        # itself raise KeyError inside this handler (e.g. when the failure
        # happened before the entry existed); pop() is safe either way.
        sendedNotify.get(taskName, {}).pop(event, None)
def distribute_request(self, http_req_handler):
    '''Route the request to the first handler whose URL pattern matches.

    Proxy rules (when enabled) take precedence and forward the request.
    Handlers are tried in registration order; a handler returning True
    explicitly passes the request on to the next matching handler.
    Unmatched requests get a 405 answer.
    '''
    path = urlparse(http_req_handler.path).path
    handled = False
    # proxy support
    if C('enable_proxy') and utils.isDict(C('proxy')):
        for reg, target in C('proxy').items():
            target_path = get_proxy_url(http_req_handler.path, reg, target)
            if target_path:
                log.info('[proxy](%s) to (%s)' % (http_req_handler.path, target_path))
                return proxy(target_path, Request(http_req_handler), Response(http_req_handler))
    for h in self.handlers:
        # BUG FIX: `and` binds tighter than `or`, so 'ALL'-method handlers
        # used to match every path without consulting their pattern.
        # Parenthesized so the pattern check always applies.
        if ('ALL' == h.get('method') or h.get('method') == http_req_handler.command) \
                and re.findall(h.get('pattern'), path):
            handled = True
            ret = (h.get('handler'))(Request(http_req_handler), Response(http_req_handler))
            if True == ret:
                continue
            else:
                break
    # if not handled by any handlers, 405
    if not handled:
        log.error('%s is not handled' % path)
        # NOTE(review): send_header() without send_response() emits a bare
        # header line, not a status — confirm send_response was intended.
        http_req_handler.send_header(405, '%s not supported' % path)
        http_req_handler.end_headers()
        # BUG FIX: was `self.http_req_handler.wfile.close()` — the handler is
        # a local parameter; the attribute does not exist (AttributeError).
        http_req_handler.wfile.close()
def dir_to_uri(self, module_name):
    """Return the request URI generated for the given module/path name.

    Args:
        module_name: directory/module name the URI was derived from.
    """
    uri = self._uri
    assert uri.startswith('/'), "URI must startswith '/'"
    log.info('request uri auto generate: {} ---> {}'.format(module_name, uri))
    return uri
def post_delete_handler(sender, instance):
    """Callback fired after any model instance is deleted; writes an audit log line."""
    user = Database.current_user
    account = '******' if user is None else user['account']
    log.info('account=%s,command=delete,table=%s,record_id=%s',
             account, instance._meta.db_table, instance.id)
def shutdown_me(signum, frame, appServerSvc=None):
    # Signal handler: catches Ctrl-C / SIGTERM and stops modules in order.
    # When running as a Windows service, appServerSvc is the service wrapper
    # whose daemon must be told to exit first.
    log.warning('Stopping...')
    if appServerSvc:
        appServerSvc.daemon.exit()
    log.info("Shutdown is successful")
    # hard exit: skips atexit handlers, kills all threads immediately
    os._exit(0)
def shutdown_me(signum=1, frame=1):
    """Stop the modules in the required order (REST -> Scheduler -> Datastore).

    Re-entrant safe: a second call returns immediately once selfControl.exit
    is set.  Polls selfControl.myThreads and advances the step counter ``n``
    so each stop action is issued only once.
    """
    log.warning(
        f'Lootnika stopping on {cfg["rest"]["host"]}:{cfg["rest"]["port"]}')
    if selfControl.exit:
        return
    selfControl.exit = True
    selfControl.rate = 0.3  # poll thread states faster while shutting down
    n = 0
    try:
        while True:
            time.sleep(0.3)
            if not bool(selfControl.myThreads):
                break
            if selfControl.myThreads['RestServer']:
                if n < 1:
                    log.debug("Stopping REST server")
                    try:
                        # the REST server only answers on a concrete address
                        if cfg["rest"]["host"] in ['::1', '0.0.0.0']:
                            host = '127.0.0.1'
                        else:
                            host = cfg["rest"]["host"]
                        # ask the REST server to stop itself over HTTP
                        cnx = httpClient.HTTPConnection(host,
                                                        cfg["rest"]["port"],
                                                        timeout=12)
                        cnx.request(method="GET", url='/a=stop?stop')
                        cnx.getresponse()
                    except Exception:
                        pass
                    n = 1
                continue  # wait until the REST thread is actually gone
            elif selfControl.myThreads['Scheduler']:
                if n < 2:
                    log.debug("Stopping Scheduler thread")
                    scheduler.cmd = 'stop'
                    n = 2
            elif selfControl.myThreads['Datastore']:
                if n < 3:
                    log.debug("Stopping Datastore thread")
                    ds.close()
                    n = 3
            else:
                break
    except Exception as e:
        log.error(f'Shutdown failed: {traceback.format_exc()}')
    finally:
        selfControl.stop = True
        log.info("Lootnika stopped")
        if not stillWork:
            os._exit(1)
def store_run(storeDate):
    """Download and merge the mal_dns blacklist for the given date.

    Errors are logged, never raised, so a failed download does not kill
    the calling scheduler.
    """
    try:
        log.info("[mal_dns] Download started.")
        merge_blacklist.main(storeDate)
        log.info("[mal_dns] Download done.")
    # BUG FIX (portability): `except Exception, e` is Python-2-only syntax;
    # the `as e` form works on Python 2.6+ and Python 3 alike.
    except Exception as e:
        log.error("[mal_dns] Download failed.\n{0}".format(e))
def _work_manager(self, taskName: str = '', lastTask: str = '', cmd=False):
    """Wrapper around the task executor (Picker).

    Works like a timer to defer the run until the scheduled time, so the
    scheduler-status check also happens here.
    NOTE: currently the scheduler itself checks the start time and launches
    the task immediately.
    """
    self._update_startTime()
    if not (self.status == 'ready' or self.status == 'wait'):
        log.warning(
            f'Previous task is still running. Next start will be at {self.startTime}'
        )
        return
    self.status = 'work'  # it must only run from the 'ready' state
    if self.taskCycles > 0:
        self.taskCycles -= 1
        # if self.taskCycles==0: self.startTime = None
    # an explicit taskName is only possible when cmd=True
    if taskName != '':
        self._start_task(taskName)
    else:
        if not cmd:
            log.info('New tasks cycle')
        else:
            log.info('Start all tasks')
        for taskName in self.taskList:
            # on cancellation do not continue with the remaining tasks
            if self.status == 'cancel':
                # from here on the worker itself keeps watch, even when paused
                self.curTask = ''
                self.status = 'ready'
                return
            else:
                self._start_task(taskName)
    self.curTask = ''
    if self.taskCycles > 0:
        self.status = 'wait'
    else:
        self.status = 'ready'
    if cmd:
        log.info('All tasks completed')
    else:
        if self.startTime is None:
            log.info('Tasks cycle done. Task replays are over')
        else:
            log.info(f'Tasks cycle done. Left: {self.taskCycles}')
def load_config(self, config: configparser, proxy: dict = None) -> dict:
    """Read the Slack webhook URL from the config section named after this
    connector, remember it together with the proxy map, and return self.cfg.

    Raises Exception when the section or option is missing/unreadable.
    """
    self.cfg['proxy'] = proxy
    log.info(f"Connecting to {self.name} webhook")
    try:
        url = config.get(self.name, "url")
    except Exception as err:
        msg = f"Bad {self.name} configuration: {err}"
        log.error(msg)
        raise Exception(msg)
    self.cfg["url"] = url
    log.info(f"Slack using WEBHOOK {self.cfg['url']}")
    return self.cfg
def send_notify(self, app: str, event: str, body: str) -> bool:
    """POST the report body to the Discord webhook; True on success, False otherwise."""
    payload = {"username": "******", "content": body}
    try:
        res = requests.post(self.cfg['url'], json=payload,
                            timeout=10, proxies=self.cfg['proxy'])
        if not res.ok:
            raise Exception("Server return status %s" % res.status_code)
    except Exception as e:
        log.error("Fail sent report by Discord %s" % e)
        return False
    log.info(f"Report sent")
    return True
def run(self, Picker):
    """Scheduler main loop: waits for startup, then launches _work_manager
    threads when the task time arrives, until self.cmd == 'stop'.

    :param Picker: Picker class handed to the work manager for task runs.
    """
    log.debug("Starting Scheduler thread")
    self.Picker = Picker
    # wait until all modules have started (or a stop was requested)
    while not selfControl.started and self.cmd != 'stop':
        time.sleep(0.2)
    while self.cmd != 'stop':
        self._get_workers()
        # print('%s >= %s is %s' %(dtime.datetime.now(), self.startTime, self._isTaskTime()))
        # message(('self.status', self.status), clrSun)
        if self.status == 'ready':
            # if the schedule is disabled or everything has run,
            # switch to waiting mode
            if self.taskCycles > 0:
                self.status = 'wait'
                if self._isTaskTime():
                    ht = Thread(name='work_manager', target=self._work_manager)
                    ht.start()
                    self.workers.append(ht)
        # all subsequent replays are counted from the first one
        else:
            if self.taskCycles > 0:
                if self._isTaskTime():
                    ht = Thread(name='work_manager', target=self._work_manager)
                    ht.start()
                    self.workers.append(ht)
            elif self.taskCycles == 0:
                # -1 means the schedule is switched off
                if self.status == 'ready':
                    log.info('Tasks cycle done')
                    # print('!#cycle Status', self.status)
        time.sleep(1)
    # on exit, wait for the workers to finish and cancel timers
    self.status = 'stop'
    self._get_workers()
    for ht in self.workers:
        # message(ht,clrSun)
        ht.join()
    log.debug("Stopped Scheduler thread")
    return
def log_inspector():
    """Watcher thread: periodically scans configured log files for template
    expressions and sends an 'error' notification when one is found.

    Runs forever; only an unexpected top-level error breaks the loop.
    """
    log.debug("log_inspector started")
    selfName = 'log_inspector'
    while True:
        try:
            for taskName, task in cfg['tasks']['logTask'].items():
                log.info(f"Check log {taskName}")
                logFile = task['file']
                templates = task['tmpl']
                try:
                    # TODO open if file is changed
                    with open(logFile, encoding='utf-8') as f:
                        cnt = f.read()
                        for tmplName in templates:
                            tmpl = templater.get_tmpl(selfName, tmplName)
                            # plain substring search against the whole file
                            if tmpl in cnt:
                                ev = f"Found log expression {taskName}: {tmplName}"
                                log.warning(ev)
                                body = templater.tmpl_fill(selfName, 'error').replace(
                                    '{{taskName}}', taskName, -1)
                                event = 'error'
                                new_toast('log_inspector', event)
                                # an optional per-task script may veto/modify the notification
                                if 'eventScript' in task:
                                    allowSend, body = execute_event_script(
                                        log, task['eventScript'], taskName,
                                        event, body)
                                else:
                                    allowSend = True
                                if allowSend:
                                    send_notify(taskName, event, body)
                except FileNotFoundError:
                    log.error(f"Not found log file {taskName}")
                except Exception as e:
                    log.error(f"Fail to parse log file {taskName}: {e}")
            sleep(intervalCheckMin * 2)
        except Exception:
            e = traceback.format_exc()
            log.critical(str(e))
            break
def _start_task(self, taskName: str):
    """Run one task synchronously: build its logger, task store and factory,
    run the Picker, log a summary table and write a completion checkpoint.

    Any failure is logged (with a full traceback at DEBUG level), not raised.
    """
    self.curTask = taskName
    log.info(f'Start task {taskName}')
    try:
        lg = create_task_logger(taskName, console)
        ts = TaskStore(taskName, lg,
                      self.taskList[taskName]['overwriteTaskstore'])
        taskId = self._mark_task_start(taskName)
        # [total, seen, new, differ, delete, task error, export error, last doc id]
        self.syncCount[taskId] = [-1, 0, 0, 0, 0, 0, 0, '']
        cf = self.taskList[taskName]
        fc = Factory(taskName, lg, cfg['exporters'][cf['exporter']],
                     self.syncCount[taskId])
        picker = self.Picker(taskId, taskName, cf, lg, ts, fc,
                             self.syncCount[taskId])
        picker.run()
        tab = '\n' + '\t' * 5
        lg.info(f"Task done"
                f"{tab}Total objects: {self.syncCount[taskId][0]}"
                f"{tab}Seen: {self.syncCount[taskId][1]}"
                f"{tab}New: {self.syncCount[taskId][2]}"
                f"{tab}Differ: {self.syncCount[taskId][3]}"
                f"{tab}Deleted: {self.syncCount[taskId][4]}"
                f"{tab}Task errors: {self.syncCount[taskId][5]}"
                f"{tab}Export errors: {self.syncCount[taskId][6]}")
        if self.syncCount[taskId][5] != 0:
            lg.warning('Task done with some errors. Check logs')
        if self.syncCount[taskId][6] != 0:
            log.warning(
                'Task had errors with sending documents. '
                f'Documents that were not sent are saved in a folder {picker.factory.failPath}'
            )
        self.check_point(taskId, 'complete')
    except Exception as e:
        # at DEBUG level (10) replace the message with the full traceback
        if log.level == 10:
            e = traceback.format_exc()
        log.error(f"Fail with task {taskName}: {e}")
def proxy(target_url,req,res):
    '''Forward the request to target_url and relay the answer back.

    Binary resources are redirected instead of proxied; hop-by-hop and
    server-identifying headers are stripped in both directions.
    (Python 2 code — note the old except syntax.)
    '''
    if not target_url:
        return res.send(code = 500,content = 'Empty url not supported')
    # binary resources: redirect directly instead of proxying the bytes
    parsed_url = urlparse(target_url)
    if utils.isBinary(parsed_url.path,strict = True):
        return res.redirect(target_url)
    # NOTE(review): only GET/POST are mapped; any other verb leaves
    # `request` unbound and raises below — confirm that is acceptable
    if 'GET' == req.method:
        request = R.get
    elif 'POST' == req.method:
        request = R.post
    try:
        # tell the remote server not to compress the answer
        if req.headers.get('accept-encoding'):
            del req.headers['accept-encoding']
        if req.headers.get('host'):
            del req.headers['host']
        log.info('[proxy]requesting %s'%target_url)
        r = request(target_url,headers = req.headers)
        # the local server overwrites Date and Server
        if r.headers.get('date'):
            del r.headers['date']
        if r.headers.get('server'):
            del r.headers['server']
        if r.headers.get('transfer-encoding'):
            del r.headers['transfer-encoding']
        log.info('[proxy] status=%d'%r.status_code)
        return res.send(code = r.status_code,content = r.content or '',headers = r.headers)
    except Exception, e:
        log.error('[proxy]%s'%e)
        return res.send(code = 500,content = '%s'%e)
def send_notify(self, app: str, event: str, body: str) -> bool:
    """POST the report body to the Slack webhook.

    :param app: application name (unused; kept for the notifier interface)
    :param event: event name (unused; kept for the notifier interface)
    :param body: message text
    :return: True when Slack answered 200, else False
    """
    try:
        data = json.dumps({"text": body})
        headers = {
            "Content-type": "application/json",
            # header values must be strings; measure the actual JSON payload,
            # not the raw body (the old int(len(body)) was wrong twice over)
            'Content-Length': str(len(data))
        }
        # BUG FIX: `headers` was passed positionally and landed in the
        # `json=` parameter of requests.post(), so the headers were sent as
        # the request body metadata. Pass both arguments by keyword.
        res = requests.post(self.cfg['url'], data=data, headers=headers,
                            timeout=10, proxies=self.cfg['proxy'])
        if res.status_code != 200:
            raise Exception("Server return status %s" % res.status_code)
        log.info(f"Report sent")
        return True
    except Exception as e:
        log.error("Fail sent report by Slack %s" % e)
        return False
def load_config(self, config: configparser, proxy:dict = None) -> dict:
    """Read the SMTP settings for this connector and sanity-check sendTo.

    :param config: parsed configparser with a section for this connector
    :param proxy: optional proxies mapping, stored as-is in self.cfg
    :return: self.cfg with server/port/useSSL/user/password/fromHeader/sendTo
    :raises Exception: when an option is missing or has a wrong type
    :raises SystemExit: when sendTo does not look like an email address
    """
    self.cfg['proxy'] = proxy
    try:
        self.cfg["sendTo"] = config.get(self.name, "sendTo")
        self.cfg["server"] = config.get(self.name, "server")
        self.cfg["port"] = config.getint(self.name, "port")
        self.cfg["useSSL"] = config.getboolean(self.name, "useSSL")
        self.cfg["user"] = config.get(self.name, "user")
        self.cfg["password"] = config.get(self.name, "password")
        self.cfg["fromHeader"] = config.get(self.name, "fromHeader")
        log.info(f'Recipient mail address {self.cfg["sendTo"]}')
    except Exception as e:
        msg = f"Bad {self.name} configuration: {e}"
        log.error(msg)
        raise Exception(msg) from e
    # BUG FIX: the dot was unescaped (r'\w+@\w+.\w+'), so addresses like
    # "user@hostXcom" also passed the check. Escape it.
    if re.findall(r'\w+@\w+\.\w+', self.cfg["sendTo"]):
        log.debug(f'Recipient mail address: {self.cfg["sendTo"]}')
    else:
        log.error("Wrong email sendTo.")
        raise SystemExit(1)
    return self.cfg
def run(self):
    """Self-control loop: tracks which module threads are alive, reports a
    successful startup once all are up, and declares a crash when a module
    dies (or fails to start within the time limit)."""
    n = 1
    crash = False  # after a crash, only keep refreshing thread statuses
    while True:
        self.resources_usage()
        self.threads_names()
        # for i in get_threads():
        #     sout.print(f'{i} {i.isAlive()}', 'green')
        # print('------')
        for i in self.myThreads:
            if i in self.allThreads:
                # message(i+ ' Run',clrSun)
                self.myThreads[i] = True
            else:
                self.myThreads[i] = False
        # mark whether every expected module has started
        self.isVerified = True
        for i in self.myThreads:
            if not self.myThreads[i]:
                self.isVerified = False
        if not crash:
            # waiting for all modules to start
            if not self.started:
                if self.isVerified:
                    if sys.argv[0].lower().endswith('.exe'):
                        log.info(
                            f"Lootnika started - Executable version: {__version__}_{platform}"
                        )
                    else:
                        log.info(
                            f"Lootnika started - Source version: {__version__}_{platform}"
                        )
                    log.info(
                        f"Welcome to http://localhost:{cfg['rest']['port']}/admin"
                    )
                    ds.execute("UPDATE lootnika SET self_status='working'")
                    self.started = True
                    self.rate = 2  # may poll less frequently once started
                else:
                    n += 1
                    if n == 20:  # startup time limit (~20 polls)
                        crash = self.crash(
                            'One of the modules does not work correctly')
                    elif n == 10:
                        log.warning("detected slow Lootnika startup")
            # otherwise watch over the running modules
            else:
                if not self.isVerified and not self.exit:
                    crash = self.crash(
                        "One of the modules does not work correctly")
        time.sleep(self.rate)
def _deco(ch, method, properties, body):
    """RabbitMQ consumer wrapper: set up a request context, run the wrapped
    business callback `func`, mail the traceback on failure in the 'conf'
    environment, and always ack the message in the end.

    (Log/mail strings are intentionally left in Chinese — they are runtime
    output of the original project.)
    """
    try:
        context.RequestContext()
        log.info("开始执行业务方法")
        func(ch, method, properties, body)
    except Exception as e:
        print(e)
        if conf.env == 'conf':
            # production-like env: mail the full traceback to operators
            mail_title = Utils.currentTime() + " " + sys.argv[0] + "执行异常,异常内容见邮件"
            Utils.sendMail(mail_title, traceback.format_exc(), [
                '邮箱地址'
            ])
        else:
            traceback.print_exc()
        log.info("业务方法异常")
    finally:
        # ack even on failure, so the message is not redelivered forever
        log.info("队列消息确认消费")
        ch.basic_ack(delivery_tag=method.delivery_tag)
# ht = Thread(target=tray_icon, name='tray_icon') # ht.start() if cfg['tasks']['diskTask'] != {}: ht3 = Thread(target=disk_inspector, name='disk_inspector') ht3.start() if cfg['tasks']['logTask'] != {}: ht2 = Thread(target=log_inspector, name='log_inspector') ht2.start() if cfg['tasks']['jobList'] != {}: jobList = cfg['tasks']['jobList'] ht1 = Thread(target=process_inspector, name='process_inspector') ht1.start() log.info( f"AppWatch started. Version: {__version__}_{PLATFORM}. " f"Python version: {sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}" ) if 'run' in sys.argv: signal.signal(signal.SIGINT, shutdown_me) signal.signal(signal.SIGTERM, shutdown_me) if PLATFORM != 'nt': signal.signal(signal.SIGQUIT, shutdown_me) input() while True: input('Use Ctrl+C to stop me\n')
pass for i in os.listdir(f'{dataDir}docs'): shutil.copy2(f'{dataDir}docs/{i}', f'{homeDir}docs/{i}') elif 'help' in sys.argv: raise Exception('Show help') elif 'run' in sys.argv: from conf import log, logRest, console if devMode: print('\n!#RUNNING IN DEVELOPER MODE\n') log.setLevel(10) logRest.setLevel(10) console.setLevel(10) log.info(f"Starting...") import core elif 'make-doc' in sys.argv: from conf import log, logRest, console log.info(f"Compile documentation") sys.argv = sys.argv[:1] import sphinxbuilder sphinxbuilder.build() else: raise Exception('Show help') except Exception as e: e = traceback.format_exc() print(f'Fail to start main thread: {e}')
def __init__(self, name: str):
    """Create the Slack connector shell; the config is filled in later by load_config()."""
    self.name = name
    self.cfg = {}
    self.defaultCfg = {"url": "YOUR_WEBHOOK_URL_HERE"}
    log.info(f"lucky-slacky v{__version__}")
def _deco(*args, **kwargs):
    """Timing wrapper: run the wrapped `func` under a stopwatch, then log the
    elapsed whole seconds and flush stdout.  (Runtime log string is left in
    Chinese — it is the original project's output.)"""
    with timeutils.StopWatch() as w:
        func(*args, **kwargs)
    log.info("脚本运行结束,共计耗时 : {expense} 秒".format(expense=int(w.elapsed())))
    sys.stdout.flush()
def reconnect():
    """Reconnect to the database (ZKDASH_DB)."""
    log.info("重连数据库...")
    ZKDASH_DB.connect()
def reconnect():
    '''Reconnect to the database (ZKDASH_DB).'''
    log.info('重连数据库...')
    ZKDASH_DB.connect()
def process_inspector():
    """Watcher thread: for every configured job, make sure the process is
    running (and, optionally, answers over HTTP) and restart it when not."""

    def get_pid(exe: str, exePath: str, workDir: str = None) -> int:
        # if workDir is given, match only by working directory; else by exe path
        for p in psutil.process_iter(["name", 'exe', 'cwd']):
            # if 'calc1' in p.info['name']:
            #     sout(f"{p.pid} | {p.info['name']} | {p.info['cwd']} | {p.info['exe']}", 'violet' )
            if exe == p.info['name'].lower():
                if workDir:
                    if not p.info['cwd'].endswith('/'):
                        p.info['cwd'] = f"{p.info['cwd']}/"
                    if workDir.lower() == p.info['cwd'].replace('\\', '/', -1).lower():
                        return p.pid
                else:
                    if PLATFORM == 'nt':
                        exePath = f"{exePath}{exe}"
                    else:
                        exePath = exePath[:-1]  # drop the trailing slash
                    if exePath.lower() == p.info['exe'].replace('\\', '/', -1).lower():
                        return p.pid

    def restart(job: dict, exePid: int = None, killRecursive: bool = False) -> str:
        # Kill the old process tree (when exePid is known), start the job
        # again (command / exe / service), verify it is up, and return a
        # human-readable report. Uses enclosing-loop names: taskName, exe,
        # checkPath, workDir, restartTime, failList.
        data = ""
        status = 0
        failList[taskName]['attemp'] += 1
        if exePid:
            try:
                assert exePid != os.getpid(), "won't kill myself"
                parent = psutil.Process(exePid)
                children = parent.children(killRecursive)
                children.append(parent)
                # TODO try soft kill before hard
                for p in children:
                    try:
                        # p.send_signal(signal.SIGTERM)
                        p.kill()
                    except psutil.NoSuchProcess:
                        pass
                _, alive = psutil.wait_procs(children, timeout=60)
                if alive:
                    raise Exception(
                        f"Fail to kill process {exe} (PID {exePid})")
            except Exception as e:
                data = f'Fail to restart process {exe}: {e}\n'
                log.error(data)
                status = 2
        if status == 0:
            log.debug(f"Launch application {taskName}")
            whatStart = job['whatStart']
            if whatStart == 'command':
                target = job['command']
            elif whatStart == 'exe':
                target = f"{job['exePath']}{exe} {job['exeKey']}"
            else:
                target = None
            if target:
                log.info(f"Starting {taskName}")
                try:
                    if PLATFORM == 'nt':
                        os.system(f"start cmd /c {target}")
                    else:
                        os.system(f"command {target} &")
                except Exception as e:
                    data = f"Fail to restart application: {exe} ({taskName}): {e}\n"
                    status = 3
            else:
                log.info(f"Starting service {job['service']}")
                try:
                    if PLATFORM == 'nt':
                        win32serviceutil.StartService(job['service'])
                    else:
                        os.system(f"systemctl start {job['service']}")
                except Exception as e:
                    e = traceback.format_exc()
                    log.error(str(e))
                    status = 3
                    data = f"Fail to start service: {job['service']} ({taskName}): {e}\n"
        # check that it did not fall down again
        # TODO count downtime after start
        if status == 0:
            sleep(restartTime)
            if get_pid(exe, checkPath, workDir):
                data += 'Successfully restarted application'
                failList[taskName]['isAlive'] = False
                failList[taskName]['attemp'] -= 1
                log.info(data)
            else:
                data += f'Fail to start {taskName}'
                log.error(data)
        else:
            log.error(data)
        new_toast(taskName, data)
        return data

    sleep(3)  # give the other watcher threads time to start
    selfName = "process_inspector"
    failList = {}
    for job in jobList:
        failList[job] = {'isAlive': False, "attemp": 0}
    while True:
        try:
            for job in jobList.values():
                taskName = job['task']
                exe = job['exe'].lower()
                checkPath = job['checkPath']
                exePath = job['exePath']
                workDir = job['workDir']
                doRestart = job['doRestart']
                alwaysWork = job['alwaysWork']
                restartTime = job['restartTime']
                respTime = job['respTime']
                status = 0
                body = ''
                log.info(f'Check app {taskName}')
                exePid = get_pid(exe, checkPath, workDir)
                if exePid and not job['checkUrl']:
                    log.debug(f"{taskName} is fine.")
                elif exePid and job['checkUrl']:
                    # process is up: additionally probe its HTTP endpoint
                    log.debug(f"Found {taskName}. Check http status")
                    try:
                        res = requests.get(job['url'], timeout=respTime)
                        if res.status_code != 200:
                            raise Exception(
                                f"Server return status {res.status_code}")
                        log.debug(f"{taskName} is fine.")
                        if not failList[taskName]['isAlive']:
                            continue
                        else:
                            # it was marked dead before: record the recovery
                            failList[taskName]['isAlive'] = False
                            data = templater.tmpl_fill(selfName, 'alive')
                    except Exception:
                        status = 1
                        data = f"{taskName} didn't respond or return wrong answer. Trying to restart application\n"
                        new_toast(f'Restarting {taskName}', data)
                        log.warning(data)
                        body = templater.tmpl_fill(selfName, "badAnswer").replace(
                            "{{taskName}}", taskName, -1)
                        failList[taskName]['isAlive'] = True
                    if status != 0 and doRestart:
                        data += restart(job, exePid)
                        body += data
                        if 'eventScript' in job:
                            allowSend, body = execute_event_script(
                                log, job['eventScript'], taskName,
                                'badAnswer', body)
                        else:
                            allowSend = True
                        if allowSend:
                            send_notify(taskName, 'badAnswer', body)
                elif not exePid and alwaysWork:
                    # required process is missing: restart and notify
                    body = templater.tmpl_fill(selfName, 'notFound').replace(
                        "{{taskName}}", taskName, -1)
                    data = f"Not found required application {taskName}. Trying to restart\n"
                    log.warning(data)
                    new_toast(f'Starting {taskName}', data)
                    data += restart(job, exePid)
                    body += data
                    new_toast('log_inspector', 'notFound')
                    if 'eventScript' in job:
                        allowSend, body = execute_event_script(
                            log, job['eventScript'], taskName, 'notFound',
                            body)
                    else:
                        allowSend = True
                    if allowSend:
                        send_notify(taskName, 'notFound', body)
            sleep(intervalCheckMin)
        except Exception:
            e = traceback.format_exc()
            log.critical(str(e))
            break
def build(self):
    """Run the full build pipeline (check, copy, less/css/js, templates,
    optional html, token replacement) and log each stage plus total time."""
    started = time.time()
    self._check()
    log.info('copying directories')
    self._dir()
    log.info('handling less...')
    self._less()
    log.info('handling css...')
    self._css()
    log.info('handling javascript...')
    self._js()
    log.info('handling template...')
    self._tpl()
    if self._generate_html:
        log.info('handling html...')
        self._html()
    log.info('replacing all token')
    self._replace()
    log.info('Time cost %s s.' % (time.time() - started))
def disk_inspector():
    """Watcher thread: checks free disk space for every configured disk task
    and sends 'critFree' / 'diskWarn' notifications when thresholds are hit."""

    def fill_tmpl(event: str) -> str:
        # Fill the notification template with the current task's values.
        body = templater.tmpl_fill(selfName, event)
        body = body.replace('{{critFree}}', str(critFree), -1)
        body = body.replace('{{diskFree}}', str(diskFree), -1)
        body = body.replace('{{diskUsage}}', diskUsage, -1)
        body = body.replace('{{taskName}}', taskName, -1)
        return body.replace('{{diskWarn}}', str(diskWarn), -1)

    log.debug("disk_inspector started")
    selfName = 'disk_inspector'
    while True:
        for taskName, task in cfg['tasks']['diskTask'].items():
            critFree = task['critFree']    # GB: critical threshold
            diskUsage = task['diskUsage']  # path of the disk to check
            diskWarn = task['diskWarn']    # GB: warning threshold
            try:
                # bytes -> GB (1073741824 = 1024**3), two decimals
                diskFree = round(
                    shutil.disk_usage(diskUsage).free / 1073741824, 2)
                if diskFree < critFree:
                    log.error(
                        f"Free disk space is critically small on {diskUsage}: {diskFree}"
                    )
                    event = 'critFree'
                    body = fill_tmpl(event)
                    new_toast(
                        diskUsage,
                        f"Free disk space is critically small: {diskFree}")
                    if 'eventScript' in task:
                        allowSend, body = execute_event_script(
                            log, task['eventScript'], taskName, event, body)
                    else:
                        allowSend = True
                    if allowSend:
                        send_notify(taskName, event, body)
                elif diskFree < diskWarn:
                    log.warning(
                        f"Free disk space is ends {diskUsage}: {diskFree}GB")
                    event = 'diskWarn'
                    body = fill_tmpl(event)
                    new_toast(diskUsage,
                              f"Free disk space is ends: {diskFree}GB")
                    if 'eventScript' in task:
                        allowSend, body = execute_event_script(
                            log, task['eventScript'], taskName, event, body)
                    else:
                        allowSend = True
                    if allowSend:
                        send_notify(taskName, event, body)
                elif diskFree > diskWarn:
                    log.info(f"disk {diskUsage}: {diskFree}GB free")
            except FileNotFoundError:
                log.error(f'disk_inspector: wrong path: {diskUsage}')
            except Exception as e:
                log.critical(f'disk_inspector: {traceback.format_exc()}')
                shutdown_me(9, 9)
        sleep(intervalCheckMin)
def __init__(self, name: str):
    """Create the Discord connector shell; the config is filled in later by load_config()."""
    self.name = name
    self.cfg = {}
    self.defaultCfg = {"url": "YOUR_WEBHOOK_URL_HERE"}
    log.info(f"Discord connector v{__version__}")
def check_rst(ds: Datastore) -> dict:
    #TODO need refactoring
    """Compare documentation sources (.rst) against the hashes stored in the
    sphinxbuilder table and report whether anything changed.

    Scans the core 'lootnika' docs plus every exporter's and picker's
    docs/rst/ directory, hashing each file with CityHash64.

    :param ds: Datastore used to read the previously stored hashes
    :return: mapping {source: {'path', 'type', 'rst': {rel_path: {'file', 'hash'}}}}
    :raises Exception: on any unexpected failure (with full traceback text)
    """
    log.debug("Check documentation sources")
    changed = False
    try:
        # previously stored state: rows are (?, source, file, rel_path, hash)
        # — column layout inferred from the indexing below; TODO confirm schema
        rows = ds.select('SELECT * FROM sphinxbuilder', )
        oldRst = {
            'lootnika': {
                'path': "docs/rst/",
                'type': 'lootnika',
                'rst': {}
            }
        }
        for row in rows:
            if row[1] not in oldRst:
                oldRst[row[1]] = {'rst': {}}
            oldRst[row[1]]['rst'][row[3]] = {'file': row[2], 'hash': row[4]}
        # freshly scanned state, filled below
        newRst = {
            'lootnika': {
                'path': "docs/rst/",
                'type': 'lootnika',
                'rst': {}
            }
        }
        # scan every exporter's docs
        for exporter in os.listdir(f'{homeDir}exporters'):
            path = f"exporters/{exporter}/docs/rst/"
            ls = os.listdir(f"{homeDir}{path}")
            if ls == []:
                log.warning(f"No documentation sources found for {exporter}")
                continue
            if exporter not in oldRst:
                log.info(f"Found new exporter docs: {exporter}")
                oldRst[exporter] = {
                    'path': path,
                    'type': 'exporter',
                    'rst': {}
                }
            newRst[exporter] = {'path': path, 'type': 'exporter', 'rst': {}}
            for file in ls:
                rst = f"{path}{file}"
                with open(f"{homeDir}{rst}", encoding='utf-8',
                          mode='r') as cnt:
                    hsh = f"{cityhash.CityHash64(cnt.read())}"
                newRst[exporter]['rst'][rst] = {'file': file, 'hash': hsh}
                if rst in oldRst[exporter]['rst']:
                    if not oldRst[exporter]['rst'][rst]['hash'] == hsh:
                        changed = True
                else:
                    changed = True
        # scan every picker's docs (same pattern as exporters)
        for picker in os.listdir(f'{homeDir}pickers'):
            path = f"pickers/{picker}/docs/rst/"
            ls = os.listdir(f"{homeDir}{path}")
            if ls == []:
                log.warning(f"No documentation sources found for {picker}")
                continue
            if picker not in oldRst:
                log.info(f"Found new picker docs: {picker}")
                # NOTE(review): 'type' is set to 'exporter' here, unlike the
                # 'picker' used for newRst below — looks like a copy-paste slip
                oldRst[picker] = {'path': path, 'type': 'exporter', 'rst': {}}
            newRst[picker] = {'path': path, 'type': 'picker', 'rst': {}}
            for file in ls:
                rst = f"{path}{file}"
                with open(f"{homeDir}{rst}", encoding='utf-8',
                          mode='r') as cnt:
                    hsh = f"{cityhash.CityHash64(cnt.read())}"
                newRst[picker]['rst'][rst] = {'file': file, 'hash': hsh}
                if rst in oldRst[picker]['rst']:
                    if not oldRst[picker]['rst'][rst]['hash'] == hsh:
                        changed = True
                else:
                    changed = True
        # finally scan the core lootnika docs
        exporter = "lootnika"
        path = newRst[exporter]['path']
        ls = os.listdir(f"{homeDir}{path}")
        for file in ls:
            rst = f"{path}{file}"
            with open(f"{homeDir}{rst}", encoding='utf-8', mode='r') as cnt:
                hsh = f"{cityhash.CityHash64(cnt.read())}"
            newRst[exporter]['rst'][rst] = {'file': file, 'hash': hsh}
            if rst in oldRst[exporter]['rst']:
                if not oldRst[exporter]['rst'][rst]['hash'] == hsh:
                    changed = True
            else:
                changed = True
        if changed:
            log.warning(
                "Found changes in documentations. Start me with <make-doc> key."
            )
        return newRst
    except Exception as e:
        raise Exception(
            f"Fail check sources for help documentation: {traceback.format_exc()}"
        )
def restart(job: dict, exePid: int = None, killRecursive: bool = False) -> str:
    """Kill the job's old process tree (when exePid is given), start the job
    again (command / exe / Windows service / systemd unit), verify it is up
    after ``restartTime`` seconds and return a human-readable report.

    NOTE: relies on names from the enclosing scope — taskName, exe,
    checkPath, workDir, restartTime, failList, get_pid, new_toast.
    """
    data = ""
    status = 0  # 0 = ok so far, 2 = kill failed, 3 = start failed
    failList[taskName]['attemp'] += 1
    if exePid:
        try:
            assert exePid != os.getpid(), "won't kill myself"
            parent = psutil.Process(exePid)
            children = parent.children(killRecursive)
            children.append(parent)
            # TODO try soft kill before hard
            for p in children:
                try:
                    # p.send_signal(signal.SIGTERM)
                    p.kill()
                except psutil.NoSuchProcess:
                    pass
            _, alive = psutil.wait_procs(children, timeout=60)
            if alive:
                raise Exception(
                    f"Fail to kill process {exe} (PID {exePid})")
        except Exception as e:
            data = f'Fail to restart process {exe}: {e}\n'
            log.error(data)
            status = 2
    if status == 0:
        log.debug(f"Launch application {taskName}")
        whatStart = job['whatStart']
        if whatStart == 'command':
            target = job['command']
        elif whatStart == 'exe':
            target = f"{job['exePath']}{exe} {job['exeKey']}"
        else:
            target = None  # anything else means a service
        if target:
            log.info(f"Starting {taskName}")
            try:
                if PLATFORM == 'nt':
                    os.system(f"start cmd /c {target}")
                else:
                    os.system(f"command {target} &")
            except Exception as e:
                data = f"Fail to restart application: {exe} ({taskName}): {e}\n"
                status = 3
        else:
            log.info(f"Starting service {job['service']}")
            try:
                if PLATFORM == 'nt':
                    win32serviceutil.StartService(job['service'])
                else:
                    os.system(f"systemctl start {job['service']}")
            except Exception as e:
                e = traceback.format_exc()
                log.error(str(e))
                status = 3
                data = f"Fail to start service: {job['service']} ({taskName}): {e}\n"
    # check that it did not fall down again
    # TODO count downtime after start
    if status == 0:
        sleep(restartTime)
        if get_pid(exe, checkPath, workDir):
            data += 'Successfully restarted application'
            failList[taskName]['isAlive'] = False
            failList[taskName]['attemp'] -= 1
            log.info(data)
        else:
            data += f'Fail to start {taskName}'
            log.error(data)
    else:
        log.error(data)
    new_toast(taskName, data)
    return data
# -*- coding: utf-8 -*- from app import create_app from conf import SERVER, log application = create_app() if __name__ == '__main__': log.info("start_i_manager_server") application.run(host=SERVER.get('host', '0.0.0.0'), port=SERVER.get('port', 8919), debug=SERVER.get('debug', True))