def is_terminated(self) -> bool:
    """Poll the global scheduler and decide whether this task must stop.

    Blocks while the scheduler is paused; returns True when the task has
    to terminate (user cancel or scheduler shutdown), False when work may
    continue.
    """
    if scheduler.status == 'pause':
        self.log.info('Task paused')
        scheduler.check_point(self.taskId, 'pause')
        while scheduler.status == 'pause':
            time.sleep(1)
        # pause may have turned into a cancel while we were waiting
        if scheduler.status == 'cancel':
            self.log.warning('Task is interrupted by the user')
            # FIX: other call sites pass (taskId, status) — the extra
            # self.taskName argument here was inconsistent with them.
            scheduler.check_point(self.taskId, 'cancel')
            return True
        else:
            self.log.info('Task resumed')
            return False
    elif scheduler.status == 'work':
        return False
    elif scheduler.status == 'cancel':
        self.log.warning('Task is interrupted by the user')
        scheduler.check_point(self.taskId, 'cancel')
        return True
    else:
        # any other status means the scheduler itself is going down:
        # flush collected documents and stop the factory pipeline
        self.log.warning('Task refused. Sending collected changes')
        scheduler.check_point(self.taskId, 'cancel')
        self.factory.put('--send--')
        self.factory.put('--stop--')
        self.factory.join()
        return True
def delay_calc(taskStartTime):
    """Compute the moment the first task should start.

    'now' (case-insensitive) means start immediately; otherwise the
    HH:MM:SS wall-clock time is used — today if it is still ahead,
    tomorrow if it has already passed. Exits via shutdown_me() on an
    unparsable time string.
    """
    launch = dtime.datetime.now()
    if taskStartTime.lower() == 'now':
        return launch

    moment = dtime.datetime.now()
    secs_now = moment.hour * 3600 + moment.minute * 60 + moment.second
    try:
        parsed = dtime.datetime.strptime(taskStartTime, '%H:%M:%S')
        secs_target = parsed.hour * 3600 + parsed.minute * 60 + parsed.second
        if secs_now > secs_target:
            # today's slot already passed: remainder of today + tomorrow's offset
            launch += dtime.timedelta(seconds=86400 - secs_now + secs_target)
            if onStart:
                log.info(f"Tasks will start at {taskStartTime}")
        else:
            launch += dtime.timedelta(seconds=secs_target - secs_now)
            if onStart:
                log.info(f"Tasks will start today at {taskStartTime}")
    except Exception as e:
        log.error(
            f'Check parameter taskStartTime: {e}. Correct format used HH:MM:SS'
        )
        time.sleep(2)
        shutdown_me(1, '')
    return launch
def run(self):
    """Factory worker loop: consume documents from the queue and batch-send.

    Control messages on the queue:
      '--stop--' — wait until status returns to 'work', then leave the loop;
      '--send--' — flush the current batch (ignored when it is empty);
    anything else is a document that gets stamped and handed to the converter.
    """
    while True:
        try:
            doc = self.docs.get()
            if doc == '--stop--':
                self.log.debug("Stopping Factory thread")
                # NOTE(review): waits for status to become 'work' before
                # exiting — presumably an in-flight send sets it to
                # something else; confirm against send().
                while self.status != 'work':
                    time.sleep(1)
                break
            elif doc == '--send--':
                if self.parcelSize == 0:
                    # nothing collected — ignore the flush request
                    continue
                self.send()
                self.parcelSize = 0
            else:
                self.parcelSize += 1
                # stamp the document with its exporter/format origin
                doc.raw['exporter'] = self.exporter.type
                doc.raw['format'] = self.converter.type
                self.converter.add(doc)
                # send as soon as a full batch is collected
                if self.parcelSize >= self.batchSize:
                    self.send()
                    self.parcelSize = 0
        except Exception as e:
            self.syncCount[6] += 1
            # at DEBUG level (10) log the full traceback, not just the message
            if self.log.level == 10:
                e = traceback.format_exc()
            self.log.error(f"Factory: {e}")
        finally:
            self.docs.task_done()
            self.status = 'work'
def create_logger(
        config: configparser.RawConfigParser
) -> (logging.Logger, logging.Logger, logging.StreamHandler):
    """Set up application logging.

    Builds the 'Lootnika' and 'RestServer' loggers, each with its own
    rotating log file under homeDir/logs plus a shared stdout handler.
    A disabled [logging] section yields level 0 (NOTSET); loglevel
    "full" maps to DEBUG. Exits the process on bad [logging] values.

    :return: (main logger, REST logger, console handler)
    """
    level = logging.INFO
    logSize = 10240   # KB, default rotation threshold
    logCount = 5      # default number of rotated backups
    try:
        if config.getboolean("logging", "enable"):
            if config.get("logging", "loglevel").lower() == "full":
                level = logging.DEBUG
            logSize = config.getint("logging", "logmaxsizekbs")
            logCount = config.getint("logging", "logmaxfiles")
        else:
            level = 0
    except Exception as e:
        print("WARNING: Check parameters for Logging.", str(e))
        time.sleep(3)
        raise SystemExit(1)

    log_formatter = logging.Formatter(
        '%(asctime)s %(name)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    def _file_handler(path: str) -> RotatingFileHandler:
        # one rotating UTF-8 file per logger; size is configured in KB
        handler = RotatingFileHandler(path,
                                      maxBytes=logSize * 1024,
                                      backupCount=logCount,
                                      encoding='utf-8')
        handler.setFormatter(log_formatter)
        return handler

    console = logging.StreamHandler(stream=sys.stdout)  # mirror to stdout
    console.setFormatter(log_formatter)
    console.setLevel(level)

    log = logging.getLogger('Lootnika')
    logRest = logging.getLogger('RestServer')
    for logger, path in ((log, f"{homeDir}logs/lootnika.log"),
                         (logRest, f"{homeDir}logs/rest.log")):
        logger.addHandler(_file_handler(path))
        logger.setLevel(level)
        logger.addHandler(console)

    # keep urllib3/requests connection noise out of the logs
    logging.getLogger('urllib3.connectionpool').setLevel(logging.CRITICAL)
    return log, logRest, console
def run(self):
    """Self-control monitor loop: track module threads and overall health."""
    n = 1
    crash = False  # after a crash only thread statuses keep being refreshed
    while True:
        self.resources_usage()
        self.threads_names()
        # mark each expected thread as alive or not
        for i in self.myThreads:
            if i in self.allThreads:
                self.myThreads[i] = True
            else:
                self.myThreads[i] = False
        # note which modules have started: verified only when all are up
        self.isVerified = True
        for i in self.myThreads:
            if not self.myThreads[i]:
                self.isVerified = False
        if not crash:
            # waiting for all modules to start
            if not self.started:
                if self.isVerified:
                    if sys.argv[0].lower().endswith('.exe'):
                        log.info(
                            f"Lootnika started - Executable version: {__version__}_{platform}"
                        )
                    else:
                        log.info(
                            f"Lootnika started - Source version: {__version__}_{platform}"
                        )
                    log.info(
                        f"Welcome to http://localhost:{cfg['rest']['port']}/admin"
                    )
                    ds.execute("UPDATE lootnika SET self_status='working'")
                    self.started = True
                    self.rate = 2  # startup done — polling can be less frequent
                else:
                    n += 1
                    if n == 20:  # startup time limit reached
                        crash = self.crash(
                            'One of the modules does not work correctly')
                    elif n == 10:
                        log.warning("detected slow Lootnika startup")
            # otherwise keep watching that they stay alive
            else:
                if not self.isVerified and not self.exit:
                    crash = self.crash(
                        "One of the modules does not work correctly")
        time.sleep(self.rate)
def shutdown_me(signum=1, frame=1):
    """Stop the modules in the proper order: REST server, Scheduler, Datastore.

    Safe to call multiple times — re-entry is short-circuited via
    selfControl.exit. Signature matches the signal-handler convention.
    """
    log.warning(
        f'Lootnika stopping on {cfg["rest"]["host"]}:{cfg["rest"]["port"]}')
    if selfControl.exit:
        return
    selfControl.exit = True
    selfControl.rate = 0.3
    # n tracks the furthest shutdown stage already triggered (1..3)
    n = 0
    try:
        while True:
            time.sleep(0.3)
            # all watched threads gone — shutdown complete
            if not bool(selfControl.myThreads):
                break
            if selfControl.myThreads['RestServer']:
                if n < 1:
                    log.debug("Stopping REST server")
                    try:
                        # a wildcard/loopback bind is not a connectable
                        # address — use 127.0.0.1 instead
                        if cfg["rest"]["host"] in ['::1', '0.0.0.0']:
                            host = '127.0.0.1'
                        else:
                            host = cfg["rest"]["host"]
                        # ask the REST server to stop itself over its own API
                        cnx = httpClient.HTTPConnection(host,
                                                        cfg["rest"]["port"],
                                                        timeout=12)
                        cnx.request(method="GET", url='/a=stop?stop')
                        cnx.getresponse()
                    except Exception:
                        # best-effort: the server may already be down
                        pass
                    n = 1
                continue
            elif selfControl.myThreads['Scheduler']:
                if n < 2:
                    log.debug("Stopping Scheduler thread")
                    scheduler.cmd = 'stop'
                    n = 2
            elif selfControl.myThreads['Datastore']:
                if n < 3:
                    log.debug("Stopping Datastore thread")
                    ds.close()
                    n = 3
            else:
                break
    except Exception as e:
        log.error(f'Shutdown failed: {traceback.format_exc()}')
    finally:
        selfControl.stop = True
        log.info("Lootnika stopped")
        # NOTE(review): 'stillWork' is defined elsewhere in the module —
        # presumably a run-mode flag; confirm before relying on it
        if not stillWork:
            os._exit(1)
def get_svc_params() -> list:
    """Read the service identity from the [service] config section.

    :return: [Name, DisplayName, Description]
    Exits the process when any of the three options is missing.
    """
    try:
        return [
            config.get("service", "Name"),
            config.get("service", "DisplayName"),
            config.get("service", "Description")
        ]
    except Exception as e:
        e = f"incorrect parameters in [Service]: {e}"
        # BUG FIX: 'log' is a module-level name, so it can never appear in
        # locals() of this function — the original check always fell
        # through to print(). globals() is the correct scope to probe.
        if 'log' in globals():
            log.error(e)
        else:
            print(e)
        time.sleep(3)
        raise SystemExit(1)
def run(self, Picker):
    """Scheduler thread loop: launch work_manager workers on schedule.

    Waits for the application to finish starting, then spins until
    self.cmd == 'stop', starting a work_manager thread whenever the
    schedule says it is time. On exit joins all workers.
    """
    log.debug("Starting Scheduler thread")
    self.Picker = Picker
    # hold off until all modules are up (or an early stop is requested)
    while not selfControl.started and self.cmd != 'stop':
        time.sleep(0.2)
    while self.cmd != 'stop':
        self._get_workers()
        if self.status == 'ready':
            # if the schedule is disabled or everything has run,
            # switch to waiting mode
            if self.taskCycles > 0:
                self.status = 'wait'
                if self._isTaskTime():
                    ht = Thread(name='work_manager',
                                target=self._work_manager)
                    ht.start()
                    self.workers.append(ht)
        # all subsequent repeats are counted from the first one
        else:
            if self.taskCycles > 0:
                if self._isTaskTime():
                    ht = Thread(name='work_manager',
                                target=self._work_manager)
                    ht.start()
                    self.workers.append(ht)
            elif self.taskCycles == 0:
                # -1 means the schedule is disabled
                # NOTE(review): status was not 'ready' on entry to this
                # branch, so this check appears dead unless _get_workers()
                # mutated it above — confirm
                if self.status == 'ready':
                    log.info('Tasks cycle done')
        time.sleep(1)
    # on leaving the loop, wait for workers to finish and cancel timers
    self.status = 'stop'
    self._get_workers()
    for ht in self.workers:
        ht.join()
    log.debug("Stopped Scheduler thread")
    return
def open_config() -> configparser.RawConfigParser:
    """Read the Lootnika configuration file, creating an empty one if absent.

    :return: parser with lower-cased section handling applied
    Exits the process when the file exists but cannot be parsed.
    """
    try:
        # FIX: the probe handle was leaked — close it via a context manager
        with open(f"{homeDir}{cfgFileName}", encoding='utf-8'):
            pass
    except IOError:
        # file is missing/unreadable: create an empty one (handle closed too)
        with open(f"{homeDir}{cfgFileName}", 'tw', encoding='utf-8'):
            pass
    config = configparser.RawConfigParser(comment_prefixes=(['//', '#', ';']),
                                          allow_no_value=True)
    config = lowcase_sections(config)
    try:
        config.read(f"{homeDir}{cfgFileName}")
    except Exception as e:
        print(f"Fail to read configuration file: {e}")
        time.sleep(3)
        raise SystemExit(1)
    return config
def verify_diskUsage():
    """Validate the [diskusage] section and resolve pathWatch.

    Relative pathWatch values are resolved under homeDir; the resolved
    directory must exist. Exits the process on any invalid value.
    """
    try:
        cfg['diskUsage']['critFreeGb'] = config.getint(
            "diskusage", "critFreeGb")
        cfg['diskUsage']['pathWatch'] = config.get("diskusage", "pathWatch")
        # FIX: the old test was `":" not in path`, which only recognizes
        # Windows drive paths and mis-classified absolute POSIX paths
        # (no colon) as relative; os.path.isabs handles both platforms.
        if not os.path.isabs(cfg['diskUsage']['pathWatch']):
            cfg['diskUsage'][
                'pathWatch'] = f"{homeDir}{cfg['diskUsage']['pathWatch']}"
        if not os.path.exists(cfg['diskUsage']['pathWatch']):
            raise Exception('wrong directory pathWatch.')
    except Exception as e:
        log.error(f"incorrect parameters in [DiskUsage]: {e}")
        time.sleep(3)
        raise SystemExit(1)
def verify_scheduler():
    """Validate the [schedule] section and fill cfg['schedule'].

    Normalizes taskCycles == -1 to infinity, collects the named task
    sections into cfg['schedule']['tasks'], and cross-checks that an
    enabled schedule actually has tasks and cycles. Exits on bad values.
    """
    tmp = cfg['schedule']
    try:
        tmp["startTask"] = config.getboolean("schedule", "enable")
        tmp["taskStartTime"] = config.get("schedule", "taskStartTime")
        if tmp["taskStartTime"].lower() != 'now':
            try:
                dtime.datetime.strptime(tmp["taskStartTime"], '%H:%M:%S')
            # FIX: was a bare `except:` — narrowed to the parse failure so
            # unrelated errors (KeyboardInterrupt etc.) are not swallowed
            except ValueError:
                raise Exception(
                    'incorrect parameter taskStartTime. Use <HH:MM:SS> or <Now>'
                )
        tmp["taskCycles"] = config.getint("schedule", "taskCycles")
        if tmp["taskCycles"] == -1:
            # -1 in the config means "repeat forever"
            tmp["taskCycles"] = float('Inf')
        elif tmp["taskCycles"] < -1:
            raise Exception('incorrect parameter taskCycles')
        tmp["repeatMin"] = config.getint("schedule", "repeatMin")
        if tmp["repeatMin"] < 1:
            raise Exception('incorrect parameter repeatMin')
        tmp["taskCount"] = config.getint("schedule", "taskCount")
        if tmp["taskCount"] < 0:
            raise Exception('incorrect parameter taskCount')
        elif tmp["taskCount"] == 0:
            tmp['tasks'] = None
        else:
            tmp['tasks'] = {}
            # TODO start=1
            for n in range(tmp["taskCount"]):
                tmp['tasks'][config.get("schedule", str(n)).lower()] = {}
        if tmp["startTask"]:
            if tmp['tasks'] is None:
                raise Exception(
                    'Schedule is enabled, but no one task is active')
            elif tmp["taskCycles"] == 0:
                raise Exception('Schedule is enabled, but taskCycles = 0')
    except Exception as e:
        log.error(f"incorrect parameters in [Schedule]: {e}")
        time.sleep(3)
        raise SystemExit(1)
def check_base_sections(config: configparser.RawConfigParser) -> None:
    """Ensure the mandatory config sections exist, creating missing ones.

    When any section had to be created, asks the user to restart and
    exits. Returns None when the config is already complete.
    """
    edited = False
    try:
        for k in ['server', 'service', 'logging', 'diskusage', 'schedule']:
            if not config.has_section(k):
                print(f"ERROR: no section {k}")
                # FIX: accumulate with `or` — the old plain assignment let a
                # later False return value hide an earlier successful write
                edited = write_section(k, default[k]) or edited
                if k == 'schedule':
                    write_section('export', default['export'])
        if edited:
            print(
                "WARNING: created new sections in config file. Restart me to apply them"
            )
            time.sleep(3)
            raise SystemExit(1)
    except Exception as e:
        # SystemExit is not caught here (it is not an Exception subclass)
        print(f"ERROR: Fail to create configuration file: {e}")
        time.sleep(3)
        raise SystemExit(1)
def verify_rest():
    """Validate the [server] section: host, port and the client ACLs.

    Builds cfg['rest']['acl'] with an allowed-host set (or '*') per role.
    Exits the process on any invalid value.
    """
    try:
        cfg['rest']['host'] = config.get("server", "host")
        cfg['rest']['port'] = config.getint("server", "port")
        # field actions is used for GetStatus action, so they filling in restserver
        acl = {
            'admin': {'users': 'AdminClients', 'actions': ''},
            'query': {'users': 'QueryClients', 'actions': ''},
        }
        for role, entry in acl.items():
            raw = (config.get('server', entry['users'])).strip()
            if '*' in raw:
                entry['users'] = '*'
                continue
            if raw in ('', ';'):
                raise Exception(f"Not set {entry['users']}")
            if ';' not in raw:
                raise Exception(
                    f"No delimiter <;> in {entry['users']}")
            hosts = [ip.strip() for ip in raw.split(';') if ip != '']
            # loopback addresses and our own host are always allowed
            hosts.extend(
                ['::1', 'localhost', '127.0.0.1', cfg['rest']['host']])
            entry['users'] = set(hosts)
        cfg['rest']['acl'] = acl
    except Exception as e:
        log.error(f"incorrect parameters in [Server]: {e}")
        time.sleep(3)
        raise SystemExit(1)
if __name__ != "__main__":
    # Bootstrap: runs when this module is imported as the application entry
    log.debug("Starting main thread")
    selfControl = SelfControl()
    ds = Datastore(f'{homeDir}lootnika_tasks_journal.db')
    sphinxbuilder.check_rst(ds)
    from scheduler import Scheduler, first_start_calc
    startTime, taskCycles, repeatMin = first_start_calc(cfg['schedule'])
    # Scheduler and Picker must be able to see each other
    scheduler = Scheduler(cfg['schedule']['tasks'], taskCycles, repeatMin,
                          startTime)
    Picker = load_picker()
    Thread(name='Scheduler', target=scheduler.run, args=(Picker, )).start()
    import restserv
    Thread(
        name='RestServer',
        target=restserv.start_me,
    ).start()
    if 'run' in sys.argv:
        # keyboard capture is only possible from a console session
        signal.signal(signal.SIGTERM, shutdown_me)
        signal.signal(signal.SIGINT, shutdown_me)
        # keep the main thread alive until shutdown is requested
        while not selfControl.exit:
            time.sleep(1)
def verify_tasks() -> set:
    """Load the picker's config module and validate every task section.

    Any Pickers can have options:
      - exporter
      - overwritetaskstore
    :return: set of tasks exporters
    """
    try:
        module = __import__(f'pickers.{pickerType}.conf',
                            globals=globals(),
                            locals=locals(),
                            fromlist=['load_config', 'defaultCfg'])
        load_config = module.load_config
        defaultCfg = module.defaultCfg
    except ModuleNotFoundError:
        log.fatal(
            f"No picker {pickerType}. Check if a module exists in directory pickers"
        )
        raise SystemExit(1)
    except AttributeError as e:
        log.fatal(f'Wrong picker: {e}')
        raise SystemExit(1)
    except Exception as e:
        log.fatal(f'Fail load picker: {e}')
        raise SystemExit(1)

    if cfg['schedule']['tasks'] is None:
        log.warning("Lootnika have no tasks")
        return set()

    exports = []
    for taskName in cfg['schedule']['tasks']:
        if not config.has_section(taskName):
            log.warning(f'Not found task section {taskName}')
            if write_section(taskName, defaultCfg):
                log.error(
                    "created new sections in config file. Restart me to apply them"
                )
                time.sleep(3)
                raise SystemExit(1)
        try:
            task = load_config(taskName, config)
            if config.has_option(taskName, 'overwriteTaskstore'):
                task['overwriteTaskstore'] = config.getboolean(
                    taskName, 'overwriteTaskstore')
            else:
                task['overwriteTaskstore'] = False
                # TODO implement
            if config.has_option(taskName, 'exporter'):
                task['exporter'] = config.get(taskName, 'exporter')
            else:
                log.warning(f"Task {taskName} use default exporter=export")
                task['exporter'] = "export"
            exports.append(task['exporter'])
            cfg['schedule']['tasks'][taskName] = task
        except Exception as e:
            log.error(f"incorrect parameters in [{taskName}]: {e}")
            time.sleep(3)
            raise SystemExit(1)
    return set(exports)
) raise SystemExit(1) except AttributeError as e: log.fatal(f'Wrong exporter: {e}') raise SystemExit(1) except Exception as e: log.fatal(f'Fail load exporter: {e}') raise SystemExit(1) if __name__ != '__main__': try: create_dirs([ f"{homeDir}{'logs'}", f"{homeDir}{'temp'}", f"{homeDir}{'temp'}", f"{homeDir}{'sphinx-doc'}", ]) except Exception as e: print(e) time.sleep(3) raise SystemExit(-1) config = open_config() check_base_sections(config) log, logRest, console = create_logger(config) cfg, exporters = verify_config(config, log) for i in exporters: cfg = load_exporter(i)