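# Redis data flow implemented by BaseWorker (summary of the methods below):
#
#   _pop_task            BRPOPLPUSH DISPATCHED_QUEUE -> INPROGRESS_QUEUE
#   _deserialize_task    GET  TASK_STORAGE_KEY.format(task_id)
#   _move_to_inprogress  ZADD INPROGRESS_TASKS_SET (scored by expiry timestamp)
#   _cleanup_task        LREM/ZREM the finished task, PUBLISH its status
#                        on TASK_CHANNEL, then ping the scheduler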
class BaseWorker(metaclass=ABCMeta):

    def __init__(self):
        self.current_loop = None
        self.connection = None
        self.comport_state = None
        self.config = None
        self.db_log = None
        self.run = True
        # List of current worker tasks; used to limit tasks per worker
        self.TASKS = TasksList()
        # List for temporary storage of completed tasks awaiting cleanup
        self.COMPLETED_TASKS = TasksList()

    @asyncio.coroutine
    def bootstrap(self):
        log.info("Running worker loop")
        self.connection = yield from asyncio_redis.Pool.create(
            host='localhost', port=6379,
            encoder=asyncio_redis.encoders.BytesEncoder(), poolsize=3)
        self.config = ConfigStorage()
        self.comport_state = ComPortState()
        self.db_log = LoggingStorage()

    def start(self, loop):
        self.current_loop = loop
        loop.add_signal_handler(signal.SIGINT, partial(self.stop, 'SIGINT'))
        loop.add_signal_handler(signal.SIGTERM, partial(self.stop, 'SIGTERM'))
        asyncio.Task(self.loop())

    def stop(self, sig):
        log.info("Got {} signal, we should finish all tasks and stop the daemon".format(sig))
        self.run = False
        self.current_loop.stop()

    @asyncio.coroutine
    def loop(self):
        """
        Main event loop of the worker.

        :return: None
        """
        yield from self.bootstrap()
        # Inside a while loop, wait for incoming events.
        while self.run:
            # Limit tasks per worker: if full, wait for a task to complete instead of fetching new ones
            if len(self.TASKS) > SETTINGS.WORKER_TASKS_LIMIT:
                log.debug("Too many tasks in local queue, waiting for completion, timeout {}s".format(
                    SETTINGS.WORKER_TASK_TIMEOUT))
                try:
                    done, pending = yield from asyncio.wait(
                        self.TASKS, return_when=asyncio.FIRST_COMPLETED,
                        timeout=SETTINGS.WORKER_TASK_TIMEOUT)
                    continue
                except GeneratorExit:
                    break

            # Pop a new task from the dispatched queue
            try:
                raw_task = yield from self._pop_task()
            except GeneratorExit:
                break
            if not raw_task:
                continue

            # Deserialize
            task = yield from self._deserialize_task(raw_task)
            if not task:
                continue

            # Set new status
            task = yield from self._move_to_inprogress(task)

            # Run task
            try:
                yield from self._run_task(task)
            except Exception as ex:
                log.error(ex, exc_info=True)

        # When finished, close the connection.
        self.current_loop.stop()
        self.connection.close()
        log.info('Bye-bye!')

    @asyncio.coroutine
    def _pop_task(self):
        # Blocking pop of a task from the dispatched list
        # (with an atomic push to the in-progress list)
        try:
            raw_task = yield from self.connection.brpoplpush(
                SETTINGS.DISPATCHED_QUEUE, SETTINGS.INPROGRESS_QUEUE,
                SETTINGS.WORKER_BPOP_TIMEOUT)
            log.debug("Got new task from queue {}".format(raw_task))
            return raw_task
        except asyncio_redis.TimeoutError:
            # No new tasks? Sleep for a while
            yield from asyncio.sleep(SETTINGS.WORKER_PULL_SLEEP)
            return
        except Exception:
            log.error('Unexpected error', exc_info=True)

    @asyncio.coroutine
    def _deserialize_task(self, raw_task):
        try:
            task_obj = yield from self.connection.get(
                SETTINGS.TASK_STORAGE_KEY.format(raw_task.decode('utf-8')).encode('utf-8'))
            if not task_obj:
                raise TypeError()
            task = Task.deserialize(task_obj)
            log.info("Got new task id={}, type={}, status={}".format(task.id, task.type, task.status))
            if task.status != Task.SCHEDULED:
                log.error("Wrong status={} for task id={}, type={}; should be SCHEDULED".format(
                    task.status, task.id, task.type))
                return
            return task
        except TypeError:
            log.error("Wrong task_id {}".format(raw_task))
            return
        except (pickle.UnpicklingError, EOFError, ImportError):
            # Drop the malformed message from the in-progress queue
            yield from self.connection.lrem(SETTINGS.INPROGRESS_QUEUE, value=raw_task)
            log.error("Wrong message in queue", exc_info=True)
            return

    @asyncio.coroutine
    def _move_to_inprogress(self, task):
        """
        move_to_inprogress -- Change task status to 'in progress'.

        Store the task in a sorted set with a TTL.

        :param task: `sensors.models.Task` instance
        """
        ttl = task.ttl or SETTINGS.WORKER_TASK_TIMEOUT
        expires_at = datetime.datetime.now() + datetime.timedelta(seconds=ttl)
        new_task = task._replace(status=Task.INPROGRESS)
        # XXX maybe wrap this in a transaction?
        yield from self.connection.zadd(
            SETTINGS.INPROGRESS_TASKS_SET,
            {task.bid(): datetime_to_timestamp(expires_at)})
        yield from self.connection.set(
            SETTINGS.TASK_STORAGE_KEY.format(new_task.id).encode('utf-8'),
            new_task.serialize(), expire=SETTINGS.TASK_STORAGE_EXPIRE)
        return new_task

    @asyncio.coroutine
    def _throttle(self, task):
        try:
            freq = int(task.kwargs.get('execution_limit', 0))
        except (TypeError, ValueError):
            freq = 0
        if freq == 0:
            return False, 0
        key = SETTINGS.WORKER_THROTTLE_LOCKS.format(task.name).encode('utf-8')
        ttl = yield from self.connection.ttl(key)
        if ttl <= 0:
            # No throttle key yet: set one and allow execution
            yield from self.connection.set(key, b'', expire=freq)
            return False, 0
        else:
            # Throttled
            return True, ttl

    @asyncio.coroutine
    def _run_task(self, task):
        """
        run_task -- Runs a task.

        :param task: `sensors.models.Task` instance
        """
        log.debug("_run_task {}".format(task.id))
        if task.type == Task.TYPE_TRIGGERED:
            is_throttled, expiration = yield from self._throttle(task)
            if is_throttled:
                task_future = asyncio.Future()
                self.TASKS.append(task_future)
                task_future.set_result(None)
                log.info("Task id={}, name={} throttled for {} seconds.".format(
                    task.id, task.name, expiration))
                # "The scenario was not started: launch rate limit exceeded"
                self.db_log.info("Сценарий не был запущен из-за превышения частоты запуска",
                                 None, 'action', task.id)
                self._mark_as_completed_callback(task, task_future)
                return
        # Create futures for the process handle and the stop flag
        pid = asyncio.Future()
        stopper = asyncio.Future()
        ttl = task.ttl or SETTINGS.WORKER_TASK_TIMEOUT
        # Call the runner, passing `pid` there (the runner must fill in the process!)
        task_future = asyncio.Task(self.runner(
            task, _pid=pid, stopper=stopper, ttl=ttl, kwargs=task.kwargs))
        # Run the expire timer task; it gets `pid` so it can kill the process on timeout
        expire_timer_future = asyncio.Task(self._expire_timer_task(
            task=task, task_future=task_future, stopper=stopper, _pid=pid, timeout=ttl))
        # Store the task in the local worker list (needed to limit tasks per worker)
        self.TASKS.append(task_future)
        # Add callbacks for completed-task cleanup and expire timer removal
        task_future.add_done_callback(partial(self._mark_as_completed_callback, task))
        task_future.add_done_callback(partial(self._remove_expire_timer_callback, task, expire_timer_future))

    ### TASKS

    @abstractmethod
    @asyncio.coroutine
    def runner(self, task, _pid, stopper, ttl, kwargs):
        """
        runner -- Abstract method; implement it in a concrete Worker class.

        The runner must be a coroutine; the system runs it as a Task.
        It must accept only keyword arguments. It receives `_pid` -- a Future
        the runner must resolve (via set_result) so the expire timer can kill
        the spawned asyncio.subprocess.Process -- and `kwargs` with the task
        arguments.
        """
        pass

    @abstractmethod
    @asyncio.coroutine
    def _store_results(self, task, task_results):
        """
        _store_results -- Abstract method; implement it in a concrete Worker
        class. Must be a coroutine.
        """
        pass

    @asyncio.coroutine
    def _cleanup_task(self, task):
        """
        _cleanup_task -- Clean up a completed task from the redis queues.

        :param task: `sensors.models.Task` instance
        :return: None
        """
        log.debug("_cleanup_task task_id={}".format(task.id))
        # Remove task from the in-progress queue
        # XXX maybe wrap this in a transaction?
        cnt1 = yield from self.connection.lrem(SETTINGS.INPROGRESS_QUEUE, value=task.bid())
        # Remove task from the sorted set
        cnt2 = yield from self.connection.zrem(SETTINGS.INPROGRESS_TASKS_SET, [task.bid()])
        # Update scheduler information: store next_run in the schedule history
        task_scheduler_obj = yield from self.connection.hget(
            SETTINGS.SCHEDULER_HISTORY_HASH, task.name.encode('utf-8'))
        try:
            task_scheduler = SchedulerTaskHistory.deserialize(task_scheduler_obj)
        except (pickle.UnpicklingError, EOFError, TypeError, ImportError):
            task_scheduler = None
        if task_scheduler and task_scheduler.scheduled_task_id == task.id:
            # last_run is updated regardless of status; an earlier variant
            # updated it only for SUCCESSFUL executions:
            #   if task.status == Task.SUCCESSFUL:
            #       last_run = datetime_to_timestamp(task.run_at)
            #   else:
            #       last_run = task_scheduler.last_run
            last_run = datetime_to_timestamp(task.run_at)
            task_scheduler = task_scheduler._replace(
                last_run=last_run, next_run=0, scheduled_task_id=None)
            yield from self.connection.hset(
                SETTINGS.SCHEDULER_HISTORY_HASH, task.name.encode('utf-8'),
                task_scheduler.serialize())
        # Publish message about the finished task
        yield from self.connection.publish(
            SETTINGS.TASK_CHANNEL.format(task.id).encode('utf-8'),
            task.status.encode('utf-8'))
        log.debug('Publish message about task {} to {}'.format(
            task.id, SETTINGS.TASK_CHANNEL.format(task.id)))
        log.debug("_cleanup_task lrem result {}".format(cnt1))
        log.debug("_cleanup_task zrem result {}".format(cnt2))
        # Ping scheduler
        yield from self._ping_scheduler(task)

    @asyncio.coroutine
    def _ping_scheduler(self, task):
        # Publish message about the newly finished task
        if task.type == Task.TYPE_REGULAR:
            yield from self.connection.publish(SETTINGS.WORKER_TO_SCHEDULER_CHANNEL, b'')

    @asyncio.coroutine
    def _expire_timer_task(self, task, task_future, _pid, stopper, timeout):
        """
        expire_timer -- Task that checks for timed-out processes and kills them.
        :param task: `sensors.models.Task` instance
        :param task_future: `asyncio.Future` -- the running task we should cancel() after `timeout`
        :param _pid: `asyncio.Future` -- resolves to the kill callbacks of the
            asyncio.subprocess.Process we should kill after `timeout`
        :param timeout: int, timeout in seconds
        :return: None
        """
        log.debug("Run expire timer for task {}".format(task.id))
        yield from asyncio.sleep(timeout)
        try:
            stopper.set_result(True)
            task_future.cancel()
            killers = _pid.result()
            for killer in killers:
                try:
                    killer()
                except ProcessLookupError:
                    pass
                except Exception:
                    log.error("Failed to kill the action process", exc_info=True)
        except Exception:
            log.error("Unexpected error in _expire_timer", exc_info=True)
        log.debug('EXPIRE TIMER for task {}, killing process {}...'.format(task.id, _pid))
        # "The scenario was stopped: execution time limit exceeded"
        self.db_log.error("Сценарий был остановлен по превышению лимита времени исполнения",
                          None, 'action', task.kwargs.get('_id'))

    ### CALLBACKS

    def _mark_as_completed_callback(self, task, task_future):
        # Store results, change status, remove task from the local list
        log.debug('Mark as completed callback is here!')
        try:
            if task_future.result() is not None:
                asyncio.Task(self._store_results(task, task_future.result()))
                new_task = task._replace(status=Task.SUCCESSFUL)
            else:
                new_task = task._replace(status=Task.FAILED)
        except asyncio.CancelledError:
            new_task = task._replace(status=Task.FAILED)
        except Exception:
            # result() re-raises any exception raised inside the task itself
            log.error("Task id={} raised an exception".format(task.id), exc_info=True)
            new_task = task._replace(status=Task.FAILED)
        finally:
            del task
        log.info("Finish task id={}, status={}".format(new_task.id, new_task.status))
        log.debug("Update task status as COMPLETED <id={}> status={}".format(
            new_task.id, new_task.status))
        asyncio.Task(self.connection.set(
            SETTINGS.TASK_STORAGE_KEY.format(new_task.id).encode('utf-8'),
            new_task.serialize(), expire=SETTINGS.TASK_STORAGE_EXPIRE))
        asyncio.Task(self._cleanup_task(new_task))
        # The callback is invoked thread-safe, so we can drop the locks and use a plain list
        self.TASKS.remove(task_future)

    def _remove_expire_timer_callback(self, task, expire_timer_future, task_future):
        # Cancel the expire-time checker once the task has completed
        log.debug("Cancel expire timer Task for task {}".format(task.id))
        expire_timer_future.cancel()

    ## Helpers

    @staticmethod
    def _get_esc_char(matchobj):
        char = matchobj.group(1).upper()
        if ord(char) in range(65, 96):
            code = ord(char) - 64
            return chr(code)
        return matchobj.group(0)

    @classmethod
    def _parse_command(cls, command):
        # Convert commands like <CTRL+X> to escape characters
        tmp = command
        tmp = PARSE_COMMAND_RE.sub(cls._get_esc_char, tmp)
        tmp = PARSE_COMMAND_NEWLINE_RE.sub(lambda x: '\r', tmp)
        return tmp
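
# A minimal usage sketch (not part of the original module): a hypothetical
# concrete worker that runs a shell command in a subprocess. Names like
# EchoWorker and the 'results:' key are illustrative assumptions; the `_pid`
# future is resolved with a list of kill callbacks, matching how
# _expire_timer_task consumes it.
class EchoWorker(BaseWorker):

    @asyncio.coroutine
    def runner(self, task, _pid, stopper, ttl, kwargs):
        process = yield from asyncio.create_subprocess_shell(
            'echo {}'.format(kwargs.get('message', '')),
            stdout=asyncio.subprocess.PIPE)
        # Publish the kill callback so the expire timer can stop us on timeout
        _pid.set_result([process.kill])
        stdout, _ = yield from process.communicate()
        # A non-None result marks the task SUCCESSFUL in _mark_as_completed_callback
        return stdout

    @asyncio.coroutine
    def _store_results(self, task, task_results):
        # Illustrative storage key; adapt to the project's conventions
        yield from self.connection.set(
            'results:{}'.format(task.id).encode('utf-8'), task_results)


# Typical bootstrap (assumed; mirrors start()/loop() above):
#   loop = asyncio.get_event_loop()
#   worker = EchoWorker()
#   worker.start(loop)
#   loop.run_forever()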
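
# Sketch of the command-parsing helpers in action. The two regexes are defined
# elsewhere in the module; the patterns below are assumptions used only to
# illustrate _parse_command's contract.
#
#   import re
#   PARSE_COMMAND_RE = re.compile(r'<CTRL\+([A-Za-z])>')   # assumed pattern
#   PARSE_COMMAND_NEWLINE_RE = re.compile(r'<ENTER>')      # assumed pattern
#
#   BaseWorker._parse_command('<CTRL+C>')    # -> '\x03' (ETX, i.e. Ctrl-C)
#   BaseWorker._parse_command('ls<ENTER>')   # -> 'ls\r'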