def _reload_config_tasks_list(self):
    """ Load the list of tasks and their details """
    time_now = int(now())
    if time_now - self._ttl_reload_config_last_run < 1000:  # 1000 = 1 sec
        return
    self._ttl_reload_config_last_run = time_now

    config_version = self.config.get_config_version()
    if config_version != self.config_version:
        log.info('Changes in actions list, update.')
        new_scheduler_tasks = self.config.get_scheduled_actions()

        new_keys = set(new_scheduler_tasks.keys()) - set(self.scheduler_tasks.keys())
        deleted_keys = set(self.scheduler_tasks.keys()) - set(new_scheduler_tasks.keys())
        if new_keys or deleted_keys:
            log.info('New actions list, new_keys={}, deleted_keys={}'.format(new_keys, deleted_keys))

        self.scheduler_tasks = new_scheduler_tasks
        yield from self._load_scheduler_tasks_history()

        # Re-check scheduler_tasks_history here: the run interval may have
        # changed from a long one to a short one while next_run still points
        # far into the future.
        for scheduled_task_name, scheduled_task_history in self.scheduler_tasks_history.items():
            # Walk over every task that has stored scheduling information
            if scheduled_task_history.get('next_run', 0):
                # There is a scheduled run pending
                if scheduled_task_name in self.scheduler_tasks:
                    # The task still has a schedule
                    possible_next_run = datetime_to_timestamp(
                        self._get_next_run_time(scheduled_task_name,
                                                self.scheduler_tasks[scheduled_task_name],
                                                int(now())))
                else:
                    # The schedule is gone: reset next_run to 0 and do nothing else
                    possible_next_run = 0
                if scheduled_task_history.get('next_run', 0) != possible_next_run:
                    # Cancel the scheduled task and reset next_run
                    task_id = scheduled_task_history.get('scheduled_task_id')
                    log.info('Schedule changed for task with id={}, name={}, reschedule next_task'.format(task_id, scheduled_task_name))
                    key = SETTINGS.TASK_STORAGE_KEY.format(task_id).encode('utf-8')
                    yield from self.connection.delete([key])
                    scheduled_task_history['next_run'] = 0
                    scheduled_task_history['scheduled_task_id'] = 0
                    try:
                        task_scheduler_obj = yield from self.connection.hget(
                            SETTINGS.SCHEDULER_HISTORY_HASH,
                            scheduled_task_name.encode('utf-8'))
                        task_scheduler = SchedulerTaskHistory.deserialize(task_scheduler_obj)
                        task_scheduler = task_scheduler._replace(next_run=0, scheduled_task_id=None)
                        yield from self.connection.hset(
                            SETTINGS.SCHEDULER_HISTORY_HASH,
                            task_scheduler.name.encode('utf-8'),
                            task_scheduler.serialize())
                    except Exception:
                        log.error('Broken SchedulerTaskHistory object for task id={}, delete it'.format(scheduled_task_name))
                        # task_scheduler may be unbound here, so key the hdel
                        # off the task name we already have
                        yield from self.connection.hdel(
                            SETTINGS.SCHEDULER_HISTORY_HASH,
                            scheduled_task_name.encode('utf-8'))

        # A task was deleted? Remove it from execution monitoring as well.
        for key in deleted_keys:
            if key in self.scheduler_tasks_history:
                del self.scheduler_tasks_history[key]

        self.config_version = config_version
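# For reference, a minimal sketch of the SchedulerTaskHistory model this code
# relies on. This is an assumption inferred from usage (serialize/deserialize,
# _replace, the field names, and the pickle-related except clauses in
# _cleanup_task below); the real model may carry more fields or use a
# different wire format.
import pickle
from collections import namedtuple

class SchedulerTaskHistory(namedtuple('SchedulerTaskHistory',
                                      'name last_run next_run scheduled_task_id')):
    def serialize(self):
        # Pickle keeps the (UnpicklingError, EOFError, TypeError, ImportError)
        # except clause in _cleanup_task meaningful.
        return pickle.dumps(self)

    @classmethod
    def deserialize(cls, data):
        return pickle.loads(data)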
def store_metric(self, metric, value, time=None):
    """ Store a single metric value, with an optional explicit time """
    if time:
        fields = ('value', 'time')
        values = (value, datetime_to_timestamp(time))
    else:
        fields = ('value',)
        values = (value,)
    # _store_values() expects a tuple of rows, hence the extra nesting
    return self._store_values(table=self.METRIC_TABLE_NAME_TPL.format(metric),
                              fields=fields,
                              values=(values,))
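# A hedged sketch of the _store_values() contract assumed by store_metric()
# above: one row per inner tuple of `values`, one placeholder per field. The
# DB-API connection (self.db) and the SQL dialect are assumptions, not the
# project's actual storage backend.
def _store_values(self, table, fields, values):
    placeholders = ', '.join(['%s'] * len(fields))
    # Table names come from METRIC_TABLE_NAME_TPL and are trusted here
    sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table, ', '.join(fields),
                                                   placeholders)
    with self.db.cursor() as cursor:
        cursor.executemany(sql, values)
    self.db.commit()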
def schedule_task(self, task):
    self.log.info('Schedule task id={}, name={}, run_at={}'.format(
        task.id, task.name, task.run_at))
    # Store the task object itself, with a TTL so stale tasks expire
    yield from self.connection.set(
        SETTINGS.TASK_STORAGE_KEY.format(task.id).encode('utf-8'),
        task.serialize(),
        expire=SETTINGS.TASK_STORAGE_EXPIRE)
    # Add the task to the scheduled queue, scored by its run_at timestamp
    yield from self.connection.zadd(
        SETTINGS.SCHEDULED_QUEUE,
        {task.bid(): datetime_to_timestamp(task.run_at)})
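# The consuming side of SCHEDULED_QUEUE, sketched with the synchronous
# redis-py client for brevity (the project itself uses an asyncio client):
# members whose run_at score is already in the past are due for execution.
import time
import redis

def fetch_due_task_ids(r, queue_key):
    now_ts = int(time.time())
    due = r.zrangebyscore(queue_key, '-inf', now_ts)
    if due:
        r.zrem(queue_key, *due)  # claim the members so they are not re-read
    return due

# Hypothetical usage: fetch_due_task_ids(redis.Redis(), 'scheduled_queue')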
def schedule_task(self, name, task_type, run_at, ttl, kwargs):
    """ Create a Task object and add it to the scheduled queue """
    # Create and store the Task object
    task = yield from self.tq_storage.create_task(
        name, task_type, run_at, ttl, kwargs, store_to=Task.STORE_TO_METRICS)
    yield from self.tq_storage.schedule_task(task)

    # Store next_run and scheduled_task_id in the task history; default to an
    # empty dict so unknown task names do not raise AttributeError
    task = yield from self.tq_storage.create_scheduler_task_history(
        task,
        last_run=self.scheduler_tasks_history.get(task.name, {}).get('last_run', 0),
        next_run=datetime_to_timestamp(run_at),
        scheduled_task_id=task.id)
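# Hypothetical driver coroutine showing the call shape of schedule_task()
# above. The task name, type, ttl and kwargs payload are illustrative
# assumptions, not the project's actual configuration.
import asyncio
import datetime

@asyncio.coroutine
def demo(scheduler):
    run_at = datetime.datetime.now() + datetime.timedelta(minutes=1)
    yield from scheduler.schedule_task(name='check_disk', task_type='exec',
                                       run_at=run_at, ttl=30,
                                       kwargs={'command': 'df -h'})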
def _cleanup_task(self, task):
    """ cleanup_task -- Remove a completed task from the redis queues.

    :param task: `sensors.models.Task` instance
    :return: None
    """
    log.debug("_cleanup_task task_id={}".format(task.id))

    # Remove task from the in-progress queue
    # XXX maybe wrap these two removals in a transaction?
    cnt1 = yield from self.connection.lrem(SETTINGS.INPROGRESS_QUEUE, value=task.bid())
    # Remove task from the in-progress sorted set
    cnt2 = yield from self.connection.zrem(SETTINGS.INPROGRESS_TASKS_SET, [task.bid()])

    # Update scheduler information: store last_run in the scheduler history
    task_scheduler_obj = yield from self.connection.hget(
        SETTINGS.SCHEDULER_HISTORY_HASH, task.name.encode('utf-8'))
    try:
        task_scheduler = SchedulerTaskHistory.deserialize(task_scheduler_obj)
    except (pickle.UnpicklingError, EOFError, TypeError, ImportError):
        task_scheduler = None

    if task_scheduler and task_scheduler.scheduled_task_id == task.id:
        # NOTE: last_run is updated regardless of task status; to track only
        # successful executions, update it only when task.status == Task.SUCCESSFUL
        last_run = datetime_to_timestamp(task.run_at)
        task_scheduler = task_scheduler._replace(
            last_run=last_run, next_run=0, scheduled_task_id=None)
        yield from self.connection.hset(SETTINGS.SCHEDULER_HISTORY_HASH,
                                        task.name.encode('utf-8'),
                                        task_scheduler.serialize())

    # Publish a message about the finished task
    yield from self.connection.publish(
        SETTINGS.TASK_CHANNEL.format(task.id).encode('utf-8'),
        task.status.encode('utf-8'))
    log.debug('Publish message about task {} to {}'.format(
        task.id, SETTINGS.TASK_CHANNEL.format(task.id)))
    log.debug("_cleanup_task lrem result {}".format(cnt1))
    log.debug("_cleanup_task zrem result {}".format(cnt2))

    # Ping scheduler
    yield from self._ping_scheduler(task)
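# A hedged sketch of a client waiting for the status that _cleanup_task()
# publishes. Written against asyncio_redis, whose API the connection calls
# above resemble (an assumption); SETTINGS.TASK_CHANNEL is the same channel
# template used by the publisher.
import asyncio
import asyncio_redis

@asyncio.coroutine
def wait_for_task_status(task_id):
    connection = yield from asyncio_redis.Connection.create()
    subscriber = yield from connection.start_subscribe()
    yield from subscriber.subscribe([SETTINGS.TASK_CHANNEL.format(task_id)])
    reply = yield from subscriber.next_published()
    return reply.value  # the task status string published on finish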
def _move_to_inprogress(self, task):
    """ move_to_inprogress -- Change task status to 'in progress'
    and store the task in a sorted set scored by its expiry time.

    :param task: `sensors.models.Task` instance
    """
    ttl = task.ttl or SETTINGS.WORKER_TASK_TIMEOUT
    expires_at = datetime.datetime.now() + datetime.timedelta(seconds=ttl)
    new_task = task._replace(status=Task.INPROGRESS)

    # XXX maybe wrap these two writes in a transaction?
    yield from self.connection.zadd(
        SETTINGS.INPROGRESS_TASKS_SET,
        {task.bid(): datetime_to_timestamp(expires_at)})
    yield from self.connection.set(
        SETTINGS.TASK_STORAGE_KEY.format(new_task.id).encode('utf-8'),
        new_task.serialize(),
        expire=SETTINGS.TASK_STORAGE_EXPIRE)
    return new_task
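# A minimal sketch of the datetime_to_timestamp() helper used throughout,
# assuming naive local datetimes and whole-second Unix timestamps; the real
# helper may differ (timezone handling, millisecond precision).
import time as _time

def datetime_to_timestamp(dt):
    return int(_time.mktime(dt.timetuple()))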
def store_metric_value(self, metric_id, object_id, task, values):
    log.debug('store_metric_value {} for action/connection {} by task {}'.format(
        metric_id, object_id, task['id']))
    exit_codes = values.get('exit_codes')
    stdout = values.get('stdout')

    metric = self.metrics.get(metric_id)
    value = self.parse_value(metric, stdout)
    log.debug('Metric (id={}) parsed value: {}'.format(metric_id, value))
    if value is None:
        log.error("No parser match for metric {}, nothing to store".format(metric_id))
        self.db_log.error("Empty value after filtering", stdout, "metric", metric_id)
        return

    converter = lambda x: x
    # Convert the metric type
    if metric['type'] == 'boolean':
        value = self.cast_to_boolean(metric_id, metric, value)
    else:
        converter = SETTINGS.METRICS_TYPES_MAP[metric['type']]
        try:
            value = converter(value)
        except ValueError:
            log.error("Wrong value for metric '{}', cannot convert to {}".format(
                metric_id, metric['type']), exc_info=True)
            self.db_log.error("Cannot convert value to type {}".format(metric['type']),
                              str(value), "metric", metric_id)
            return

    # Trim strings
    if isinstance(value, str):
        value = value[:SETTINGS.METRIC_STRING_LIMIT]

    # Apply the multiplier
    multiplier = metric.get('multiplier', None)
    try:
        if multiplier and metric['type'] in SETTINGS.METRIC_NUMERICAL_TYPES:
            multiplier = float(multiplier)
            value = value * multiplier
            # If the metric type is int, convert the result back to int
            value = converter(value)
    except Exception:
        log.error('Cannot apply multiplier', exc_info=True)
        self.db_log.error("Cannot apply multiplier", str(value), "metric", metric_id)
        return

    timestamp = datetime_to_timestamp(task['run_at'])

    # Skip the write when the value has not changed within the configured interval
    skip_interval = parse_timetable(metric.get('limit_duplicate_save', ''))
    if skip_interval:
        prev_val, prev_timestamp = self._lcache.get(metric_id, (None, 0))
        if (prev_val == value) and (timestamp - prev_timestamp) < skip_interval:
            return True
        else:
            self._lcache[metric_id] = (value, timestamp)

    log.info('Store value="{}" for metric {}'.format(value, metric_id))
    try:
        self.metrics_storage.store_metric(metric_id, value, time=task['run_at'])
        yield from self.connection.hset(
            SETTINGS.LAST_VALUES_HASH,
            metric_id.encode('utf-8'),
            ujson.dumps({'value': value, 'timestamp': timestamp}).encode('utf-8'))
    except Exception:
        log.error('Cannot store metric value, storage exception', exc_info=True)
        return

    # Publish a message about the stored value
    yield from self.connection.publish(
        SETTINGS.METRICS_CHANNEL.format(metric_id).encode('utf-8'), b'')
    return True
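# A hedged sketch of the parse_timetable() helper assumed above: it turns a
# limit_duplicate_save spec such as '30s', '5m' or '1h' into a skip interval
# in seconds, with 0 (falsy) meaning "no deduplication". The accepted grammar
# is an assumption; the real parser may be richer.
import re

def parse_timetable(spec):
    match = re.fullmatch(r'(\d+)([smh]?)', (spec or '').strip())
    if not match:
        return 0
    value, unit = int(match.group(1)), match.group(2)
    return value * {'': 1, 's': 1, 'm': 60, 'h': 3600}[unit]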