def clear_queue():
    """Clear all inactive tasks from every queue.

    Tasks in the ``error`` state are deleted permanently; tasks in the
    ``scheduled`` state are cancelled. Tasks in other states (queued,
    active) are left untouched.
    """
    for queue, stats in tiger.get_queue_stats().items():
        # Only the state names matter here; the per-state counts are unused.
        for state in stats:
            if state == 'error':
                # tasks_from_queue returns (total_count, tasks); unpack
                # explicitly instead of the obscure [-1] tuple indexing.
                _, tasks = Task.tasks_from_queue(tiger, queue, state)
                for task in tasks:
                    task.delete()
                    logging.info('Task %s cleared from queue %s', task, queue)
            elif state == 'scheduled':
                _, tasks = Task.tasks_from_queue(tiger, queue, state)
                for task in tasks:
                    task.cancel()
                    logging.info('Task %s cleared from queue %s', task, queue)
def test_tasks_from_queue_with_executions(self):
    """Verify ``load_executions`` truncates the executions returned per task."""
    task = self.tiger.delay(exception_task, retry=True)

    # First run fails and records one execution; the retry is scheduled.
    Worker(self.tiger).run(once=True)
    time.sleep(DELAY)

    # Run twice: once to move the retry from scheduled to queued,
    # once to fail again and record a second execution.
    Worker(self.tiger).run(once=True)
    Worker(self.tiger).run(once=True)

    count, tasks = Task.tasks_from_queue(
        self.tiger, 'default', 'scheduled', load_executions=1
    )
    assert count == 1
    assert len(tasks[0].executions) == 1

    count, tasks = Task.tasks_from_queue(
        self.tiger, 'default', 'scheduled', load_executions=10
    )
    assert count == 1
    assert len(tasks[0].executions) == 2
def queue_detail(self, queue, state):
    """Render the detail page for a single queue/state combination."""
    total, task_list = Task.tasks_from_queue(
        self.tiger, queue, state, load_executions=1
    )
    context = {
        'queue': queue,
        'state': state,
        'n': total,
        'tasks': task_list,
    }
    return self.render('tasktiger_admin/tasktiger_queue_detail.html', **context)
def queue_detail(self, queue, state):
    """Show the tasks currently in the given queue and state."""
    template = 'tasktiger_admin/tasktiger_queue_detail.html'
    task_count, task_list = Task.tasks_from_queue(
        self.tiger, queue, state, load_executions=1
    )
    return self.render(
        template, queue=queue, state=state, n=task_count, tasks=task_list
    )
def test_successful_execution_doesnt_clear_previous_errors(self):
    """
    Ensure previous executions are not cleared if we have had
    non-retriable errors.
    """
    # Align with the start of a second so the periodic schedule is predictable.
    sleep_until_next_second()

    # Queue the periodic task.
    self._ensure_queues()
    Worker(tiger).run(once=True)

    # Prepare to execute the periodic task (as permanent failure).
    tiger.connection.set('fail-periodic-task', 'permanent')
    n_total, tasks = Task.tasks_from_queue(tiger, 'periodic', SCHEDULED)
    # Remember the task id so we can look up its executions later.
    task_id = tasks[0].id

    time.sleep(1)

    # Queue the periodic task.
    self._ensure_queues(scheduled={'periodic': 1})
    Worker(tiger).run(once=True)

    # Run the failing periodic task.
    self._ensure_queues(queued={'periodic': 1})
    Worker(tiger).run(once=True)

    # The permanent failure recorded exactly one execution.
    task = Task.from_id(tiger, 'periodic', SCHEDULED, task_id,
                        load_executions=10)
    assert len(task.executions) == 1

    # Let the next run succeed.
    tiger.connection.delete('fail-periodic-task')
    time.sleep(1)

    # Queue the periodic task.
    self._ensure_queues(scheduled={'periodic': 1}, error={'periodic': 1})
    Worker(tiger).run(once=True)

    # Run the successful periodic task.
    self._ensure_queues(queued={'periodic': 1}, error={'periodic': 1})
    Worker(tiger).run(once=True)

    # Ensure we didn't clear previous executions.
    task = Task.from_id(tiger, 'periodic', SCHEDULED, task_id,
                        load_executions=10)
    assert len(task.executions) == 1
def test_successful_execution_clears_executions_from_retries(self):
    """
    Ensure previous executions from retries are cleared after a
    successful execution.
    """
    # Align with the start of a second so the periodic schedule is predictable.
    sleep_until_next_second()

    # Queue the periodic task.
    self._ensure_queues()
    Worker(tiger).run(once=True)

    # Prepare to execute the periodic task (as retriable failure).
    tiger.connection.set('fail-periodic-task', 'retriable')
    n_total, tasks = Task.tasks_from_queue(tiger, 'periodic', SCHEDULED)
    # Remember the task id so we can look up its executions later.
    task_id = tasks[0].id

    time.sleep(1)

    # Queue the periodic task.
    self._ensure_queues(scheduled={'periodic': 1})
    Worker(tiger).run(once=True)

    # Run the failing periodic task.
    self._ensure_queues(queued={'periodic': 1})
    Worker(tiger).run(once=True)

    # The retriable failure recorded exactly one execution.
    task = Task.from_id(tiger, 'periodic', SCHEDULED, task_id,
                        load_executions=10)
    assert len(task.executions) == 1

    # Let the next run succeed.
    tiger.connection.delete('fail-periodic-task')
    time.sleep(1)

    # Queue the periodic task.
    self._ensure_queues(scheduled={'periodic': 1})
    Worker(tiger).run(once=True)

    # Run the successful periodic task.
    self._ensure_queues(queued={'periodic': 1})
    Worker(tiger).run(once=True)

    # Ensure we cleared any previous executions.
    task = Task.from_id(tiger, 'periodic', SCHEDULED, task_id,
                        load_executions=10)
    assert len(task.executions) == 0
def get_queue_tasks():
    """Return a paginated JSON listing of tasks in one queue/state.

    Query parameters:
        queue: queue name (required)
        state: task state (required)
        page: 1-based page number (default 1)
        pageSize: page size (default 500)

    Returns a JSON body with ``list`` (task dicts) and ``total`` (count),
    or a 400 response with code 194000 on invalid parameters.
    """
    query = request.args
    queue = query.get('queue')
    state = query.get('state')
    if not queue or not state:
        return jsonify({
            'message': 'invalid params',
            'code': 194000
        }), 400

    # Validate pagination instead of letting int() raise a 500.
    try:
        page = int(query.get('page', 1))
        size = int(query.get('pageSize', 500))
    except (TypeError, ValueError):
        return jsonify({
            'message': 'invalid params',
            'code': 194000
        }), 400
    # Clamp so a page below 1 can't produce a negative offset.
    offset = max(page - 1, 0) * size

    n, tasks = Task.tasks_from_queue(tiger, queue, state,
                                     skip=offset, limit=size,
                                     load_executions=1)
    bucket = []
    for task in tasks:
        data = task.data.copy()
        # Args may contain sensitive/bulky payloads; drop them if present
        # (pop avoids a KeyError when the task has no args).
        data.pop('args', None)
        record = db.collection('tasks').find_one({'t_id': task.id})
        if record:
            job_record = db.collection('jobs').find_one(
                {'_id': ObjectId(record['job_id'])})
            if job_record:
                data['job_name'] = job_record.get('name')
                data['state'] = state
                # Older records may lack a result field.
                data['result'] = record.get('result')
        else:
            data['job_name'] = 'default'
            data['state'] = state
        data['executions'] = task.executions
        bucket.append(data)
    return jsonify({
        'message': 'ok',
        'code': 0,
        'data': {
            'list': bucket,
            'total': n,
        }
    })
def test_tasks_from_queue(self):
    """tasks_from_queue returns only the tasks queued on the named queue."""
    default_task = Task(self.tiger, simple_task)
    failing_task = Task(self.tiger, exception_task)
    other_queue_task = Task(self.tiger, simple_task, queue='other')
    for pending in (default_task, failing_task, other_queue_task):
        pending.delay()

    count, found = Task.tasks_from_queue(self.tiger, 'default', 'queued')
    # The task on the 'other' queue must not be included.
    assert count == 2

    first = found[0]
    assert default_task.id == first.id
    assert default_task.func == simple_task
    assert default_task.func == first.func
    assert default_task.serialized_func == 'tests.tasks:simple_task'
    assert default_task.serialized_func == first.serialized_func
    assert default_task.state == first.state
    assert default_task.state == 'queued'
    assert default_task.queue == first.queue
    assert default_task.queue == 'default'
def test_purge_errored_tasks_older_than(self):
    """purge_errored_tasks removes only errors executed before the cutoff."""
    timestamps = [
        datetime.datetime(year, 1, 1) for year in (2015, 2016, 2017, 2018)
    ]
    # Fail one task at each frozen point in time.
    for frozen_at in timestamps:
        with FreezeTime(frozen_at):
            self.tiger.delay(exception_task)
            Worker(self.tiger).run(once=True)

    self._ensure_queues(queued={'default': 0}, error={'default': 4})

    _, errored = Task.tasks_from_queue(self.tiger, 'default', 'error')
    assert [errored_task.ts for errored_task in errored] == timestamps

    # Cutoff falls between 2016 and 2017: exactly two tasks qualify.
    purged = self.tiger.purge_errored_tasks(
        last_execution_before=datetime.datetime(2016, 6, 1))
    assert purged == 2
    self._ensure_queues(queued={'default': 0}, error={'default': 2})
def task_retry_multiple(self, queue, state):
    """Retry up to 50 tasks from the given queue/state, then redirect back
    to the queue detail view."""
    max_tasks = 50
    _, pending = Task.tasks_from_queue(self.tiger, queue, state,
                                       limit=max_tasks)
    for stale_task in pending:
        stale_task.retry()
    return redirect(url_for('.queue_detail', queue=queue, state=state))