Ejemplo n.º 1
0
    def test_periodic_execution_unique_ids_self_correct(self):
        """
        Test that periodic tasks will self-correct unique ids
        """
        # Align to a second boundary so the periodic schedule is predictable.
        sleep_until_next_second()

        # The id generated with explicit empty args/kwargs is the canonical
        # one; passing None for both produces a slightly different id.
        func_name = serialize_func_name(periodic_task)
        good_id = gen_unique_id(func_name, [], {})
        bad_id = gen_unique_id(func_name, None, None)

        task = Task(tiger, func=periodic_task)

        # Corrupt the task's id before queueing it.
        assert task.id == good_id
        task._data['id'] = bad_id
        assert task.id == bad_id

        # Queue the task under the malformed id.
        task.delay()
        self._ensure_queues(queued={'periodic': 1})

        # It is still findable under the malformed id while queued.
        found = Task.from_id(tiger, 'periodic', QUEUED, bad_id)
        assert found is not None

        # Executing and rescheduling the task self-corrects the id.
        Worker(tiger).run(once=True)
        self._ensure_queues(scheduled={'periodic': 1})

        # The rescheduled task now lives under the canonical id.
        found = Task.from_id(tiger, 'periodic', SCHEDULED, good_id)
        assert found is not None
Ejemplo n.º 2
0
    def test_successful_execution_doesnt_clear_previous_errors(self):
        """
        Ensure previous executions are not cleared if we have had non-retriable
        errors.
        """
        # Align to a second boundary so the one-second sleeps below line up
        # with the periodic schedule.
        sleep_until_next_second()

        # Queue the periodic task.
        self._ensure_queues()
        Worker(tiger).run(once=True)

        # Prepare to execute the periodic task (as permanent failure).
        # The task checks this Redis flag and fails without retrying.
        tiger.connection.set('fail-periodic-task', 'permanent')
        n_total, tasks = Task.tasks_from_queue(tiger, 'periodic', SCHEDULED)
        task_id = tasks[0].id
        time.sleep(1)

        # Queue the periodic task.
        self._ensure_queues(scheduled={'periodic': 1})
        Worker(tiger).run(once=True)

        # Run the failing periodic task.
        self._ensure_queues(queued={'periodic': 1})
        Worker(tiger).run(once=True)

        # The failed run must have been recorded as an execution.
        task = Task.from_id(tiger,
                            'periodic',
                            SCHEDULED,
                            task_id,
                            load_executions=10)
        assert len(task.executions) == 1

        # Clear the failure flag so the next run succeeds.
        tiger.connection.delete('fail-periodic-task')
        time.sleep(1)

        # Queue the periodic task. The permanent failure also left a copy of
        # the task in the error queue.
        self._ensure_queues(scheduled={'periodic': 1}, error={'periodic': 1})
        Worker(tiger).run(once=True)

        # Run the successful periodic task.
        self._ensure_queues(queued={'periodic': 1}, error={'periodic': 1})
        Worker(tiger).run(once=True)

        # Ensure we didn't clear previous executions.
        task = Task.from_id(tiger,
                            'periodic',
                            SCHEDULED,
                            task_id,
                            load_executions=10)
        assert len(task.executions) == 1
Ejemplo n.º 3
0
    def test_successful_execution_clears_executions_from_retries(self):
        """
        Ensure previous executions from retries are cleared after a successful
        execution.
        """
        # Align to a second boundary so the one-second sleeps below line up
        # with the periodic schedule.
        sleep_until_next_second()

        # Queue the periodic task.
        self._ensure_queues()
        Worker(tiger).run(once=True)

        # Prepare to execute the periodic task (as retriable failure).
        # The task checks this Redis flag and fails in a retriable way.
        tiger.connection.set('fail-periodic-task', 'retriable')
        n_total, tasks = Task.tasks_from_queue(tiger, 'periodic', SCHEDULED)
        task_id = tasks[0].id
        time.sleep(1)

        # Queue the periodic task.
        self._ensure_queues(scheduled={'periodic': 1})
        Worker(tiger).run(once=True)

        # Run the failing periodic task.
        self._ensure_queues(queued={'periodic': 1})
        Worker(tiger).run(once=True)

        # The failed (to-be-retried) run is recorded as an execution.
        task = Task.from_id(tiger,
                            'periodic',
                            SCHEDULED,
                            task_id,
                            load_executions=10)
        assert len(task.executions) == 1

        # Clear the failure flag so the next run succeeds.
        tiger.connection.delete('fail-periodic-task')
        time.sleep(1)

        # Queue the periodic task. No error queue here: the failure was
        # retriable, so the task went straight back to scheduled.
        self._ensure_queues(scheduled={'periodic': 1})
        Worker(tiger).run(once=True)

        # Run the successful periodic task.
        self._ensure_queues(queued={'periodic': 1})
        Worker(tiger).run(once=True)

        # Ensure we cleared any previous executions.
        task = Task.from_id(tiger,
                            'periodic',
                            SCHEDULED,
                            task_id,
                            load_executions=10)
        assert len(task.executions) == 0
Ejemplo n.º 4
0
 def task_delete(self, queue, state, task_id):
     """Delete the given task, then redirect to its queue's detail view.

     Responds 404 when no task exists for (queue, state, task_id).
     """
     try:
         task = Task.from_id(self.tiger, queue, state, task_id)
     except TaskNotFound:
         abort(404)
     else:
         task.delete()
     return redirect(url_for('.queue_detail', queue=queue, state=state))
Ejemplo n.º 5
0
    def task_detail(self, queue, state, task_id):
        """Render the detail page for one task, including its executions.

        Responds 404 when no task exists for (queue, state, task_id).
        """
        LIMIT = 1000  # cap on how many stored executions are loaded
        try:
            task = Task.from_id(
                self.tiger, queue, state, task_id, load_executions=LIMIT)
        except TaskNotFound:
            abort(404)

        # Pair each execution's JSON dump with its traceback; the traceback
        # is popped out so it is not part of the dump itself.
        executions_dumped = []
        for entry in task.executions:
            tb = entry.pop('traceback', None)
            dumped = json.dumps(entry, indent=2, sort_keys=True)
            executions_dumped.append((dumped, tb))

        return self.render(
            'tasktiger_admin/tasktiger_task_detail.html',
            queue=queue,
            state=state,
            task=task,
            task_dumped=json.dumps(task.data, indent=2, sort_keys=True),
            executions_dumped=executions_dumped,
        )
Ejemplo n.º 6
0
def get_task_info(_id):
    """Return a task record enriched with its log, job, and live queue data.

    Looks the task up by Mongo ObjectId; responds 404 if it does not exist.
    """
    record = db.collection('tasks').find_one({'_id': ObjectId(_id)})
    if not record:
        return jsonify({
            'message': 'record not found',
            'code': 104040
        }), 404

    # Attach the task's log entry, matched by the stringified ObjectId.
    log = db.collection('logs').find_one({'task_id': str(record.get('_id'))})
    # log = db.collection('logs').find_one({'task_id': '5d6d4e0ae3f7e086eaa30321'})

    record['log'] = log
    # Attach the parent job document.
    job = db.collection('jobs').find_one({'_id': ObjectId(record.get('job_id'))})
    record['job'] = job
    queue = record.get('queue')
    state = record.get('state')
    task_id = record.get('t_id')  # the TaskTiger task id stored on the record
    try:
        # Live data from the TaskTiger queue, if the task still exists there.
        task = Task.from_id(tiger, queue, state, task_id)
        record['queue_info'] = task.data.copy()
    except TaskNotFound:
        record['queue_info'] = None

    return jsonify({
        'message': 'ok',
        'code': 0,
        'data': record,
    })
Ejemplo n.º 7
0
def retry(_id, state):
    """Re-queue the TaskTiger task behind the given Mongo record.

    ``_id`` is the Mongo record id; ``state`` is the queue state the task is
    currently expected to be in. Responds 404 if either the Mongo record or
    the queued task is missing.
    """
    record = db.collection('tasks').find_one({'_id': ObjectId(_id)})
    if not record:
        return jsonify({
            'message': 'task not found',
            'code': 194041
        }), 404

    task_id = record.get('t_id')
    queue = record.get('queue')
    try:
        task = Task.from_id(tiger, queue, state, task_id)
        task.retry()
        # NOTE(review): after `retry()` the log below records
        # `to_state=QUEUED`, yet the Mongo record is updated to the *old*
        # `state` — confirm whether this should be QUEUED instead.
        db.collection('tasks').update_one({'_id': record['_id']}, {'$set': {
            'updated_at': datetime.now(),
            'state': state,
        }})
        extra = {
            'queue': queue,
            'task_id': task_id,
            'from_state': state,
            'to_state': QUEUED
        }
        logger.info('retry task', extra=extra)
    except TaskNotFound:
        return jsonify({
            'message': 'invalid task',
            'code': 104044
        }), 404

    return jsonify({
        'message': 'ok',
        'code': 0,
    })
Ejemplo n.º 8
0
def delete_task(_id, state):
    """Remove a queued task and mark its Mongo record as deleted.

    NOTE(review): unlike `retry`, the record is looked up by `t_id` (the
    TaskTiger task id) rather than by Mongo `_id` — confirm this asymmetry
    is intentional.
    """
    record = db.collection('tasks').find_one({'t_id': _id})
    if not record:
        return jsonify({
            'message': 'task not found',
            'code': 194041
        }), 404

    task_id = record.get('t_id')
    queue = record.get('queue')
    try:
        task = Task.from_id(tiger, queue, state, task_id)
        # Removes the task from `state` without re-queueing it. This relies
        # on TaskTiger's private `_move` API; there is no public "remove
        # from arbitrary state" method.
        task._move(from_state=state)
        db.collection('tasks').update_one({'_id': record['_id']}, {'$set': {
            'updated_at': datetime.now(),
            'state': 'delete',
        }})
        extra = {
            'queue': queue,
            'task_id': task_id,
            'from_state': state,
            'to_state': None
        }
        logger.info('cancel task', extra=extra)
    except TaskNotFound:
        return jsonify({
            'message': 'invalid task',
            'code': 104044
        }), 404

    return jsonify({
        'message': 'ok',
        'code': 0,
    })
Ejemplo n.º 9
0
 def task_delete(self, queue, state, task_id):
     """Delete the task identified by (queue, state, task_id).

     Responds 404 when the task does not exist; otherwise deletes it and
     redirects back to the queue detail view.
     """
     try:
         task = Task.from_id(self.tiger, queue, state, task_id)
     except TaskNotFound:
         abort(404)
     task.delete()
     return redirect(url_for('.queue_detail', queue=queue, state=state))
Ejemplo n.º 10
0
    def test_periodic_execution_unique_ids(self):
        """
        Test that periodic tasks generate the same unique ids

        When a periodic task is scheduled initially as part of worker startup
        vs re-scheduled from within python the unique id generated should be
        the same. If they aren't it could result in duplicate tasks.
        """
        # Sleep until the next second — use the shared helper (as the other
        # periodic-task tests do) instead of hand-rolling the computation.
        sleep_until_next_second()

        # After the first worker run, the periodic task will be queued.
        # Note that since periodic tasks register with the Tiger instance, it
        # must be the same instance that was used to decorate the task. We
        # therefore use `tiger` from the tasks module instead of `self.tiger`.
        self._ensure_queues()
        Worker(tiger).run(once=True)
        self._ensure_queues(scheduled={'periodic': 1})
        time.sleep(1)
        Worker(tiger).run(once=True)
        self._ensure_queues(queued={'periodic': 1})

        # generate the expected unique id
        expected_unique_id = gen_unique_id(serialize_func_name(periodic_task),
                                           [], {})

        # pull task out of the queue by id. If found, then the id is correct
        task = Task.from_id(tiger, 'periodic', QUEUED, expected_unique_id)
        assert task is not None

        # execute and reschedule the task
        self._ensure_queues(queued={'periodic': 1})
        Worker(tiger).run(once=True)
        self._ensure_queues(scheduled={'periodic': 1})

        # wait for the task to need to be queued
        time.sleep(1)
        Worker(tiger).run(once=True)
        self._ensure_queues(queued={'periodic': 1})

        # The unique id shouldn't change between executions. Try finding the
        # task by id again
        task = Task.from_id(tiger, 'periodic', QUEUED, expected_unique_id)
        assert task is not None
0
    def task_detail(self, queue, state, task_id):
        """Render the admin detail page for one task.

        Loads up to LIMIT stored executions and, for each, produces a JSON
        dump, the raw traceback, any configured integration links, and the
        execution dict with its timestamp fields converted to datetimes.
        Responds 404 when the task does not exist.
        """
        LIMIT = 1000
        try:
            task = Task.from_id(self.tiger,
                                queue,
                                state,
                                task_id,
                                load_executions=LIMIT)
        except TaskNotFound:
            abort(404)

        executions_dumped = []
        for execution in task.executions:
            # Pull the traceback out so it is rendered separately, not as
            # part of the JSON dump.
            traceback = execution.pop('traceback', None)
            execution_integrations = generate_integrations(
                self.integration_config.get('EXECUTION_INTEGRATION_LINKS', []),
                task,
                execution,
            )
            execution_converted = convert_keys_to_datetime(
                execution, ['time_failed', 'time_started'])
            executions_dumped.append((
                json.dumps(
                    execution_converted,
                    indent=2,
                    sort_keys=True,
                    # datetime values are not JSON-serializable; stringify.
                    default=str,
                ),
                traceback,
                execution_integrations,
                execution_converted,
            ))

        integrations = generate_integrations(
            self.integration_config.get('INTEGRATION_LINKS', []), task, None)

        return self.render(
            'tasktiger_admin/tasktiger_task_detail.html',
            queue=queue,
            state=state,
            task=task,
            task_data=task.data,
            task_data_dumped=json.dumps(task.data, indent=2, sort_keys=True),
            # Newest executions first.
            executions_dumped=reversed(executions_dumped),
            integrations=integrations,
        )
Ejemplo n.º 12
0
    def test_delay_scheduled_2(self):
        """Scheduled tasks can be looked up (and cancelled) by id only."""
        task = Task(self.tiger, simple_task, queue='a')
        task.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'a': 1})

        task_id = task.id

        # We can't look up a non-unique task by recreating it.
        task = Task(self.tiger, simple_task, queue='a')
        pytest.raises(TaskNotFound, task.cancel)

        # We can look up a task by its ID. (PEP 8 E731: use `def` rather
        # than assigning a lambda to a name.)
        def fetch_task():
            return Task.from_id(self.tiger, 'a', 'scheduled', task_id)

        task = fetch_task()
        task.cancel()
        self._ensure_queues()

        # Task.from_id raises if it doesn't exist.
        pytest.raises(TaskNotFound, fetch_task)
Ejemplo n.º 13
0
    def task_detail(self, queue, state, task_id):
        """Show a single task together with up to LIMIT stored executions.

        Responds 404 when no task exists for (queue, state, task_id).
        """
        LIMIT = 1000
        try:
            task = Task.from_id(
                self.tiger, queue, state, task_id, load_executions=LIMIT)
        except TaskNotFound:
            abort(404)

        executions_dumped = []
        for execution in task.executions:
            # The traceback is popped out so it is not part of the dump.
            tb = execution.pop('traceback', None)
            executions_dumped.append(
                (json.dumps(execution, indent=2, sort_keys=True), tb))

        return self.render(
            'tasktiger_admin/tasktiger_task_detail.html',
            queue=queue,
            state=state,
            task=task,
            task_dumped=json.dumps(task.data, indent=2, sort_keys=True),
            executions_dumped=executions_dumped,
        )
Ejemplo n.º 14
0
 def test_task_wrong_queue(self, tiger, queued_task):
     """Looking a task up under the wrong queue raises TaskNotFound."""
     pytest.raises(
         TaskNotFound, Task.from_id, tiger, "other", "active", queued_task.id)
Ejemplo n.º 15
0
 def test_task_wrong_state(self, tiger, queued_task):
     """Looking a task up under the wrong state raises TaskNotFound."""
     pytest.raises(
         TaskNotFound, Task.from_id, tiger, "default", "active", queued_task.id)
Ejemplo n.º 16
0
 def test_task_found(self, tiger, queued_task):
     """A queued task can be fetched by id from its queue and state."""
     fetched = Task.from_id(tiger, "default", "queued", queued_task.id)
     assert fetched.id == queued_task.id