コード例 #1
0
ファイル: dispatch.py プロジェクト: fakegit/eclogue
def run_schedule_task(_id, request_id, username, **kwargs):
    """Queue a scheduled ansible job (playbook or adhoc) via tasktiger.

    Args:
        _id: job document id (string form of a Mongo ObjectId).
        request_id: request correlation id stored with the task record.
        username: user who triggered the schedule.
        **kwargs: extra options forwarded to the task function.

    Returns:
        False when the job document no longer exists, otherwise None.
    """
    db = Mongo()
    job = db.collection('jobs').find_one({'_id': ObjectId(_id)})
    # Guard against a job that was deleted after being scheduled; mirrors
    # the `if not record: return False` check in run_job.
    if not job:
        return False

    params = (_id, request_id, username)
    queue_name = get_queue_by_job(_id)
    # Default to the playbook runner; only 'adhoc' jobs use the adhoc runner.
    func = run_adhoc_task if job.get('type') == 'adhoc' else run_playbook_task

    task = Task(tiger,
                func=func,
                args=params,
                kwargs=kwargs,
                queue=queue_name,
                unique=False,
                lock=True,
                lock_key=_id)
    task_record = {
        'job_id': _id,
        'state': QUEUED,
        'type': 'schedule',
        'ansible': job.get('type'),
        'queue': queue_name,
        'result': '',
        'request_id': request_id,
        't_id': task.id,
        'created_at': time(),
    }

    # Persist the record before delaying so the task can find it when it runs.
    db.collection('tasks').insert_one(task_record)
    task.delay()
コード例 #2
0
    def test_max_workers(self):
        """A third worker must not exceed max_workers_per_queue=2 on queue 'a'."""

        # Enqueue three long-running tasks on queue 'a'.
        for _ in range(3):
            Task(self.tiger, long_task_ok, queue='a').delay()
        self._ensure_queues(queued={'a': 3})

        # Launch two external workers capped at two workers per queue.
        external_procs = [
            Process(target=external_worker,
                    kwargs={'max_workers_per_queue': 2})
            for _ in range(2)
        ]
        for proc in external_procs:
            proc.start()

        # Block until both long tasks have actually started.
        wait_for_long_task()
        wait_for_long_task()

        # Two tasks active, one still queued.
        self._ensure_queues(active={'a': 2}, queued={'a': 1})

        # A local worker cannot obtain the queue lock and exits immediately,
        # leaving the queue counts unchanged.
        local_worker = Worker(self.tiger)
        local_worker.max_workers_per_queue = 2
        local_worker.run(once=True, force_once=True)
        self._ensure_queues(active={'a': 2}, queued={'a': 1})

        # Let the external workers finish.
        for proc in external_procs:
            proc.join()
コード例 #3
0
ファイル: test_workers.py プロジェクト: closeio/tasktiger
    def test_queue_system_lock(self):
        """A queue system lock must block processing until it expires."""

        with FreezeTime(datetime.datetime(2014, 1, 1)):
            # Enqueue three tasks on queue 'a'.
            for _ in range(3):
                Task(self.tiger, long_task_ok, queue='a').delay()
            self._ensure_queues(queued={'a': 3})

            # Without a lock, one task gets processed.
            w = Worker(self.tiger)
            w.max_workers_per_queue = 2
            w.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 2})

            # Lock the queue for 10 seconds and check the stored deadline.
            self.tiger.set_queue_system_lock('a', 10)
            lock_timeout = self.tiger.get_queue_system_lock('a')
            assert lock_timeout == time.time() + 10

        # 9 seconds in: still locked, nothing gets processed.
        with FreezeTime(datetime.datetime(2014, 1, 1, 0, 0, 9)):
            w = Worker(self.tiger)
            w.max_workers_per_queue = 2
            w.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 2})

        # 10 seconds in: the lock has expired, one more task is processed.
        with FreezeTime(datetime.datetime(2014, 1, 1, 0, 0, 10)):
            w = Worker(self.tiger)
            w.max_workers_per_queue = 2
            w.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 1})
コード例 #4
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_current_task_eager(self):
        """In ALWAYS_EAGER mode the task sees itself as the current task."""
        self.tiger.config['ALWAYS_EAGER'] = True

        eager_task = Task(self.tiger, verify_current_task)
        # delay() runs the task inline under ALWAYS_EAGER, so its checks have
        # already executed by the time it returns.
        eager_task.delay()
        assert not self.conn.exists('runtime_error')
        assert self.conn.get('task_id') == eager_task.id
コード例 #5
0
ファイル: test_workers.py プロジェクト: closeio/tasktiger
    def test_max_workers(self):
        """A third worker must respect max_workers_per_queue=2 on queue 'a'."""

        # Enqueue three tasks that will be killed rather than finish.
        for _ in range(3):
            Task(self.tiger, long_task_killed, queue='a').delay()
        self._ensure_queues(queued={'a': 3})

        # Launch two external workers capped at two workers per queue, and
        # give them time to start processing.
        external_procs = [
            Process(target=external_worker,
                    kwargs={'max_workers_per_queue': 2})
            for _ in range(2)
        ]
        for proc in external_procs:
            proc.start()
        time.sleep(DELAY)

        # A local worker cannot obtain the queue lock and exits immediately,
        # leaving two tasks active and one queued.
        local_worker = Worker(self.tiger)
        local_worker.max_workers_per_queue = 2
        local_worker.run(once=True, force_once=True)
        self._ensure_queues(active={'a': 2}, queued={'a': 1})

        # Let the external workers finish.
        for proc in external_procs:
            proc.join()
コード例 #6
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_current_tasks_eager(self):
        """In ALWAYS_EAGER mode the current-tasks list holds only this task."""
        self.tiger.config['ALWAYS_EAGER'] = True

        eager_task = Task(self.tiger, verify_current_tasks)
        # Runs inline under ALWAYS_EAGER.
        eager_task.delay()
        assert not self.conn.exists('runtime_error')
        assert self.conn.lrange('task_ids', 0, -1) == [eager_task.id]
コード例 #7
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
    def test_current_task_eager(self):
        """The eagerly-executed task must be recorded as the current task."""
        self.tiger.config['ALWAYS_EAGER'] = True

        t = Task(self.tiger, verify_current_task)
        t.delay()  # executes inline under ALWAYS_EAGER

        # verify_current_task recorded its own id and flagged no errors.
        assert not self.conn.exists('runtime_error')
        assert self.conn.get('task_id') == t.id
コード例 #8
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
    def test_current_tasks_eager(self):
        """The eagerly-executed task must appear alone in the task-id list."""
        self.tiger.config['ALWAYS_EAGER'] = True

        t = Task(self.tiger, verify_current_tasks)
        t.delay()  # executes inline under ALWAYS_EAGER

        assert not self.conn.exists('runtime_error')
        assert self.conn.lrange('task_ids', 0, -1) == [t.id]
コード例 #9
0
    def test_queue_system_lock(self):
        """Processing on a queue must pause for the system lock's duration."""

        with FreezeTime(datetime.datetime(2014, 1, 1)):
            # Three tasks go onto queue 'a'.
            for _ in range(3):
                Task(self.tiger, long_task_ok, queue='a').delay()
            self._ensure_queues(queued={'a': 3})

            # One task is processed while the queue is unlocked.
            unlocked_worker = Worker(self.tiger)
            unlocked_worker.max_workers_per_queue = 2
            unlocked_worker.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 2})

            # Lock the queue for 10 seconds; the stored deadline is now+10.
            self.tiger.set_queue_system_lock('a', 10)
            assert self.tiger.get_queue_system_lock('a') == time.time() + 10

        # At t+9s the lock still holds: the queue count is unchanged.
        with FreezeTime(datetime.datetime(2014, 1, 1, 0, 0, 9)):
            locked_worker = Worker(self.tiger)
            locked_worker.max_workers_per_queue = 2
            locked_worker.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 2})

        # At t+10s the lock has expired and one more task is processed.
        with FreezeTime(datetime.datetime(2014, 1, 1, 0, 0, 10)):
            expired_worker = Worker(self.tiger)
            expired_worker.max_workers_per_queue = 2
            expired_worker.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 1})
コード例 #10
0
ファイル: once.py プロジェクト: eclogue/eclogue
def dispatch(payload):
    """Queue a one-off book run described by ``payload``.

    Args:
        payload: dict with at least 'inventory' and 'tasks'; may carry
            'username' and 'req_id'.

    Returns:
        The inserted run-record id, or None when inventory/tasks are missing.
    """
    hosts = payload.get('inventory')
    tasks = payload.get('tasks')
    if not hosts or not tasks:
        return None

    # json.dumps already returns a str; the previous str(...) wrapper was
    # redundant. The dump doubles as a stable dedup/lock key.
    uid = md5(json.dumps(payload))
    username = payload.get('username')
    # Reuse the caller-supplied request id so runs can be correlated.
    run_id = payload.get('req_id') or str(uuid.uuid4())
    params = [run_id, payload]
    queue_name = 'book_runtime'
    func = run
    task = Task(tiger,
                func=func,
                args=params,
                queue=queue_name,
                unique=True,
                lock=True,
                lock_key=uid)
    run_record = {
        'uid': uid,
        'run_id': run_id,
        'run_by': username,
        'options': payload,
        'result': '',
        'state': 'pending',
        'created_at': time.time(),
        'updated_at': time.time(),
    }
    # Persist the record before delaying so the task can find it when it runs.
    result = Perform.insert_one(run_record)
    task.delay()

    return result.inserted_id
コード例 #11
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_delay(self):
        """delay() enqueues immediately; cancel() then fails (not scheduled)."""
        delayed = Task(self.tiger, simple_task)
        self._ensure_queues()
        delayed.delay()
        self._ensure_queues(queued={'default': 1})

        # Canceling only works for scheduled tasks.
        pytest.raises(TaskNotFound, delayed.cancel)
コード例 #12
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
    def test_delay(self):
        """A plain delay() puts the task on the default queue right away."""
        queued_task = Task(self.tiger, simple_task)
        self._ensure_queues()
        queued_task.delay()
        self._ensure_queues(queued={'default': 1})

        # A queued (non-scheduled) task cannot be canceled.
        pytest.raises(TaskNotFound, queued_task.cancel)
コード例 #13
0
ファイル: dispatch.py プロジェクト: fakegit/eclogue
def run_job(_id, history_id=None, **kwargs):
    """Dispatch a job either to the cron scheduler or to the task queue.

    Args:
        _id: job document id.
        history_id: optional history record id forwarded to the task.
        **kwargs: extra options forwarded to the task function.

    Returns:
        True when a cron schedule was registered, False when the job is
        missing or already scheduled, otherwise the inserted task-record id.
    """
    db = Mongo()
    record = Job.find_by_id(_id)
    if not record:
        return False

    request_id = str(current_request_id())
    username = None if not login_user else login_user.get('username')
    params = (_id, request_id, username, history_id)
    queue_name = get_queue_by_job(_id)
    # Guard against documents missing these sub-documents so the .get()
    # lookups below cannot raise AttributeError on None.
    extra = record.get('extra') or {}
    template = record.get('template') or {}
    schedule = extra.get('schedule')
    ansible_type = record.get('type')
    if template.get('run_type') == 'schedule':
        # Don't register the same cron job twice.
        existed = db.collection('scheduler_jobs').find_one(
            {'_id': record['_id']})
        if existed:
            return False

        scheduler.add_job(func=run_schedule_task,
                          trigger='cron',
                          args=params,
                          coalesce=True,
                          kwargs=kwargs,
                          id=str(record.get('_id')),
                          max_instances=1,
                          name=record.get('name'),
                          **schedule)
        return True
    else:
        # Non-scheduled jobs run once, immediately, via tasktiger.
        func = run_playbook_task if ansible_type != 'adhoc' else run_adhoc_task
        task = Task(tiger,
                    func=func,
                    args=params,
                    kwargs=kwargs,
                    queue=queue_name,
                    unique=True,
                    lock=True,
                    lock_key=_id)

        task_record = {
            'job_id': _id,
            'type': 'trigger',
            'ansible': ansible_type,
            'state': QUEUED,
            'queue': queue_name,
            'result': '',
            'request_id': request_id,
            't_id': task.id,
            'created_at': time(),
            'kwargs': kwargs,
        }

        # Persist the record before delaying so the task can find it.
        result = db.collection('tasks').insert_one(task_record)
        task.delay()

        return result.inserted_id
コード例 #14
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
    def test_task_disappears(self):
        """
        Ensure that a task object that disappears while the task is processing
        is handled properly. This could happen when a worker processes a task,
        then hangs for a long time, causing another worker to pick up and finish
        the task. Then, when the original worker resumes, the task object will
        be gone. Make sure we log a "not found" error and move on.
        """

        # Sleep for 2 * DELAY so there is time to interfere mid-execution.
        task = Task(self.tiger, sleep_task, kwargs={'delay': 2 * DELAY})
        task.delay()
        self._ensure_queues(queued={'default': 1})

        # Start a worker and wait until it starts processing.
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # Remove the task object while the task is processing.
        assert self.conn.delete('t:task:{}'.format(task.id)) == 1

        # Kill the worker while it's still processing the task.
        os.kill(worker.pid, signal.SIGKILL)

        # _ensure_queues() breaks here because it can't find the task
        assert self.conn.scard('t:queued') == 0
        assert self.conn.scard('t:active') == 1
        assert self.conn.scard('t:error') == 0
        assert self.conn.scard('t:scheduled') == 0

        # Capture logger
        errors = []

        def fake_error(msg):
            errors.append(msg)

        with Patch(self.tiger.log._logger, 'error', fake_error):
            # Since ACTIVE_TASK_UPDATE_TIMEOUT hasn't elapsed yet, re-running
            # the worker at this time won't change anything. (run twice to move
            # from scheduled to queued)
            Worker(self.tiger).run(once=True)
            Worker(self.tiger).run(once=True)

            assert len(errors) == 0
            assert self.conn.scard('t:queued') == 0
            assert self.conn.scard('t:active') == 1
            assert self.conn.scard('t:error') == 0
            assert self.conn.scard('t:scheduled') == 0

            # After waiting and re-running the worker, queues will clear.
            time.sleep(2 * DELAY)
            Worker(self.tiger).run(once=True)
            Worker(self.tiger).run(once=True)

            self._ensure_queues()
            # Exactly one "not found" error must be logged for the lost task.
            assert len(errors) == 1
            assert "not found" in errors[0]
コード例 #15
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_task_disappears(self):
        """
        Ensure that a task object that disappears while the task is processing
        is handled properly. This could happen when a worker processes a task,
        then hangs for a long time, causing another worker to pick up and finish
        the task. Then, when the original worker resumes, the task object will
        be gone. Make sure we log a "not found" error and move on.
        """

        # Sleep for 2 * DELAY so there is time to interfere mid-execution.
        task = Task(self.tiger, sleep_task, kwargs={'delay': 2 * DELAY})
        task.delay()
        self._ensure_queues(queued={'default': 1})

        # Start a worker and wait until it starts processing.
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # Remove the task object while the task is processing.
        assert self.conn.delete('t:task:{}'.format(task.id)) == 1

        # Kill the worker while it's still processing the task.
        os.kill(worker.pid, signal.SIGKILL)

        # _ensure_queues() breaks here because it can't find the task
        assert self.conn.scard('t:queued') == 0
        assert self.conn.scard('t:active') == 1
        assert self.conn.scard('t:error') == 0
        assert self.conn.scard('t:scheduled') == 0

        # Capture logger
        errors = []

        def fake_error(msg):
            errors.append(msg)

        with Patch(self.tiger.log._logger, 'error', fake_error):
            # Since ACTIVE_TASK_UPDATE_TIMEOUT hasn't elapsed yet, re-running
            # the worker at this time won't change anything. (run twice to move
            # from scheduled to queued)
            Worker(self.tiger).run(once=True)
            Worker(self.tiger).run(once=True)

            assert len(errors) == 0
            assert self.conn.scard('t:queued') == 0
            assert self.conn.scard('t:active') == 1
            assert self.conn.scard('t:error') == 0
            assert self.conn.scard('t:scheduled') == 0

            # After waiting and re-running the worker, queues will clear.
            time.sleep(2 * DELAY)
            Worker(self.tiger).run(once=True)
            Worker(self.tiger).run(once=True)

            self._ensure_queues()
            # Exactly one "not found" error must be logged for the lost task.
            assert len(errors) == 1
            assert "not found" in errors[0]
コード例 #16
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_delay_scheduled(self):
        """A scheduled task lands in the scheduled queue and cancels exactly once."""
        scheduled = Task(self.tiger, simple_task, queue='a')
        scheduled.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'a': 1})

        # Canceling a scheduled task removes it from the queue.
        scheduled.cancel()
        self._ensure_queues()

        # A second cancel finds nothing to remove.
        pytest.raises(TaskNotFound, scheduled.cancel)
コード例 #17
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_update_scheduled_time(self):
        """update_scheduled_time() pushes the schedule score ~60s further out."""
        unique_task = Task(self.tiger, simple_task, unique=True)
        unique_task.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'default': 1})
        before = self.conn.zscore('t:scheduled:default', unique_task.id)

        # Move the schedule from +5 min to +6 min.
        unique_task.update_scheduled_time(when=datetime.timedelta(minutes=6))
        self._ensure_queues(scheduled={'default': 1})
        after = self.conn.zscore('t:scheduled:default', unique_task.id)

        # The difference can be slightly over 60 due to processing time, but
        # shouldn't be much higher.
        assert 60 <= after - before < 61
コード例 #18
0
ファイル: test_periodic.py プロジェクト: fakegit/tasktiger
    def test_successful_execution_doesnt_clear_previous_errors(self):
        """
        Ensure previous executions are not cleared if we have had non-retriable
        errors.
        """
        # Align with a second boundary so the 1-second sleeps below line up
        # with the periodic schedule.
        sleep_until_next_second()

        # Queue the periodic task.
        self._ensure_queues()
        Worker(tiger).run(once=True)

        # Prepare to execute the periodic task (as permanent failure).
        tiger.connection.set('fail-periodic-task', 'permanent')
        n_total, tasks = Task.tasks_from_queue(tiger, 'periodic', SCHEDULED)
        task_id = tasks[0].id
        time.sleep(1)

        # Queue the periodic task.
        self._ensure_queues(scheduled={'periodic': 1})
        Worker(tiger).run(once=True)

        # Run the failing periodic task.
        self._ensure_queues(queued={'periodic': 1})
        Worker(tiger).run(once=True)

        # The permanent failure left exactly one recorded execution.
        task = Task.from_id(tiger,
                            'periodic',
                            SCHEDULED,
                            task_id,
                            load_executions=10)
        assert len(task.executions) == 1

        tiger.connection.delete('fail-periodic-task')
        time.sleep(1)

        # Queue the periodic task.
        self._ensure_queues(scheduled={'periodic': 1}, error={'periodic': 1})
        Worker(tiger).run(once=True)

        # Run the successful periodic task.
        self._ensure_queues(queued={'periodic': 1}, error={'periodic': 1})
        Worker(tiger).run(once=True)

        # Ensure we didn't clear previous executions.
        task = Task.from_id(tiger,
                            'periodic',
                            SCHEDULED,
                            task_id,
                            load_executions=10)
        assert len(task.executions) == 1
コード例 #19
0
ファイル: test_periodic.py プロジェクト: fakegit/tasktiger
    def test_successful_execution_clears_executions_from_retries(self):
        """
        Ensure previous executions from retries are cleared after a successful
        execution.
        """
        # Align with a second boundary so the 1-second sleeps below line up
        # with the periodic schedule.
        sleep_until_next_second()

        # Queue the periodic task.
        self._ensure_queues()
        Worker(tiger).run(once=True)

        # Prepare to execute the periodic task (as retriable failure).
        tiger.connection.set('fail-periodic-task', 'retriable')
        n_total, tasks = Task.tasks_from_queue(tiger, 'periodic', SCHEDULED)
        task_id = tasks[0].id
        time.sleep(1)

        # Queue the periodic task.
        self._ensure_queues(scheduled={'periodic': 1})
        Worker(tiger).run(once=True)

        # Run the failing periodic task.
        self._ensure_queues(queued={'periodic': 1})
        Worker(tiger).run(once=True)

        # The retriable failure left exactly one recorded execution.
        task = Task.from_id(tiger,
                            'periodic',
                            SCHEDULED,
                            task_id,
                            load_executions=10)
        assert len(task.executions) == 1

        tiger.connection.delete('fail-periodic-task')
        time.sleep(1)

        # Queue the periodic task.
        self._ensure_queues(scheduled={'periodic': 1})
        Worker(tiger).run(once=True)

        # Run the successful periodic task.
        self._ensure_queues(queued={'periodic': 1})
        Worker(tiger).run(once=True)

        # Ensure we cleared any previous executions.
        task = Task.from_id(tiger,
                            'periodic',
                            SCHEDULED,
                            task_id,
                            load_executions=10)
        assert len(task.executions) == 0
コード例 #20
0
ファイル: taskqueue.py プロジェクト: JokerQyou/mobot
def clear_queue():
    '''
    Clears all inactive tasks from queue.
    '''
    for queue, stats in tiger.get_queue_stats().items():
        for state in stats:
            if state not in ('error', 'scheduled'):
                continue
            # tasks_from_queue returns (total_count, tasks); only the task
            # list is needed here.
            _, inactive_tasks = Task.tasks_from_queue(tiger, queue, state)
            for task in inactive_tasks:
                if state == 'error':
                    task.delete()
                else:
                    task.cancel()
                logging.info('Task %s cleared from queue %s', task, queue)
コード例 #21
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
 def test_current_tasks(self):
     """Both queued tasks must be recorded, in order, by verify_current_tasks."""
     first = Task(self.tiger, verify_current_tasks)
     first.delay()
     second = Task(self.tiger, verify_current_tasks)
     second.delay()
     # One worker pass processes both queued tasks.
     Worker(self.tiger).run(once=True)
     assert self.conn.lrange('task_ids', 0, -1) == [first.id, second.id]
コード例 #22
0
ファイル: book.py プロジェクト: eclogue/eclogue
def dispatch(book_id, entry, payload):
    """Queue an ansible playbook run for a book.

    Args:
        book_id: id of the book to run.
        entry: playbook entry point; dispatching is refused when missing.
        payload: request payload carrying 'username', optional 'req_id',
            and 'options' (dict, or a raw ansible-playbook command line).

    Returns:
        The inserted run-record id, or False when the book or entry is missing.
    """
    # Local import keeps this fix self-contained if the module does not
    # already import time at the top.
    import time

    book = Book.find_by_id(book_id)
    if not book:
        return False

    username = payload.get('username')
    # Reuse the caller-supplied request id so runs can be correlated.
    run_id = payload.get('req_id') or str(uuid.uuid4())
    params = [book_id, run_id]
    options = payload.get('options')
    if not entry:
        return False
    # isinstance is the idiomatic type check (type(x) == str was a smell).
    if isinstance(options, str):
        # Parse a raw ansible-playbook command line into an options dict.
        args = options.split(' ')
        pb = PlaybookCLI(args)
        pb.init_parser()
        options, args = pb.parser.parse_args(args[1:])
        options, args = pb.post_process_args(options, args)
        options = options.__dict__
        options['entry'] = args
        # Fall back to the bare file name for the first inventory path that
        # does not exist locally.
        for i in options['inventory']:
            if not os.path.isfile(i):
                i = os.path.basename(i)
                options['inventory'] = i
                break
    queue_name = 'book_runtime'
    func = run
    task = Task(tiger,
                func=func,
                args=params,
                kwargs=options,
                queue=queue_name,
                unique=True,
                lock=True,
                lock_key=book_id)
    run_record = {
        'book_id': book_id,
        'run_id': run_id,
        'run_by': username,
        'options': options,
        'result': '',
        'state': 'pending',
        # Real timestamps instead of the placeholder values 1 and 2,
        # matching the once.py dispatcher.
        'created_at': time.time(),
        'updated_at': time.time(),
    }
    # Persist the record before delaying so the task can find it when it runs.
    result = Perform.insert_one(run_record)
    task.delay()

    return result.inserted_id
コード例 #23
0
    def task_detail(self, queue, state, task_id):
        """Render the detail page for one task, including its executions."""
        LIMIT = 1000
        try:
            task = Task.from_id(self.tiger,
                                queue,
                                state,
                                task_id,
                                load_executions=LIMIT)
        except TaskNotFound:
            abort(404)

        executions_dumped = []
        for execution in task.executions:
            # Pull the traceback out first so it is rendered verbatim and not
            # duplicated inside the JSON dump.
            traceback = execution.pop('traceback', None)
            dumped = json.dumps(execution, indent=2, sort_keys=True)
            executions_dumped.append((dumped, traceback))

        return self.render(
            'tasktiger_admin/tasktiger_task_detail.html',
            queue=queue,
            state=state,
            task=task,
            task_dumped=json.dumps(task.data, indent=2, sort_keys=True),
            executions_dumped=executions_dumped,
        )
コード例 #24
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
 def test_requeue_expired_task(self):
     """
     Ensure a retriable task ends up in "queued" state if the worker is
     killed prematurely.
     """
     retriable = Task(self.tiger, sleep_task, retry_on=[JobTimeoutException])
     self._test_expired_task(retriable, 'queued')
コード例 #25
0
ファイル: task.py プロジェクト: eclogue/eclogue
def retry(_id, state):
    """Re-queue a failed tasktiger task and sync its Mongo record.

    Args:
        _id: Mongo _id (string) of the task record.
        state: the tasktiger state the task is currently in.

    Returns:
        A Flask JSON response; 404 when the record or queue task is missing.
    """
    record = db.collection('tasks').find_one({'_id': ObjectId(_id)})
    if not record:
        return jsonify({
            'message': 'task not found',
            'code': 194041
        }), 404

    task_id = record.get('t_id')
    queue = record.get('queue')
    try:
        task = Task.from_id(tiger, queue, state, task_id)
        task.retry()
        # retry() moves the task back to QUEUED (the log below records the
        # same transition), so store the new state rather than the old one.
        db.collection('tasks').update_one({'_id': record['_id']}, {'$set': {
            'updated_at': datetime.now(),
            'state': QUEUED,
        }})
        extra = {
            'queue': queue,
            'task_id': task_id,
            'from_state': state,
            'to_state': QUEUED
        }
        logger.info('retry task', extra=extra)
    except TaskNotFound:
        return jsonify({
            'message': 'invalid task',
            'code': 104044
        }), 404

    return jsonify({
        'message': 'ok',
        'code': 0,
    })
コード例 #26
0
ファイル: task.py プロジェクト: eclogue/eclogue
def delete_task(_id, state):
    """Remove a task from its tasktiger queue and mark the Mongo record deleted.

    Args:
        _id: tasktiger task id — note the lookup is by the record's ``t_id``
             field, unlike retry(), which looks up by Mongo ObjectId.
        state: the tasktiger state the task is currently in.

    Returns:
        A Flask JSON response; 404 when the record or queue task is missing.
    """
    record = db.collection('tasks').find_one({'t_id': _id})
    if not record:
        return jsonify({
            'message': 'task not found',
            'code': 194041
        }), 404

    task_id = record.get('t_id')
    queue = record.get('queue')
    try:
        task = Task.from_id(tiger, queue, state, task_id)
        # NOTE(review): Task._move is a private tasktiger API; called with
        # only from_state it appears to remove the task from that state
        # without re-queueing — confirm against the tasktiger version in use.
        task._move(from_state=state)
        db.collection('tasks').update_one({'_id': record['_id']}, {'$set': {
            'updated_at': datetime.now(),
            'state': 'delete',
        }})
        extra = {
            'queue': queue,
            'task_id': task_id,
            'from_state': state,
            'to_state': None
        }
        # Log message says 'cancel' although this endpoint deletes.
        logger.info('cancel task', extra=extra)
    except TaskNotFound:
        return jsonify({
            'message': 'invalid task',
            'code': 104044
        }), 404

    return jsonify({
        'message': 'ok',
        'code': 0,
    })
コード例 #27
0
ファイル: task.py プロジェクト: eclogue/eclogue
def get_task_info(_id):
    """Return one task document enriched with its log, job and live queue data.

    :param _id: Mongo ObjectId string of the task record.
    :return: JSON response with the enriched record, or 404 if not found.
    """
    doc = db.collection('tasks').find_one({'_id': ObjectId(_id)})
    if not doc:
        return jsonify({
            'message': 'record not found',
            'code': 104040
        }), 404

    # Attach the execution log and the owning job document.
    doc['log'] = db.collection('logs').find_one({'task_id': str(doc.get('_id'))})
    doc['job'] = db.collection('jobs').find_one({'_id': ObjectId(doc.get('job_id'))})

    # Best effort: pull live queue data from TaskTiger if the task still exists.
    try:
        live = Task.from_id(tiger, doc.get('queue'), doc.get('state'),
                            doc.get('t_id'))
        doc['queue_info'] = live.data.copy()
    except TaskNotFound:
        doc['queue_info'] = None

    return jsonify({
        'message': 'ok',
        'code': 0,
        'data': doc,
    })
コード例 #28
0
 def task_delete(self, queue, state, task_id):
     """Delete one task, then bounce back to the queue detail page."""
     try:
         target = Task.from_id(self.tiger, queue, state, task_id)
     except TaskNotFound:
         abort(404)
     target.delete()
     return redirect(url_for('.queue_detail', queue=queue, state=state))
コード例 #29
0
ファイル: views.py プロジェクト: closeio/tasktiger-admin
 def task_delete(self, queue, state, task_id):
     """Remove the given task from its queue and return to the detail view."""
     try:
         found = Task.from_id(self.tiger, queue, state, task_id)
     except TaskNotFound:
         # Unknown task id for this queue/state: 404.
         abort(404)
     found.delete()
     return redirect(url_for('.queue_detail', queue=queue, state=state))
コード例 #30
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
 def test_discard_expired_task(self):
     """A non-retriable task whose worker is killed ends in the "error" state."""
     expired = Task(self.tiger, sleep_task)
     self._test_expired_task(expired, 'error')
コード例 #31
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
    def test_delay_scheduled_3(self):
        """A unique scheduled task is cancellable via an equivalent Task object."""
        original = Task(self.tiger, simple_task, unique=True)
        original.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'default': 1})

        # Unique tasks share an id, so a freshly built twin can cancel it.
        twin = Task(self.tiger, simple_task, unique=True)
        twin.cancel()
        self._ensure_queues()
コード例 #32
0
    def test_periodic_execution_unique_ids_self_correct(self):
        """
        Test that periodic tasks will self-correct unique ids
        """
        # Align with a second boundary so the periodic schedule is predictable.
        sleep_until_next_second()

        # Compute the proper unique id and a deliberately malformed one.
        expected_id = gen_unique_id(serialize_func_name(periodic_task), [], {})
        bad_id = gen_unique_id(serialize_func_name(periodic_task), None, None)

        task = Task(tiger, func=periodic_task)

        # Corrupt the id in place to simulate a malformed task.
        assert task.id == expected_id
        task._data['id'] = bad_id
        assert task.id == bad_id

        # Queue it under the malformed id.
        task.delay()
        self._ensure_queues(queued={'periodic': 1})

        # It is findable under the malformed id before execution...
        assert Task.from_id(tiger, 'periodic', QUEUED, bad_id) is not None

        Worker(tiger).run(once=True)
        self._ensure_queues(scheduled={'periodic': 1})

        # ...and under the corrected id once the worker rescheduled it.
        assert Task.from_id(tiger, 'periodic', SCHEDULED, expected_id) is not None
コード例 #33
0
    def test_periodic_execution_unique_ids(self):
        """
        Test that periodic tasks generate the same unique ids

        When a periodic task is scheduled initially as part of worker startup
        vs re-scheduled from within python the unique id generated should be
        the same. If they aren't it could result in duplicate tasks.
        """
        # Align with a second boundary for deterministic periodic scheduling.
        now = datetime.datetime.utcnow()
        time.sleep(1 - now.microsecond / 10.0**6)

        # The first worker run registers and schedules the periodic task.
        # Since periodic tasks register with the Tiger instance they were
        # decorated with, we must use the module-level `tiger`, not
        # `self.tiger`.
        self._ensure_queues()
        Worker(tiger).run(once=True)
        self._ensure_queues(scheduled={'periodic': 1})
        time.sleep(1)
        Worker(tiger).run(once=True)
        self._ensure_queues(queued={'periodic': 1})

        # The id produced at startup must match a freshly generated one:
        # looking the task up by that id succeeds only if they agree.
        uid = gen_unique_id(serialize_func_name(periodic_task), [], {})
        assert Task.from_id(tiger, 'periodic', QUEUED, uid) is not None

        # Execute once; the task gets rescheduled.
        self._ensure_queues(queued={'periodic': 1})
        Worker(tiger).run(once=True)
        self._ensure_queues(scheduled={'periodic': 1})

        # Wait until it is due again and re-queue it.
        time.sleep(1)
        Worker(tiger).run(once=True)
        self._ensure_queues(queued={'periodic': 1})

        # The unique id must be stable across executions.
        assert Task.from_id(tiger, 'periodic', QUEUED, uid) is not None
コード例 #34
0
    def test_task_all_states(self):
        """Test max queue size with tasks in all three states."""

        # Active: start a task and kill its worker so it stays active.
        stuck = Task(self.tiger, sleep_task, queue='a')
        stuck.delay()
        self._ensure_queues(queued={'a': 1})

        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)
        os.kill(worker.pid, signal.SIGKILL)
        self._ensure_queues(active={'a': 1})

        # Scheduled slot.
        self.tiger.delay(
            simple_task,
            queue='a',
            max_queue_size=3,
            when=datetime.timedelta(seconds=10),
        )

        # Queued slot -- the queue is now at its cap of 3.
        self.tiger.delay(simple_task, queue='a', max_queue_size=3)

        self._ensure_queues(active={'a': 1},
                            queued={'a': 1},
                            scheduled={'a': 1})

        # A fourth task is rejected whether it would run immediately...
        with pytest.raises(QueueFullException):
            self.tiger.delay(simple_task, queue='a', max_queue_size=3)

        # ...or in the future.
        with pytest.raises(QueueFullException):
            self.tiger.delay(
                simple_task,
                queue='a',
                max_queue_size=3,
                when=datetime.timedelta(seconds=10),
            )
コード例 #35
0
ファイル: views.py プロジェクト: closeio/tasktiger-admin
    def queue_detail(self, queue, state):
        """Render the detail page for one queue/state pair (one execution each)."""
        count, task_list = Task.tasks_from_queue(
            self.tiger, queue, state, load_executions=1)

        return self.render(
            'tasktiger_admin/tasktiger_queue_detail.html',
            queue=queue,
            state=state,
            n=count,
            tasks=task_list,
        )
コード例 #36
0
    def test_tasks_from_queue_with_executions(self):
        """load_executions caps how many execution records each task carries."""
        failing = self.tiger.delay(exception_task, retry=True)

        # Produce two executions: run once, wait out the retry delay, then
        # run twice more to move the task from scheduled through queued.
        Worker(self.tiger).run(once=True)
        time.sleep(DELAY)
        Worker(self.tiger).run(once=True)
        Worker(self.tiger).run(once=True)

        count, found = Task.tasks_from_queue(
            self.tiger, 'default', 'scheduled', load_executions=1)
        assert count == 1
        assert len(found[0].executions) == 1

        count, found = Task.tasks_from_queue(
            self.tiger, 'default', 'scheduled', load_executions=10)
        assert count == 1
        assert len(found[0].executions) == 2
コード例 #37
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_tasks_from_queue(self):
        """Only 'default'-queue tasks are returned, with metadata intact."""
        first = Task(self.tiger, simple_task)
        second = Task(self.tiger, exception_task)
        elsewhere = Task(self.tiger, simple_task, queue='other')

        for pending in (first, second, elsewhere):
            pending.delay()

        count, found = Task.tasks_from_queue(self.tiger, 'default', 'queued')
        assert count == 2
        assert first.id == found[0].id
        assert first.func == simple_task
        assert first.func == found[0].func
        assert first.serialized_func == 'tests.tasks:simple_task'
        assert first.serialized_func == found[0].serialized_func
        assert first.state == found[0].state
        assert first.state == 'queued'
        assert first.queue == found[0].queue
        assert first.queue == 'default'
コード例 #38
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
 def test_current_tasks(self):
     """Both queued tasks record their ids, in delay order."""
     first = Task(self.tiger, verify_current_tasks)
     first.delay()
     second = Task(self.tiger, verify_current_tasks)
     second.delay()
     Worker(self.tiger).run(once=True)
     assert self.conn.lrange('task_ids', 0, -1) == [first.id, second.id]
コード例 #39
0
    def queue_detail(self, queue, state):
        """Show the tasks in ``queue`` at ``state``, loading one execution each."""
        total, items = Task.tasks_from_queue(self.tiger, queue, state,
                                             load_executions=1)

        return self.render(
            'tasktiger_admin/tasktiger_queue_detail.html',
            queue=queue,
            state=state,
            n=total,
            tasks=items,
        )
コード例 #40
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_delay_scheduled_3(self):
        """Unique tasks can be cancelled through any equivalent instance."""
        Task(self.tiger, simple_task, unique=True).delay(
            when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'default': 1})

        # Rebuilding the same unique task yields the same id, so cancel works.
        Task(self.tiger, simple_task, unique=True).cancel()
        self._ensure_queues()
コード例 #41
0
    def test_delay_scheduled_2(self):
        """Non-unique scheduled tasks are addressable only by id, not by value."""
        task = Task(self.tiger, simple_task, queue='a')
        task.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'a': 1})

        task_id = task.id

        # We can't look up a non-unique task by recreating it.
        task = Task(self.tiger, simple_task, queue='a')
        pytest.raises(TaskNotFound, task.cancel)

        # We can look up a task by its ID.
        # PEP 8 (E731): use a def instead of assigning a lambda to a name.
        def fetch_task():
            return Task.from_id(self.tiger, 'a', 'scheduled', task_id)

        task = fetch_task()
        task.cancel()
        self._ensure_queues()

        # Task.from_id raises if it doesn't exist.
        pytest.raises(TaskNotFound, fetch_task)
コード例 #42
0
ファイル: test_queue_size.py プロジェクト: closeio/tasktiger
    def test_task_all_states(self):
        """Test max queue size with tasks in all three states."""

        # Active: start a task and SIGKILL its worker so it never finishes.
        running = Task(self.tiger, sleep_task, queue='a')
        running.delay()
        self._ensure_queues(queued={'a': 1})

        proc = Process(target=external_worker)
        proc.start()
        time.sleep(DELAY)
        os.kill(proc.pid, signal.SIGKILL)
        self._ensure_queues(active={'a': 1})

        # Scheduled slot.
        self.tiger.delay(simple_task, queue='a', max_queue_size=3,
                         when=datetime.timedelta(seconds=10))

        # Queued slot -- the queue is now at its cap of 3.
        self.tiger.delay(simple_task, queue='a', max_queue_size=3)

        self._ensure_queues(active={'a': 1},
                            queued={'a': 1},
                            scheduled={'a': 1})

        # Enqueueing beyond the cap must fail for an immediate task...
        with pytest.raises(QueueFullException):
            self.tiger.delay(simple_task, queue='a', max_queue_size=3)

        # ...and for a future-scheduled one.
        with pytest.raises(QueueFullException):
            self.tiger.delay(simple_task, queue='a', max_queue_size=3,
                             when=datetime.timedelta(seconds=10))
コード例 #43
0
ファイル: test_base.py プロジェクト: xuru/tasktiger
    def test_delay_scheduled(self):
        """A scheduled task can be cancelled once, but not twice."""
        scheduled = Task(self.tiger, simple_task, queue='a')
        scheduled.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'a': 1})

        scheduled.cancel()
        self._ensure_queues()

        # A second cancel has nothing left to remove and raises.
        pytest.raises(TaskNotFound, scheduled.cancel)
コード例 #44
0
    def task_detail(self, queue, state, task_id):
        """Render the task detail page with executions and integration links.

        Loads up to LIMIT past executions for the task, dumps each one to
        JSON for display, and attaches per-execution and per-task
        integration links. 404s when the task is not found.
        """
        LIMIT = 1000
        try:
            task = Task.from_id(self.tiger,
                                queue,
                                state,
                                task_id,
                                load_executions=LIMIT)
        except TaskNotFound:
            abort(404)

        executions_dumped = []
        for execution in task.executions:
            # Pop mutates the execution dict: the traceback is rendered
            # separately and must not appear in the JSON dump below.
            traceback = execution.pop('traceback', None)
            execution_integrations = generate_integrations(
                self.integration_config.get('EXECUTION_INTEGRATION_LINKS', []),
                task,
                execution,
            )
            execution_converted = convert_keys_to_datetime(
                execution, ['time_failed', 'time_started'])
            # 4-tuple consumed by the template:
            # (json dump, traceback, integration links, converted dict).
            executions_dumped.append((
                json.dumps(
                    execution_converted,
                    indent=2,
                    sort_keys=True,
                    # default=str stringifies datetimes (and anything else
                    # non-serializable) for display purposes only.
                    default=str,
                ),
                traceback,
                execution_integrations,
                execution_converted,
            ))

        integrations = generate_integrations(
            self.integration_config.get('INTEGRATION_LINKS', []), task, None)

        return self.render(
            'tasktiger_admin/tasktiger_task_detail.html',
            queue=queue,
            state=state,
            task=task,
            task_data=task.data,
            task_data_dumped=json.dumps(task.data, indent=2, sort_keys=True),
            # Newest execution first in the template.
            executions_dumped=reversed(executions_dumped),
            integrations=integrations,
        )
コード例 #45
0
ファイル: views.py プロジェクト: closeio/tasktiger-admin
    def task_detail(self, queue, state, task_id):
        """Render a task's detail page with up to LIMIT past executions."""
        LIMIT = 1000
        try:
            task = Task.from_id(self.tiger, queue, state, task_id,
                                load_executions=LIMIT)
        except TaskNotFound:
            abort(404)

        # Pair each execution's JSON dump with its (popped-out) traceback.
        dumped = []
        for run in task.executions:
            tb = run.pop('traceback', None)
            dumped.append((json.dumps(run, indent=2, sort_keys=True), tb))

        return self.render('tasktiger_admin/tasktiger_task_detail.html',
            queue=queue,
            state=state,
            task=task,
            task_dumped=json.dumps(task.data, indent=2, sort_keys=True),
            executions_dumped=dumped,
        )
コード例 #46
0
ファイル: views.py プロジェクト: closeio/tasktiger-admin
 def task_retry_multiple(self, queue, state):
     """Retry up to LIMIT tasks in the given queue/state, then redirect."""
     LIMIT = 50
     _, batch = Task.tasks_from_queue(self.tiger, queue, state, limit=LIMIT)
     for pending in batch:
         pending.retry()
     return redirect(url_for('.queue_detail', queue=queue, state=state))
コード例 #47
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
    def test_eager(self):
        """With ALWAYS_EAGER, delay() runs tasks inline unless future-dated."""
        self.tiger.config['ALWAYS_EAGER'] = True

        # A plain task executes immediately, leaving the queues empty.
        Task(self.tiger, simple_task).delay()
        self._ensure_queues()

        # A failing task raises straight out of delay().
        failing = Task(self.tiger, exception_task)
        pytest.raises(Exception, failing.delay)
        self._ensure_queues()

        # A "when" in the past still runs inline...
        Task(self.tiger, simple_task).delay(
            when=datetime.timedelta(seconds=-5))
        self._ensure_queues()

        # ...as does a zero timedelta.
        Task(self.tiger, simple_task).delay(
            when=datetime.timedelta(seconds=0))
        self._ensure_queues()

        # Unserializable arguments raise even in eager mode.
        unserializable = Task(self.tiger, decorated_task, args=[object()])
        pytest.raises(TypeError, unserializable.delay)
        self._ensure_queues()

        # A genuinely future task is scheduled instead of executed inline.
        Task(self.tiger, simple_task).delay(when=datetime.timedelta(seconds=5))
        self._ensure_queues(scheduled={'default': 1})
コード例 #48
0
ファイル: test_base.py プロジェクト: closeio/tasktiger
 def test_current_task(self):
     """The running task can see itself; its id is recorded without error."""
     current = Task(self.tiger, verify_current_task)
     current.delay()
     Worker(self.tiger).run(once=True)
     assert not self.conn.exists('runtime_error')
     assert self.conn.get('task_id') == current.id
コード例 #49
0
ファイル: test_workers.py プロジェクト: closeio/tasktiger
    def test_single_worker_queue(self):
        """
        Test Single Worker Queue.

        Single worker queues are the same as running with MAX_WORKERS_PER_QUEUE
        set to 1.
        """

        # Queue two tasks
        task = Task(self.tiger, long_task_ok, queue='swq')
        task.delay()
        task = Task(self.tiger, long_task_ok, queue='swq')
        task.delay()
        self._ensure_queues(queued={'swq': 2})

        # Start a worker and wait until it starts processing.
        # It should start processing one task and hold a lock on the queue
        worker = Process(target=external_worker)
        worker.start()

        # Wait up to 2 seconds for external task to start
        # (the task pushes '1' onto the 'long_task_ok' list when it begins).
        result = self.conn.blpop('long_task_ok', 2)
        assert result[1] == '1'

        # This worker should fail to get the queue lock and exit immediately
        Worker(self.tiger).run(once=True, force_once=True)
        # One task remains queued: the in-process worker could not claim it.
        self._ensure_queues(active={'swq': 1}, queued={'swq': 1})
        # Wait for external worker
        worker.join()

        # Retest using a non-single worker queue
        # Queue two tasks
        task = Task(self.tiger, long_task_ok, queue='not_swq')
        task.delay()
        task = Task(self.tiger, long_task_ok, queue='not_swq')
        task.delay()
        self._ensure_queues(queued={'not_swq': 2})

        # Start a worker and wait until it starts processing.
        # It should start processing one task
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # This worker should process the second task
        # (no per-queue lock applies here, so both workers run concurrently).
        Worker(self.tiger).run(once=True, force_once=True)

        # Queues should be empty
        self._ensure_queues()

        worker.join()