Example #1
0
    def test_eager(self):
        """With ALWAYS_EAGER, delay() runs the task inline (even for a
        past "when"), propagates task and serialization errors to the
        caller, and only schedules tasks whose "when" is in the future."""
        self.tiger.config['ALWAYS_EAGER'] = True

        # Ensure task is immediately executed.
        task = Task(self.tiger, simple_task)
        task.delay()
        self._ensure_queues()

        # Ensure task is immediately executed.
        # Eager execution surfaces the task's exception from delay().
        task = Task(self.tiger, exception_task)
        pytest.raises(Exception, task.delay)
        self._ensure_queues()

        # Even when we specify "when" in the past.
        task = Task(self.tiger, simple_task)
        task.delay(when=datetime.timedelta(seconds=-5))
        self._ensure_queues()

        # Ensure there is an exception if we can't serialize the task.
        # (a bare object() has no serializable representation)
        task = Task(self.tiger, decorated_task, args=[object()])
        pytest.raises(TypeError, task.delay)
        self._ensure_queues()

        # Ensure task is not executed if it's scheduled in the future.
        task = Task(self.tiger, simple_task)
        task.delay(when=datetime.timedelta(seconds=5))
        self._ensure_queues(scheduled={'default': 1})
Example #2
0
 def test_current_tasks(self):
     """A single worker run processes both queued tasks, and each task
     records its own id in the 'task_ids' list in queueing order."""
     first = Task(self.tiger, verify_current_tasks)
     first.delay()
     second = Task(self.tiger, verify_current_tasks)
     second.delay()
     Worker(self.tiger).run(once=True)
     expected_ids = [first.id, second.id]
     assert self.conn.lrange('task_ids', 0, -1) == expected_ids
Example #3
0
def message(**kwargs):
    refs = ['seconds', 'minutes', 'hours', 'days']
    ref = ''
    val = ''
    text = ''
    player = ''
    command = ''
    if kwargs is not None:
        for key, value in kwargs.iteritems():
            if key in refs:
                ref = key
                val = value
            if key == 'text':
                text = value
            if key == 'command':
                command = value
            if key == 'player':
                player = value
    if text != '' and player != '':
        task = Task(tiger, push, ([text, player]))
        print task.delay(when=datetime.timedelta(**{ref: val}))
        print 'task player... ', task._queue, task._state, task._ts, task.id
    elif text != '':
        task = Task(tiger, push, ([text]))
        print task.delay(when=datetime.timedelta(**{ref: val}))
        print 'task... ', task._queue, task._state, task._ts
    if command != '':
        task = Task(tiger, execute, ([command]))
        print task.delay(when=datetime.timedelta(**{ref: val}))
Example #4
0
    def test_single_worker_queue(self):
        """
        Test Single Worker Queue.

        Single worker queues are the same as running with MAX_WORKERS_PER_QUEUE
        set to 1.
        """

        # Queue two tasks
        task = Task(self.tiger, long_task_ok, queue='swq')
        task.delay()
        task = Task(self.tiger, long_task_ok, queue='swq')
        task.delay()
        self._ensure_queues(queued={'swq': 2})

        # Start a worker and wait until it starts processing.
        # It should start processing one task and hold a lock on the queue
        worker = Process(target=external_worker)
        worker.start()

        # Wait for task to start
        wait_for_long_task()

        # This worker should fail to get the queue lock and exit immediately
        Worker(self.tiger).run(once=True, force_once=True)
        self._ensure_queues(active={'swq': 1}, queued={'swq': 1})
        # Wait for external worker
        worker.join()

        # Clear out second task
        # (the external worker has exited by now, so the queue lock is
        # presumably free again and this run can pick up the second task)
        Worker(self.tiger).run(once=True, force_once=True)
        self.conn.delete('long_task_ok')

        # Retest using a non-single worker queue
        # Queue two tasks
        task = Task(self.tiger, long_task_ok, queue='not_swq')
        task.delay()
        task = Task(self.tiger, long_task_ok, queue='not_swq')
        task.delay()
        self._ensure_queues(queued={'not_swq': 2})

        # Start a worker and wait until it starts processing.
        # It should start processing one task
        worker = Process(target=external_worker)
        worker.start()

        # Wait for task to start processing
        wait_for_long_task()

        # This worker should process the second task
        Worker(self.tiger).run(once=True, force_once=True)

        # Queues should be empty since the first task will have to
        # have finished before the second task finishes.
        self._ensure_queues()

        worker.join()
Example #5
0
    def test_delay_scheduled_3(self):
        """A unique task can be cancelled through a re-created Task object."""
        original = Task(self.tiger, simple_task, unique=True)
        original.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'default': 1})

        # Unique tasks share a deterministic id, so a freshly constructed
        # equivalent Task addresses the same scheduled entry.
        duplicate = Task(self.tiger, simple_task, unique=True)
        duplicate.cancel()
        self._ensure_queues()
Example #6
0
    def test_single_worker_queue(self):
        """
        Test Single Worker Queue.

        Single worker queues are the same as running with MAX_WORKERS_PER_QUEUE
        set to 1.
        """

        # Queue two tasks
        task = Task(self.tiger, long_task_ok, queue='swq')
        task.delay()
        task = Task(self.tiger, long_task_ok, queue='swq')
        task.delay()
        self._ensure_queues(queued={'swq': 2})

        # Start a worker and wait until it starts processing.
        # It should start processing one task and hold a lock on the queue
        worker = Process(target=external_worker)
        worker.start()

        # Wait up to 2 seconds for external task to start
        result = self.conn.blpop('long_task_ok', 2)
        # blpop returns None on timeout; fail with a clear message instead
        # of an opaque TypeError when indexing the result below.
        assert result is not None, 'external worker did not start in time'
        assert result[1] == '1'

        # This worker should fail to get the queue lock and exit immediately
        Worker(self.tiger).run(once=True, force_once=True)
        self._ensure_queues(active={'swq': 1}, queued={'swq': 1})
        # Wait for external worker
        worker.join()

        # Retest using a non-single worker queue
        # Queue two tasks
        task = Task(self.tiger, long_task_ok, queue='not_swq')
        task.delay()
        task = Task(self.tiger, long_task_ok, queue='not_swq')
        task.delay()
        self._ensure_queues(queued={'not_swq': 2})

        # Start a worker and wait until it starts processing.
        # It should start processing one task
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # This worker should process the second task
        Worker(self.tiger).run(once=True, force_once=True)

        # Queues should be empty
        self._ensure_queues()

        worker.join()
Example #7
0
    def test_max_workers(self):
        """Test MAX_WORKERS_PER_QUEUE: with the limit set to 2, a third
        worker cannot start processing queue 'a' while two tasks are
        already active."""

        # Queue three tasks
        for i in range(0, 3):
            task = Task(self.tiger, long_task_ok, queue='a')
            task.delay()
        self._ensure_queues(queued={'a': 3})

        # Start two workers and wait until they start processing.
        # Both external workers run with the same per-queue limit of 2.
        worker1 = Process(target=external_worker,
                          kwargs={'max_workers_per_queue': 2})
        worker2 = Process(target=external_worker,
                          kwargs={'max_workers_per_queue': 2})
        worker1.start()
        worker2.start()

        # Wait for both tasks to start
        wait_for_long_task()
        wait_for_long_task()

        # Verify they both are active
        self._ensure_queues(active={'a': 2}, queued={'a': 1})

        # This worker should fail to get the queue lock and exit immediately
        # (the queue is already at its 2-worker capacity)
        worker = Worker(self.tiger)
        worker.max_workers_per_queue = 2
        worker.run(once=True, force_once=True)
        self._ensure_queues(active={'a': 2}, queued={'a': 1})
        # Wait for external workers
        worker1.join()
        worker2.join()
Example #8
0
 def test_discard_expired_task(self):
     """A task with no retry policy lands in the "error" state when its
     worker is killed before the task finishes."""
     doomed = Task(self.tiger, sleep_task)
     self._test_expired_task(doomed, 'error')
Example #9
0
def dispatch(payload):
    """Queue a book run described by *payload* and record it in Mongo.

    Returns the inserted record's Mongo id, or None when the payload is
    missing an inventory or a task list.
    """
    hosts = payload.get('inventory')
    tasks = payload.get('tasks')
    if not hosts or not tasks:
        return None
    # Bug fix: md5() returns a hash *object*; the old code used that
    # object directly as the task lock_key and stored it in the Mongo
    # record (not serializable). Use the hex digest string instead.
    # sort_keys keeps the digest stable for logically-equal payloads.
    uid = md5(json.dumps(payload, sort_keys=True).encode('utf-8')).hexdigest()
    username = payload.get('username')
    # Reuse the caller-supplied request id when present.
    run_id = payload.get('req_id') or str(uuid.uuid4())
    params = [run_id, payload]
    queue_name = 'book_runtime'
    func = run
    # unique+lock with the payload digest prevents the same payload from
    # being queued/run concurrently.
    task = Task(tiger,
                func=func,
                args=params,
                queue=queue_name,
                unique=True,
                lock=True,
                lock_key=uid)
    now = time.time()
    run_record = {
        'uid': uid,
        'run_id': run_id,
        'run_by': username,
        'options': payload,
        'result': '',
        'state': 'pending',
        'created_at': now,
        'updated_at': now,
    }
    result = Perform.insert_one(run_record)
    task.delay()

    return result.inserted_id
Example #10
0
 def test_requeue_expired_task(self):
     """A task that retries on JobTimeoutException goes back to the
     "queued" state when its worker is killed mid-run."""
     retriable = Task(self.tiger, sleep_task, retry_on=[JobTimeoutException])
     self._test_expired_task(retriable, 'queued')
Example #11
0
    def test_queue_system_lock(self):
        """Test queue system lock."""

        # FreezeTime pins time.time(), so the lock-expiry comparisons
        # below are exact rather than racy.
        with FreezeTime(datetime.datetime(2014, 1, 1)):
            # Queue three tasks
            for i in range(0, 3):
                task = Task(self.tiger, long_task_ok, queue='a')
                task.delay()
            self._ensure_queues(queued={'a': 3})

            # Ensure we can process one
            worker = Worker(self.tiger)
            worker.max_workers_per_queue = 2
            worker.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 2})

            # Set system lock so no processing should occur for 10 seconds
            self.tiger.set_queue_system_lock('a', 10)

            # The lock deadline is an absolute timestamp (now + 10s).
            lock_timeout = self.tiger.get_queue_system_lock('a')
            assert lock_timeout == time.time() + 10

        # Confirm tasks don't get processed within the system lock timeout
        with FreezeTime(datetime.datetime(2014, 1, 1, 0, 0, 9)):
            worker = Worker(self.tiger)
            worker.max_workers_per_queue = 2
            worker.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 2})

        # 10 seconds in the future the lock should have expired
        with FreezeTime(datetime.datetime(2014, 1, 1, 0, 0, 10)):
            worker = Worker(self.tiger)
            worker.max_workers_per_queue = 2
            worker.run(once=True, force_once=True)
            self._ensure_queues(queued={'a': 1})
Example #12
0
def run_schedule_task(_id, request_id, username, **kwargs):
    """Queue one scheduled run of job ``_id`` and record it in the
    'tasks' collection.

    Args:
        _id: job id (string form of a Mongo ObjectId).
        request_id: originating request id, stored for tracing.
        username: user on whose behalf the run happens; forwarded to the
            task function via ``params``.
        **kwargs: extra options forwarded to the task function.
    """
    db = Mongo()
    params = (_id, request_id, username)
    queue_name = get_queue_by_job(_id)
    job = db.collection('jobs').find_one({'_id': ObjectId(_id)})
    # NOTE(review): find_one returns None for a missing job, which would
    # make job.get(...) below raise AttributeError -- confirm scheduled
    # jobs are guaranteed to exist when this fires.
    func = run_playbook_task
    if job.get('type') == 'adhoc':
        func = run_adhoc_task

    # unique=False: repeated schedule fires may each queue a task;
    # lock/lock_key serialize execution per job id instead.
    task = Task(tiger,
                func=func,
                args=params,
                kwargs=kwargs,
                queue=queue_name,
                unique=False,
                lock=True,
                lock_key=_id)
    # Audit record mirroring the queued task (t_id links back to it).
    task_record = {
        'job_id': _id,
        'state': QUEUED,
        'type': 'schedule',
        'ansible': job.get('type'),
        'queue': queue_name,
        'result': '',
        'request_id': request_id,
        't_id': task.id,
        'created_at': time(),
    }

    db.collection('tasks').insert_one(task_record)
    task.delay()
Example #13
0
    def test_current_tasks_eager(self):
        """In eager mode the task runs inline and records exactly its
        own id in 'task_ids', with no runtime error flagged."""
        self.tiger.config['ALWAYS_EAGER'] = True

        eager_task = Task(self.tiger, verify_current_tasks)
        eager_task.delay()
        assert not self.conn.exists('runtime_error')
        assert self.conn.lrange('task_ids', 0, -1) == [eager_task.id]
Example #14
0
    def test_periodic_execution_unique_ids_self_correct(self):
        """
        Test that periodic tasks will self-correct unique ids.

        A task stored under a malformed unique id (generated from
        None/None instead of []/{} args) should come out of a worker run
        rescheduled under the correct id.
        """
        # Sleep until the next second
        sleep_until_next_second()

        # generate the ids
        correct_unique_id = gen_unique_id(serialize_func_name(periodic_task),
                                          [], {})
        # Same function but args/kwargs of None -- presumably mimics a
        # legacy/buggy id format; TODO confirm the malformed-id source.
        malformed_unique_id = gen_unique_id(serialize_func_name(periodic_task),
                                            None, None)

        task = Task(tiger, func=periodic_task)

        # patch the id to something slightly wrong
        assert task.id == correct_unique_id
        task._data['id'] = malformed_unique_id
        assert task.id == malformed_unique_id

        # schedule the task
        task.delay()
        self._ensure_queues(queued={'periodic': 1})

        # pull task out of the queue by the malformed id
        task = Task.from_id(tiger, 'periodic', QUEUED, malformed_unique_id)
        assert task is not None

        Worker(tiger).run(once=True)
        self._ensure_queues(scheduled={'periodic': 1})

        # pull task out of the queue by the self-corrected id
        task = Task.from_id(tiger, 'periodic', SCHEDULED, correct_unique_id)
        assert task is not None
Example #15
0
    def test_current_task_eager(self):
        """Eager execution still exposes the current task: the task
        stores its id under 'task_id' and flags no runtime error."""
        self.tiger.config['ALWAYS_EAGER'] = True

        eager_task = Task(self.tiger, verify_current_task)
        eager_task.delay()
        assert not self.conn.exists('runtime_error')
        assert self.conn.get('task_id') == eager_task.id
Example #16
0
    def test_delay(self):
        """delay() without "when" puts the task straight on the queue."""
        queued_task = Task(self.tiger, simple_task)
        self._ensure_queues()
        queued_task.delay()
        self._ensure_queues(queued={'default': 1})

        # Only scheduled tasks can be cancelled; a queued one raises.
        pytest.raises(TaskNotFound, queued_task.cancel)
Example #17
0
def run_job(_id, history_id=None, **kwargs):
    """Run or schedule the job ``_id``.

    For jobs whose template run_type is 'schedule', registers a cron
    trigger with the scheduler (returns True, or False if one already
    exists). Otherwise queues the job immediately via tasktiger and
    records it in the 'tasks' collection, returning the inserted Mongo
    record id. Returns False when the job does not exist.
    """
    db = Mongo()
    record = Job.find_by_id(_id)
    if not record:
        return False

    request_id = str(current_request_id())
    # login_user may be absent (e.g. system-triggered runs).
    username = None if not login_user else login_user.get('username')
    params = (_id, request_id, username, history_id)
    queue_name = get_queue_by_job(_id)
    extra = record.get('extra')
    template = record.get('template')
    # NOTE(review): extra/template are used without None checks below --
    # confirm job records always carry both fields.
    schedule = extra.get('schedule')
    ansible_type = record.get('type')
    if template.get('run_type') == 'schedule':
        # Refuse to double-register the same scheduler job.
        existed = db.collection('scheduler_jobs').find_one(
            {'_id': record['_id']})
        if existed:
            return False

        scheduler.add_job(func=run_schedule_task,
                          trigger='cron',
                          args=params,
                          coalesce=True,
                          kwargs=kwargs,
                          id=str(record.get('_id')),
                          max_instances=1,
                          name=record.get('name'),
                          **schedule)
        return True
    else:
        func = run_playbook_task if ansible_type != 'adhoc' else run_adhoc_task
        # unique+lock on the job id: only one instance of this job may be
        # queued/running at a time.
        task = Task(tiger,
                    func=func,
                    args=params,
                    kwargs=kwargs,
                    queue=queue_name,
                    unique=True,
                    lock=True,
                    lock_key=_id)

        # Audit record mirroring the queued task (t_id links back to it).
        task_record = {
            'job_id': _id,
            'type': 'trigger',
            'ansible': ansible_type,
            'state': QUEUED,
            'queue': queue_name,
            'result': '',
            'request_id': request_id,
            't_id': task.id,
            'created_at': time(),
            'kwargs': kwargs,
        }

        result = db.collection('tasks').insert_one(task_record)
        task.delay()

        return result.inserted_id
Example #18
0
    def test_task_disappears(self):
        """
        Ensure that a task object that disappears while the task is processing
        is handled properly. This could happen when a worker processes a task,
        then hangs for a long time, causing another worker to pick up and finish
        the task. Then, when the original worker resumes, the task object will
        be gone. Make sure we log a "not found" error and move on.
        """

        task = Task(self.tiger, sleep_task, kwargs={'delay': 2 * DELAY})
        task.delay()
        self._ensure_queues(queued={'default': 1})

        # Start a worker and wait until it starts processing.
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # Remove the task object while the task is processing.
        assert self.conn.delete('t:task:{}'.format(task.id)) == 1

        # Kill the worker while it's still processing the task.
        # SIGKILL gives it no chance to clean up, so the task id stays in
        # the active set (verified below).
        os.kill(worker.pid, signal.SIGKILL)

        # _ensure_queues() breaks here because it can't find the task
        assert self.conn.scard('t:queued') == 0
        assert self.conn.scard('t:active') == 1
        assert self.conn.scard('t:error') == 0
        assert self.conn.scard('t:scheduled') == 0

        # Capture logger
        errors = []

        # Stand-in for the logger's error() method; records each message.
        def fake_error(msg):
            errors.append(msg)

        with Patch(self.tiger.log._logger, 'error', fake_error):
            # Since ACTIVE_TASK_UPDATE_TIMEOUT hasn't elapsed yet, re-running
            # the worker at this time won't change anything. (run twice to move
            # from scheduled to queued)
            Worker(self.tiger).run(once=True)
            Worker(self.tiger).run(once=True)

            assert len(errors) == 0
            assert self.conn.scard('t:queued') == 0
            assert self.conn.scard('t:active') == 1
            assert self.conn.scard('t:error') == 0
            assert self.conn.scard('t:scheduled') == 0

            # After waiting and re-running the worker, queues will clear.
            time.sleep(2 * DELAY)
            Worker(self.tiger).run(once=True)
            Worker(self.tiger).run(once=True)

            self._ensure_queues()
            # Exactly one "not found" error should have been logged for
            # the vanished task object.
            assert len(errors) == 1
            assert "not found" in errors[0]
Example #19
0
    def test_single_worker_queue(self):
        """Test Single Worker Queue.

        Consistency fix: the in-process workers below now use
        ``self.tiger`` (as the Task construction and the other variants
        of this test do) instead of the module-global ``tiger``.
        """

        # Queue two tasks
        task = Task(self.tiger, long_task_ok, queue='swq')
        task.delay()
        task = Task(self.tiger, long_task_ok, queue='swq')
        task.delay()
        self._ensure_queues(queued={'swq': 2})

        # Start a worker and wait until it starts processing.
        # It should start processing one task and hold a lock on the queue
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # This worker should fail to get the queue lock and exit immediately
        Worker(self.tiger).run(once=True, force_once=True)
        self._ensure_queues(active={'swq': 1}, queued={'swq': 1})
        # Wait for external worker
        worker.join()

        # Retest using a non-single worker queue
        # Queue two tasks
        task = Task(self.tiger, long_task_ok, queue='not_swq')
        task.delay()
        task = Task(self.tiger, long_task_ok, queue='not_swq')
        task.delay()
        self._ensure_queues(queued={'not_swq': 2})

        # Start a worker and wait until it starts processing.
        # It should start processing one task
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # This worker should process the second task
        Worker(self.tiger).run(once=True, force_once=True)

        # Queues should be empty
        self._ensure_queues()

        worker.join()
Example #20
0
    def test_tasks_from_queue(self):
        """tasks_from_queue() returns the queued tasks of one queue with
        metadata (func, state, queue) matching the original Task objects."""
        task0 = Task(self.tiger, simple_task)
        task1 = Task(self.tiger, exception_task)
        task2 = Task(self.tiger, simple_task, queue='other')

        task0.delay()
        task1.delay()
        task2.delay()

        # Only the two tasks on 'default' are returned; task2 lives on
        # the 'other' queue.
        n, tasks = Task.tasks_from_queue(self.tiger, 'default', 'queued')
        assert n == 2
        assert task0.id == tasks[0].id
        assert task0.func == simple_task
        assert task0.func == tasks[0].func
        assert task0.serialized_func == 'tests.tasks:simple_task'
        assert task0.serialized_func == tasks[0].serialized_func
        assert task0.state == tasks[0].state
        assert task0.state == 'queued'
        assert task0.queue == tasks[0].queue
        assert task0.queue == 'default'
Example #21
0
    def test_delay_scheduled_2(self):
        """A non-unique scheduled task is only addressable by its id."""
        scheduled = Task(self.tiger, simple_task, queue='a')
        scheduled.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'a': 1})

        task_id = scheduled.id

        # Recreating a non-unique task yields a different id, so the new
        # object cannot cancel the scheduled one.
        recreated = Task(self.tiger, simple_task, queue='a')
        pytest.raises(TaskNotFound, recreated.cancel)

        # Looking the task up by its id works, and the result can be
        # cancelled.
        def fetch_task():
            return Task.from_id(self.tiger, 'a', 'scheduled', task_id)

        fetch_task().cancel()
        self._ensure_queues()

        # Once cancelled, from_id raises.
        pytest.raises(TaskNotFound, fetch_task)
Example #22
0
    def test_delay_scheduled(self):
        """A scheduled task can be cancelled exactly once."""
        scheduled = Task(self.tiger, simple_task, queue='a')
        scheduled.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'a': 1})

        # The first cancel removes it from the scheduled queue.
        scheduled.cancel()
        self._ensure_queues()

        # A second cancel finds nothing and raises.
        pytest.raises(TaskNotFound, scheduled.cancel)
Example #23
0
    def test_update_scheduled_time(self):
        """update_scheduled_time() moves a unique task's run time back."""
        unique_task = Task(self.tiger, simple_task, unique=True)
        unique_task.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'default': 1})
        before = self.conn.zscore('t:scheduled:default', unique_task.id)

        unique_task.update_scheduled_time(when=datetime.timedelta(minutes=6))
        self._ensure_queues(scheduled={'default': 1})
        after = self.conn.zscore('t:scheduled:default', unique_task.id)

        # Going from +5min to +6min shifts the score by ~60s; allow a
        # little processing-time slack above 60, but not much.
        assert 60 <= after - before < 61
Example #24
0
def dispatch(book_id, entry, payload):
    """Queue a run of book *book_id* and record it in Mongo.

    ``payload['options']`` may be a pre-parsed dict or a raw
    ``ansible-playbook`` command-line string, which is parsed here.
    Returns the inserted record's Mongo id, or False when the book or
    *entry* is missing.

    Fixes vs. previous version: removed a leftover debug print of the
    raw arguments; use isinstance() instead of ``type(x) == str``.
    """
    book = Book.find_by_id(book_id)
    if not book:
        return False

    username = payload.get('username')
    # Reuse the caller-supplied request id when present.
    run_id = payload.get('req_id') or str(uuid.uuid4())
    params = [book_id, run_id]
    options = payload.get('options')
    if not entry:
        return False
    # isinstance is the idiomatic (and subclass-safe) type check.
    if isinstance(options, str):
        args = options.split(' ')
        pb = PlaybookCLI(args)
        pb.init_parser()
        # args[0] is the program name; parse only the real arguments.
        options, args = pb.parser.parse_args(args[1:])
        options, args = pb.post_process_args(options, args)
        options = options.__dict__
        options['entry'] = args
        # Replace the first non-existent inventory path with its basename
        # (assumed resolvable relative to the runtime's working dir --
        # TODO confirm).
        for inventory_path in options['inventory']:
            if not os.path.isfile(inventory_path):
                options['inventory'] = os.path.basename(inventory_path)
                break
    queue_name = 'book_runtime'
    func = run
    # unique+lock on the book id: one run per book at a time.
    task = Task(tiger,
                func=func,
                args=params,
                kwargs=options,
                queue=queue_name,
                unique=True,
                lock=True,
                lock_key=book_id)
    run_record = {
        'book_id': book_id,
        'run_id': run_id,
        'run_by': username,
        'options': options,
        'result': '',
        'state': 'pending',
        # TODO(review): placeholder timestamps -- the sibling dispatch()
        # implementation uses time.time() here.
        'created_at': 1,
        'updated_at': 2,
    }
    result = Perform.insert_one(run_record)
    task.delay()

    return result.inserted_id
Example #25
0
    def test_task_all_states(self):
        """Test max queue size with tasks in all three states.

        max_queue_size counts active + queued + scheduled tasks, so with
        one task in each state a limit of 3 rejects any further delay().
        """

        # Active
        task = Task(self.tiger, sleep_task, queue='a')
        task.delay()
        self._ensure_queues(queued={'a': 1})

        # Start a worker and wait until it starts processing.
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # Kill the worker while it's still processing the task.
        # SIGKILL skips cleanup, leaving the task in the active state.
        os.kill(worker.pid, signal.SIGKILL)
        self._ensure_queues(active={'a': 1})

        # Scheduled
        self.tiger.delay(
            simple_task,
            queue='a',
            max_queue_size=3,
            when=datetime.timedelta(seconds=10),
        )

        # Queued
        self.tiger.delay(simple_task, queue='a', max_queue_size=3)

        self._ensure_queues(active={'a': 1},
                            queued={'a': 1},
                            scheduled={'a': 1})

        # Should fail to queue task to run immediately
        with pytest.raises(QueueFullException):
            self.tiger.delay(simple_task, queue='a', max_queue_size=3)

        # Should fail to queue task to run in the future
        with pytest.raises(QueueFullException):
            self.tiger.delay(
                simple_task,
                queue='a',
                max_queue_size=3,
                when=datetime.timedelta(seconds=10),
            )
Example #26
0
 def test_task(self):
     """The worker runs the task, which verifies it receives the proper
     TaskTiger instance; the queues drain afterwards."""
     probe = Task(self.tiger, verify_tasktiger_instance)
     probe.delay()
     Worker(self.tiger).run(once=True)
     self._ensure_queues()
Example #27
0
 def test_eager_task(self):
     """In eager mode delay() returns the runner's result directly."""
     self.tiger.config['ALWAYS_EAGER'] = True
     eager = Task(self.tiger, simple_task, runner_class=MyRunnerClass)
     assert eager.delay() == 123
     self._ensure_queues()
Example #28
0
 def test_eager(self):
     """Eagerly executed tasks still receive the proper TaskTiger
     instance (the task itself asserts this)."""
     self.tiger.config['ALWAYS_EAGER'] = True
     probe = Task(self.tiger, verify_tasktiger_instance)
     probe.delay()
Example #29
0
 def test_execute(self):
     """execute() runs the task inline and propagates its exception."""
     failing = Task(self.tiger, exception_task)
     pytest.raises(Exception, failing.execute)
Example #30
0
 def test_current_task(self):
     """The task sees itself as the current task: it stores its id under
     'task_id' and flags no runtime error."""
     current = Task(self.tiger, verify_current_task)
     current.delay()
     Worker(self.tiger).run(once=True)
     assert not self.conn.exists('runtime_error')
     assert self.conn.get('task_id') == current.id