Example #1
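A test, apparently from the rq test suite, verifying that Job.dependencies_are_met() WATCHes the dependency's job key: when another client changes the dependency's status mid-pipeline, pipeline.execute() aborts with WatchError.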
    def test_dependencies_finished_watches_job(self):
        queue = Queue(connection=self.testconn)

        dependency_job = queue.enqueue(fixtures.say_hello)

        dependent_job = Job.create(func=fixtures.say_hello)
        dependent_job._dependency_ids = [dependency_job.id]
        dependent_job.register_dependency()

        with self.testconn.pipeline() as pipeline:
            dependent_job.dependencies_are_met(pipeline=pipeline)

            # Change the dependency's status through the raw connection, not
            # the pipeline, so the WATCHed job key changes under the open
            # transaction.
            dependency_job.set_status(JobStatus.FAILED, pipeline=self.testconn)
            pipeline.multi()

            with self.assertRaises(WatchError):
                pipeline.touch(Job.key_for(dependent_job.id))
                pipeline.execute()
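A minimal standalone sketch of the WATCH/MULTI/EXEC behaviour the test relies on; the key name is illustrative and not part of rq's API, and a Redis server is assumed on localhost:

import redis

r = redis.Redis()
r.set('job:xyz', 'queued')

with r.pipeline() as pipe:
    pipe.watch('job:xyz')         # optimistic lock on the key
    r.set('job:xyz', 'failed')    # concurrent write from another client
    pipe.multi()                  # start buffering the transaction
    pipe.set('job:xyz', 'started')
    try:
        pipe.execute()            # aborts: a WATCHed key changed
    except redis.WatchError:
        print('transaction aborted, retry')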
Example #2
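The same guarantee for fetch_dependencies(watch=True, ...): the dependency's job key is WATCHed, an out-of-band write invalidates it, and the open transaction aborts.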
    def test_fetch_dependencies_watches(self):
        queue = Queue(connection=self.testconn)
        dependency_job = queue.enqueue(fixtures.say_hello)
        dependent_job = Job.create(func=fixtures.say_hello,
                                   depends_on=dependency_job)

        dependent_job.register_dependency()
        dependent_job.save()

        with self.testconn.pipeline() as pipeline:
            dependent_job.fetch_dependencies(watch=True, pipeline=pipeline)

            pipeline.multi()

            with self.assertRaises(WatchError):
                # Overwrite the WATCHed dependency key via the raw connection
                # so the queued transaction cannot commit.
                self.testconn.set(Job.key_for(dependency_job.id),
                                  'somethingelsehappened')
                pipeline.touch(dependency_job.id)
                pipeline.execute()
Example #3
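A plain unit test for Job.key_for(), which builds the Redis key a job hash is stored under.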
    def test_key_for_should_return_prefixed_job_id(self):
        """test redis key to store job hash under"""
        job_id = 'random'
        key = Job.key_for(job_id=job_id)

        assert key == (Job.redis_job_namespace_prefix + job_id).encode('utf-8')
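With rq's default namespace prefix this evaluates to b'rq:job:random' (assuming redis_job_namespace_prefix is left at its default of 'rq:job:').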
Example #4
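A larger, production-style use of the same optimistic-locking recipe, apparently from a project built on rq (JobWrapper and update_cache_job are local to that project): atomically enqueue a cache-update job, merge it into a still-pending one, or defer it behind a running one.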
import logging

from redis import WatchError
from rq.job import Job, JobStatus

logger = logging.getLogger(__name__)

# JobWrapper and update_cache_job are helpers local to the host project.


def create_update_cache_job(queue, instance, keys, decrement=1):
    queue.connection.sadd(queue.redis_queues_keys, queue.key)
    job_wrapper = JobWrapper.create(update_cache_job,
                                    instance=instance,
                                    keys=keys,
                                    decrement=decrement,
                                    connection=queue.connection,
                                    origin=queue.name,
                                    timeout=queue.DEFAULT_TIMEOUT)
    last_job_key = instance.get_last_job_key()

    with queue.connection.pipeline() as pipe:
        # Optimistic-locking loop: WATCH every key the decision depends on,
        # then MULTI/EXEC; a concurrent write to a watched key raises
        # WatchError and restarts the iteration.
        while True:
            try:
                pipe.watch(last_job_key)
                last_job_id = queue.connection.get(last_job_key)
                depends_on_wrapper = None
                if last_job_id is not None:
                    pipe.watch(Job.key_for(last_job_id),
                               JobWrapper.params_key_for(last_job_id))
                    depends_on_wrapper = JobWrapper(last_job_id,
                                                    queue.connection)

                pipe.multi()

                depends_on_status = None
                if depends_on_wrapper is not None:
                    depends_on = depends_on_wrapper.job
                    depends_on_status = depends_on.get_status()

                if depends_on_status is None:
                    # enqueue without dependencies
                    pipe.set(last_job_key, job_wrapper.id)
                    job_wrapper.save_enqueued(pipe)
                    pipe.execute()
                    break

                if depends_on_status in [JobStatus.QUEUED, JobStatus.DEFERRED]:
                    new_job_params = \
                        depends_on_wrapper.merge_job_params(keys, decrement,
                                                            pipeline=pipe)
                    pipe.execute()
                    msg = 'SKIP %s (decrement=%s, job_status=%s, job_id=%s)'
                    msg = msg % (last_job_key, new_job_params[1],
                                 depends_on_status, last_job_id)
                    logger.debug(msg)
                    # skip this job
                    return None

                pipe.set(last_job_key, job_wrapper.id)

                if depends_on_status not in [JobStatus.FINISHED]:
                    # add job as a dependent
                    job = job_wrapper.save_deferred(last_job_id, pipe)
                    pipe.execute()
                    logger.debug('ADD AS DEPENDENT for %s (job_id=%s) OF %s' %
                                 (last_job_key, job.id, last_job_id))
                    return job

                job_wrapper.save_enqueued(pipe)
                pipe.execute()
                break
            except WatchError:
                logger.debug('RETRY after WatchError for %s' % last_job_key)
                continue
    logger.debug('ENQUEUE %s (job_id=%s)' % (last_job_key, job_wrapper.id))

    queue.push_job_id(job_wrapper.id)
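Note the shape of the loop: every key that influences the branch taken (the last-job pointer plus the dependency's job and params keys) is WATCHed before pipe.multi(), so each of the three outcomes, enqueue, merge-and-skip, or defer, commits only if none of those keys changed in the meantime.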