Code Example #1
File: test_job.py Project: SkyLothar/rq
    def test_get_call_string_unicode(self):
        """test call string with unicode keyword arguments"""
        queue = Queue(connection=self.testconn)

        job = queue.enqueue(fixtures.echo, arg_with_unicode=fixtures.UnicodeStringObject())
        self.assertIsNotNone(job.get_call_string())
        job.perform()
Code Example #2
File: enqueuer.py Project: biicode/bii-server
    def enqueue_job(self, *args):
        if not BII_LOG_TO_REDIS:
            logger.debug('Skipping logging due to config')
            return
        global POOL

        if not self.async_process:  # Call the method synchronously, right now
            import importlib
            module_name = ".".join(self.worker.split(".")[0:-1])
            themodule = importlib.import_module(module_name)
            call_method = getattr(themodule, self.worker.split(".")[-1])
            return call_method(*args)  # don't fall through and enqueue it again
        try:
            priority = Priority(self.priority)
            conn = self.connection or get_redis_connection()
            q = Queue(priority, connection=conn)
            # NOTE: passing the worker as a dotted-path string to enqueue_call
            # is unusual but needed here; see the code in the queue module.
            return q.enqueue_call(self.worker, args=args,
                                  timeout=self.timeout, result_ttl=self.result_ttl)
        except ConnectionError as e:
            logger.warn("Error connecting to Redis, reconnecting...")
            raise e
        except Exception as e:
            logger.warn("Error enqueuing: %s" % str(e))
            tb = traceback.format_exc()
            logger.warn(tb)
            raise e
Code Example #3
def bulk_invoke(func, args, nargs):
    """Bulk invoke a function via queues

    Uses internal implementation details of rq.
    """
    # for comparison, simplest thing that works
    # for i in nargs:
    #    argv = list(args)
    #    argv.append(i)
    #    func.delay(*argv)

    # CPython and PyPy lay out closure cells differently, so sniff for the
    # cell that carries the queue context
    for closure in func.delay.func_closure:
        if getattr(closure.cell_contents, 'queue', None):
            ctx = closure.cell_contents
            break
    q = Queue(ctx.queue, connection=connection)
    argv = list(args)
    argv.append(None)
    job = Job.create(
        func, args=argv, connection=connection,
        description="bucket-%s" % func.func_name,
        origin=q.name, status=JobStatus.QUEUED, timeout=ctx.timeout,
        result_ttl=0, ttl=ctx.ttl)

    for n in chunks(nargs, 100):
        job.created_at = datetime.utcnow()
        with connection.pipeline() as pipe:
            for s in n:
                argv[-1] = s
                job._id = unicode(uuid4())
                job.args = argv
                q.enqueue_job(job, pipeline=pipe)
            pipe.execute()
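bulk_invoke batches ids through a chunks() helper that the excerpt does not show. A minimal sketch of such a helper, assuming it simply yields fixed-size batches (cloud-custodian ships its own implementation, so this is illustrative only):

def chunks(iterable, size=100):
    """Yield successive lists of up to `size` items from `iterable`."""
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch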
Code Example #4
File: test_job.py Project: friedcell/rq
 def test_job_access_within_job_function(self):
     """The current job is accessible within the job function."""
     q = Queue()
     q.enqueue(fixtures.access_self)  # access_self calls get_current_job() and asserts
     w = Worker([q])
     w.work(burst=True)
     assert get_failed_queue(self.testconn).count == 0
Code Example #5
File: test_job.py Project: friedcell/rq
 def test_create_and_cancel_job(self):
     """test creating and using cancel_job deletes job properly"""
     queue = Queue(connection=self.testconn)
     job = queue.enqueue(fixtures.say_hello)
     self.assertEqual(1, len(queue.get_jobs()))
     cancel_job(job.id)
     self.assertEqual(0, len(queue.get_jobs()))
Code Example #6
File: test_registry.py Project: qq18436558/rq
    def test_job_execution(self):
        """Job is removed from StartedJobRegistry after execution."""
        registry = StartedJobRegistry(connection=self.testconn)
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        job = queue.enqueue(say_hello)
        self.assertTrue(job.is_queued)

        worker.prepare_job_execution(job)
        self.assertIn(job.id, registry.get_job_ids())
        self.assertTrue(job.is_started)

        worker.perform_job(job, queue)
        self.assertNotIn(job.id, registry.get_job_ids())
        self.assertTrue(job.is_finished)

        # Job that fails
        job = queue.enqueue(div_by_zero)

        worker.prepare_job_execution(job)
        self.assertIn(job.id, registry.get_job_ids())

        worker.perform_job(job, queue)
        self.assertNotIn(job.id, registry.get_job_ids())
Code Example #7
File: test_registry.py Project: nvie/rq
    def test_default_failure_ttl(self):
        """Job TTL defaults to DEFAULT_FAILURE_TTL"""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(say_hello)

        registry = FailedJobRegistry(connection=self.testconn)
        key = registry.key

        timestamp = current_timestamp()
        registry.add(job)
        self.assertLess(
            self.testconn.zscore(key, job.id),
            timestamp + DEFAULT_FAILURE_TTL + 2
        )
        self.assertGreater(
            self.testconn.zscore(key, job.id),
            timestamp + DEFAULT_FAILURE_TTL - 2
        )

        timestamp = current_timestamp()
        ttl = 5
        registry.add(job, ttl=5)
        self.assertLess(
            self.testconn.zscore(key, job.id),
            timestamp + ttl + 2
        )
        self.assertGreater(
            self.testconn.zscore(key, job.id),
            timestamp + ttl - 2
        )
Code Example #8
File: test_job.py Project: SkyLothar/rq
    def test_create_job_with_id(self):
        """test creating jobs with a custom ID"""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello, job_id="1234")
        self.assertEqual(job.id, "1234")
        job.perform()

        self.assertRaises(TypeError, queue.enqueue, fixtures.say_hello, job_id=1234)
Code Example #9
File: test_job.py Project: nvie/rq
 def test_job_access_within_job_function(self):
     """The current job is accessible within the job function."""
     q = Queue()
     job = q.enqueue(fixtures.access_self)
     w = Worker([q])
     w.work(burst=True)
     # access_self calls get_current_job() and executes successfully
     self.assertEqual(job.get_status(), JobStatus.FINISHED)
Code Example #10
File: test_registry.py Project: nvie/rq
    def test_invalid_job(self):
        """Requeuing a job that's not in FailedJobRegistry raises an error."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(say_hello)

        registry = FailedJobRegistry(connection=self.testconn)
        with self.assertRaises(InvalidJobOperation):
            registry.requeue(job)
Code Example #11
 def queue(cls, name):
     cls.connect()
     q = Queue(name)
     if name == 'success':
         jobs = cls.successful_jobs()
         return {'name': name, 'count': len(jobs), 'jobs': [j.get('id') for j in jobs]}
     else:
         q.compact()
     return {'name': q.name, 'count': q.count, 'jobs': q.job_ids}
Code Example #12
File: worker.py Project: aelaguiz/pysplash
    def work(self, burst=False):  # noqa
        """Starts the work loop.

        Pops and performs all jobs on the current list of queues.  When all
        queues are empty, block and wait for new jobs to arrive on any of the
        queues, unless `burst` mode is enabled.

        The return value indicates whether any jobs were processed.
        """
        self._install_signal_handlers()

        did_perform_work = False
        self.register_birth()
        self.log.info("RQ worker started, version %s" % rq.version.VERSION)
        self.state = "starting"
        try:
            while True:
                if self.stopped:
                    self.log.info("Stopping on request.")
                    break
                self.state = "idle"
                qnames = self.queue_names()
                self.procline("Listening on %s" % ",".join(qnames))
                self.log.info("")
                self.log.info("*** Listening on %s..." % green(", ".join(qnames)))
                wait_for_job = not burst
                try:
                    result = Queue.dequeue_any(self.queues, False, connection=self.connection)
                    while not result and wait_for_job and self.status_callback():
                        time.sleep(0.1)
                        result = Queue.dequeue_any(self.queues, False, connection=self.connection)
                    if result is None:
                        break
                except StopRequested:
                    break
                except UnpickleError as e:
                    msg = "*** Ignoring unpickleable data on %s." % green(e.queue.name)
                    self.log.warning(msg)
                    self.log.debug("Data follows:")
                    self.log.debug(e.raw_data)
                    self.log.debug("End of unreadable data.")
                    self.failed_queue.push_job_id(e.job_id)
                    continue

                self.state = "busy"

                job, queue = result
                self.log.info("%s: %s (%s)" % (green(queue.name), blue(job.description), job.id))

                self.fork_and_perform_job(job)

                did_perform_work = True
        finally:
            if not self.is_horse:
                self.register_death()
        return did_perform_work
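For context, here is a minimal sketch of driving a work loop like this with stock rq; the queue names are illustrative, not from the pysplash project:

from redis import Redis
from rq import Queue, Worker

conn = Redis()
queues = [Queue('high', connection=conn), Queue('default', connection=conn)]
worker = Worker(queues, connection=conn)
worker.work(burst=True)  # drain all queues once, then return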
Code Example #13
File: test_job.py Project: SkyLothar/rq
 def test_never_expire_during_execution(self):
     """Test what happens when job expires during execution"""
     ttl = 1
     queue = Queue(connection=self.testconn)
     job = queue.enqueue(fixtures.long_running_job, args=(2,), ttl=ttl)
     self.assertEqual(job.get_ttl(), ttl)
     job.save()
     job.perform()
     self.assertEqual(job.get_ttl(), -1)
     self.assertTrue(job.exists(job.id))
     self.assertEqual(job.result, 'Done sleeping...')
Code Example #14
File: test_job.py Project: SkyLothar/rq
    def test_delete(self):
        """job.delete() deletes itself & dependents mapping from Redis."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = Job.create(func=fixtures.say_hello, depends_on=job)
        job2.register_dependency()
        job.delete()
        self.assertFalse(self.testconn.exists(job.key))
        self.assertFalse(self.testconn.exists(job.dependents_key))

        self.assertNotIn(job.id, queue.get_job_ids())
Code Example #15
File: test_registry.py Project: nvie/rq
    def test_contains(self):
        registry = StartedJobRegistry(connection=self.testconn)
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(say_hello)

        self.assertFalse(job in registry)
        self.assertFalse(job.id in registry)

        registry.add(job, 5)

        self.assertTrue(job in registry)
        self.assertTrue(job.id in registry)
Code Example #16
File: test_registry.py Project: qq18436558/rq
    def test_register_dependency(self):
        """Ensure job creation and deletion works properly with DeferredJobRegistry."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(say_hello)
        job2 = queue.enqueue(say_hello, depends_on=job)

        registry = DeferredJobRegistry(connection=self.testconn)
        self.assertEqual(registry.get_job_ids(), [job2.id])

        # When deleted, job removes itself from DeferredJobRegistry
        job2.delete()
        self.assertEqual(registry.get_job_ids(), [])
Code Example #17
File: cli.py Project: joshuaroot/cloud-custodian
def failures():
    """Show any unexpected failures"""
    if not HAVE_BIN_LIBS:
        click.echo("missing required binary libs (lz4, msgpack)")
        return

    q = Queue('failed', connection=worker.connection)
    for i in q.get_job_ids():
        j = q.job_class.fetch(i, connection=q.connection)
        click.echo("%s on %s" % (j.func_name, j.origin))
        if not j.func_name.endswith('process_keyset'):
            click.echo("params %s %s" % (j._args, j._kwargs))
        click.echo(j.exc_info)
Code Example #18
def wait_until_queue_empty(name, port):
    '''
        Wait until the queue is empty.

        >>> from goodcrypto.oce.gpg_queue_settings import GPG_RQ, GPG_REDIS_PORT
        >>> wait_until_queue_empty(GPG_RQ, GPG_REDIS_PORT)
    '''

    redis_connection = Redis(REDIS_HOST, port)
    queue = Queue(name=name, connection=redis_connection)
    while not queue.is_empty():
        # sleep a random amount of time to minimize deadlock
        secs = uniform(1, 20)
        sleep(secs)
Code Example #19
File: test_registry.py Project: qq18436558/rq
    def test_job_deletion(self):
        """Ensure job is removed from StartedJobRegistry when deleted."""
        registry = StartedJobRegistry(connection=self.testconn)
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        job = queue.enqueue(say_hello)
        self.assertTrue(job.is_queued)

        worker.prepare_job_execution(job)
        self.assertIn(job.id, registry.get_job_ids())

        job.delete()
        self.assertNotIn(job.id, registry.get_job_ids())
Code Example #20
            def delay(*args, **kwargs):
                if isinstance(self.queue, string_types):
                    queue = Queue(name=self.queue, connection=self.connection)
                else:
                    queue = self.queue
                depends_on = kwargs.pop('depends_on', None)
                function_name = '{}.{}'.format(f.__module__, f.__name__)
                encrypted_args = [secure_serializer.dumps(function_name)]
                encrypted_args += [secure_serializer.dumps(args)]

                encrypted_args += [secure_serializer.dumps(kwargs)]
                return queue.enqueue_call(secure_job_proxy, args=encrypted_args, kwargs={},
                                          timeout=self.timeout, result_ttl=self.result_ttl,
                                          ttl=self.ttl, depends_on=depends_on)
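The decorator enqueues secure_job_proxy with an encrypted payload, but the proxy itself is not shown. A plausible worker-side counterpart, assuming secure_serializer.loads() reverses dumps() (a hypothetical sketch, not the project's actual code):

def secure_job_proxy(enc_function_name, enc_args, enc_kwargs):
    """Decrypt the payload and invoke the original function."""
    import importlib
    function_name = secure_serializer.loads(enc_function_name)
    args = secure_serializer.loads(enc_args)
    kwargs = secure_serializer.loads(enc_kwargs)
    module_name, _, func_name = function_name.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, func_name)(*args, **kwargs)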
Code Example #21
 def jobs(cls, queuename=None):
     cls.connect()
     if queuename:
         queue = Queue(queuename)
         if queuename != 'success':
             queue.compact()
             return [serialize_job(Job(id)) for id in queue.job_ids]
         else:
             return cls.successful_jobs()
     else:
         j = {}
         for queue in cls.queues():
             n = queue.get('name')
             j[n] = cls.jobs(n)
         return j
Code Example #22
File: test_registry.py Project: AaronWan/rq
    def test_jobs_are_put_in_registry(self):
        """Completed jobs are added to FinishedJobRegistry."""
        self.assertEqual(self.registry.get_job_ids(), [])
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # Completed jobs are put in FinishedJobRegistry
        job = queue.enqueue(say_hello)
        worker.perform_job(job)
        self.assertEqual(self.registry.get_job_ids(), [job.id])

        # Failed jobs are not put in FinishedJobRegistry
        failed_job = queue.enqueue(div_by_zero)
        worker.perform_job(failed_job)
        self.assertEqual(self.registry.get_job_ids(), [job.id])
Code Example #23
def queue_keyserver_retrieval(fingerprint, encryption_name, user_initiated_search, callback=None):
    '''
        Start the process of retrieving a key from the keyservers.

        # Test extreme cases
        >>> queue_keyserver_retrieval('99C4 402C AE6F 09DB 604D  4A8A 8559 78CF 296D E1CD', 'GPG', None)
        False
        >>> queue_keyserver_retrieval(None, 'GPG', '*****@*****.**')
        False
    '''

    try:
        if fingerprint is None:
            result_ok = False
            log_message("cannot retrieve key from keyservers without a fingerprint")
        elif encryption_name is None:
            result_ok = False
            log_message("require an encryption_name")
        elif user_initiated_search is None:
            result_ok = False
            log_message("require an email where we can send notification if successful")
        else:
            from goodcrypto.mail.keyservers import get_key_from_keyservers

            crypto_jobs = get_job_count(CRYPTO_RQ, CRYPTO_REDIS_PORT)
            redis_connection = Redis(REDIS_HOST, CRYPTO_REDIS_PORT)
            queue = Queue(name=CRYPTO_RQ, connection=redis_connection)
            secs_to_wait = DEFAULT_TIMEOUT * (queue.count + crypto_jobs + 1)
            job = queue.enqueue_call(get_key_from_keyservers,
                                     args=[
                                       pickle.dumps(fingerprint),
                                       pickle.dumps(encryption_name),
                                       pickle.dumps(user_initiated_search),
                                       callback],
                                     timeout=secs_to_wait)

            result_ok = get_job_results(queue, job, secs_to_wait, fingerprint)
            if job.is_failed:
                result_ok = False
                log_message('job failed for {}'.format(fingerprint))
            else:
                log_message('queued searching keyservers for a key for {}'.format(fingerprint))
    except Exception as exception:
        result_ok = False
        record_exception()
        log_message('EXCEPTION - see syr.exception.log for details')

    return result_ok
Code Example #24
File: arbiter.py Project: matrixise/rq-arbiter
 def dequeue_job_and_maintain_ttl(self, timeout):
     while True:
         try:
             return Queue.dequeue_any(self.queues, timeout,
                     connection=self.connection)
         except DequeueTimeout:
             return None
Code Example #25
def queue_keyserver_search(email, user_initiated_search, interactive=False):
    '''
        Start the process of searching and retrieving a key from the keyservers.

        # Test extreme cases
        # In honor of Syrian teenager who refused to be a suicide bomber.
        >>> queue_keyserver_search('*****@*****.**', None)
        False
        >>> queue_keyserver_search('*****@*****.**', None, interactive=True)
        False
        >>> queue_keyserver_search(None, '*****@*****.**')
        False
    '''

    try:
        if email is None:
            result_ok = False
            log_message("cannot search keyservers without an email address")
        elif user_initiated_search is None:
            result_ok = False
            log_message("require an email where we can send notification if successful")
        else:
            from goodcrypto.mail.keyservers import search_keyservers

            crypto_jobs = get_job_count(CRYPTO_RQ, CRYPTO_REDIS_PORT)
            redis_connection = Redis(REDIS_HOST, CRYPTO_REDIS_PORT)
            queue = Queue(name=CRYPTO_RQ, connection=redis_connection)
            secs_to_wait = DEFAULT_TIMEOUT * (queue.count + crypto_jobs + 1)
            job = queue.enqueue_call(search_keyservers,
                                     args=[
                                       pickle.dumps(email),
                                       pickle.dumps(user_initiated_search),
                                       pickle.dumps(interactive)],
                                     timeout=secs_to_wait)

            result_ok = get_job_results(queue, job, secs_to_wait, email)
            if job.is_failed:
                result_ok = False
                log_message('job failed for {}'.format(email))
            else:
                log_message('queued searching keyservers for a key for {}'.format(email))
    except Exception as exception:
        result_ok = False
        record_exception()
        log_message('EXCEPTION - see syr.exception.log for details')

    return result_ok
Code Example #26
def get_job_count(name, port):
    '''
        Get the count of jobs in the queue.

        >>> from goodcrypto.oce.gpg_queue_settings import GPG_RQ, GPG_REDIS_PORT
        >>> wait_until_queue_empty(GPG_RQ, GPG_REDIS_PORT)
        >>> get_job_count(GPG_RQ, GPG_REDIS_PORT)
        0
    '''

    redis_connection = Redis(REDIS_HOST, port)
    queue = Queue(name=name, connection=redis_connection)
    job_ids = list(queue.get_job_ids())
    if len(job_ids) > 0:
        for job_id in job_ids:
            log.write_and_flush('job id: {}'.format(job_id))
    return queue.count
Code Example #27
File: scheduler.py Project: peergradeio/rq-scheduler
 def get_queue_for_job(self, job):
     """
     Returns a queue to put job into.
     """
     if self._queue is not None:
         return self._queue
     key = '{0}{1}'.format(Queue.redis_queue_namespace_prefix, job.origin)
     return Queue.from_queue_key(key, connection=self.connection)
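For reference, Queue.redis_queue_namespace_prefix defaults to 'rq:queue:' and Queue.from_queue_key() strips that prefix back off, so the lookup above is essentially a name round trip (a small sketch):

from redis import Redis
from rq import Queue

conn = Redis()
q = Queue('emails', connection=conn)
key = Queue.redis_queue_namespace_prefix + q.name  # 'rq:queue:emails'
assert Queue.from_queue_key(key, connection=conn).name == 'emails'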
Code Example #28
File: test_registry.py Project: nvie/rq
    def test_cleanup_moves_jobs_to_failed_job_registry(self):
        """Moving expired jobs to FailedJobRegistry."""
        queue = Queue(connection=self.testconn)
        failed_job_registry = FailedJobRegistry(connection=self.testconn)
        job = queue.enqueue(say_hello)

        self.testconn.zadd(self.registry.key, {job.id: 2})

        # Job has not been moved to FailedJobRegistry
        self.registry.cleanup(1)
        self.assertNotIn(job, failed_job_registry)
        self.assertIn(job, self.registry)

        self.registry.cleanup()
        self.assertIn(job.id, failed_job_registry)
        self.assertNotIn(job, self.registry)
        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.FAILED)
Code Example #29
File: compat.py Project: ducu/rq-dashboard
    def __init__(self, default_timeout=None, connection=None):
        super(FailedQueue, self).__init__(
            name=NotImplemented,
            default_timeout=default_timeout,
            connection=connection,
        )

        self._registries = (q.failed_job_registry for q in Queue.all())
        self._job_ids = None
Code Example #30
    def queues(cls):
        cls.connect()

        def compact_queue(q):
            q.compact()
            return q
        queues = serialize_queues(sorted(map(compact_queue, Queue.all())))
        queues.append(cls.queue('success'))
        return queues
Code Example #31
File: test_job.py Project: pboribal/rq
    def test_dependencies_finished_returns_false_if_dependencies_queued(self):
        queue = Queue(connection=self.testconn)

        dependency_job_ids = [
            queue.enqueue(fixtures.say_hello).id for _ in range(5)
        ]

        dependent_job = Job.create(func=fixtures.say_hello)
        dependent_job._dependency_ids = dependency_job_ids
        dependent_job.register_dependency()

        dependencies_finished = dependent_job.dependencies_are_met()

        self.assertFalse(dependencies_finished)
Code Example #32
File: test_registry.py Project: soasme/rq
    def test_clean_registries(self):
        """clean_registries() cleans Started and Finished job registries."""

        queue = Queue(connection=self.testconn)

        finished_job_registry = FinishedJobRegistry(connection=self.testconn)
        self.testconn.zadd(finished_job_registry.key, {'foo': 1})

        started_job_registry = StartedJobRegistry(connection=self.testconn)
        self.testconn.zadd(started_job_registry.key, {'foo': 1})

        clean_registries(queue)
        self.assertEqual(self.testconn.zcard(finished_job_registry.key), 0)
        self.assertEqual(self.testconn.zcard(started_job_registry.key), 0)
Code Example #33
File: worker.py Project: wsoula/cloud-custodian
def bulk_invoke(func, args, nargs):
    """Bulk invoke a function via queues

    Uses internal implementation details of rq.
    """
    ctx = func.delay.func_closure[-1].cell_contents
    q = Queue(ctx.queue, connection=connection)
    argv = list(args)
    argv.append(None)
    job = Job.create(
        func, args=argv, connection=connection,
        description="bucket-%s" % func.func_name,
        origin=q.name, status=JobStatus.QUEUED, timeout=None,
        result_ttl=500, ttl=ctx.ttl)

    for n in chunks(nargs, 100):
        job.created_at = datetime.utcnow()
        with connection.pipeline() as pipe:
            for s in n:
                argv[-1] = s
                job._id = unicode(uuid4())
                job.args = argv
                q.enqueue_job(job, pipeline=pipe)
            pipe.execute()
Code Example #34
File: test_job.py Project: pboribal/rq
    def test_create_job_with_async(self):
        """test creating jobs with async function"""
        queue = Queue(connection=self.testconn)

        async_job = queue.enqueue(fixtures.say_hello_async, job_id="async_job")
        sync_job = queue.enqueue(fixtures.say_hello, job_id="sync_job")

        self.assertEqual(async_job.id, "async_job")
        self.assertEqual(sync_job.id, "sync_job")

        async_task_result = async_job.perform()
        sync_task_result = sync_job.perform()

        self.assertEqual(sync_task_result, async_task_result)
Code Example #35
File: test_job.py Project: msukmanowsky/rq
    def test_job_with_dependents_deleteall(self):
        """job.delete() deletes itself from Redis. Dependents need to be
        deleted explictely."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = Job.create(func=fixtures.say_hello, depends_on=job)
        job2.register_dependency()

        job.delete(delete_dependents=True)
        self.assertFalse(self.testconn.exists(job.key))
        self.assertFalse(self.testconn.exists(job.dependents_key))
        self.assertFalse(self.testconn.exists(job2.key))

        self.assertNotIn(job.id, queue.get_job_ids())
Code Example #36
File: test_registry.py Project: zachgoulet/rq
    def test_requeue(self):
        """FailedJobRegistry.requeue works properly"""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(div_by_zero, failure_ttl=5)

        worker = Worker([queue])
        worker.work(burst=True)

        registry = FailedJobRegistry(connection=worker.connection)
        self.assertTrue(job in registry)

        registry.requeue(job.id)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
        self.assertEqual(job.started_at, None)
        self.assertEqual(job.ended_at, None)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # Should also work with job instance
        registry.requeue(job)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # requeue_job should work the same way
        requeue_job(job.id, connection=self.testconn)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # And so does job.requeue()
        job.requeue()
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
Code Example #37
    def test_queues_delete_multiple(self):
        some_queues = ['q1', 'q2', 'q3', 'q4']
        some_queues_instances = []
        for queue in some_queues:
            some_queues_instances.append(Queue(name=queue))

        for queue in some_queues_instances:
            job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2))
            queue.enqueue_job(job)

        response = self.client.post('/queues/empty/all')

        self.assertEqual(response.status_code, HTTP_OK)
        for queue in some_queues_instances:
            self.assertEqual(queue.is_empty(), True)
Code Example #38
    def test_fetch_dependencies_raises_if_dependency_deleted(self):
        queue = Queue(connection=self.testconn)
        dependency_job = queue.enqueue(fixtures.say_hello)
        dependent_job = Job.create(func=fixtures.say_hello,
                                   depends_on=dependency_job)

        dependent_job.register_dependency()
        dependent_job.save()

        dependency_job.delete()

        self.assertNotIn(dependent_job.id, [
            job.id
            for job in dependent_job.fetch_dependencies(pipeline=self.testconn)
        ])
Code Example #39
    def test_dependent_job_deletes_dependencies_key(self):
        """
        job.delete() deletes itself from Redis.
        """
        queue = Queue(connection=self.testconn)
        dependency_job = queue.enqueue(fixtures.say_hello)
        dependent_job = Job.create(func=fixtures.say_hello, depends_on=dependency_job)

        dependent_job.register_dependency()
        dependent_job.save()
        dependent_job.delete()

        self.assertTrue(self.testconn.exists(dependency_job.key))
        self.assertFalse(self.testconn.exists(dependent_job.dependencies_key))
        self.assertFalse(self.testconn.exists(dependent_job.key))
Code Example #40
File: test_job.py Project: luisbc92/rq
    def test_execution_order_with_dual_dependency(self):
        queue = Queue(connection=self.testconn)
        key = 'test_job:job_order'

        # When there are no dependencies, the two fast jobs ("A" and "B") run in the order enqueued.
        job_slow_1 = queue.enqueue(fixtures.rpush,
                                   args=[key, "slow_1", True, 0.5],
                                   job_id='slow_1')
        job_slow_2 = queue.enqueue(fixtures.rpush,
                                   args=[key, "slow_2", True, 0.75],
                                   job_id='slow_2')
        job_A = queue.enqueue(fixtures.rpush, args=[key, "A", True])
        job_B = queue.enqueue(fixtures.rpush, args=[key, "B", True])
        fixtures.burst_two_workers(queue)
        time.sleep(1)
        jobs_completed = [v.decode() for v in self.testconn.lrange(key, 0, 3)]
        self.assertEqual(queue.count, 0)
        self.assertTrue(
            all(job.is_finished
                for job in [job_slow_1, job_slow_2, job_A, job_B]))
        self.assertEqual(jobs_completed,
                         ["slow_1:w1", "A:w1", "B:w1", "slow_2:w2"])
        self.testconn.delete(key)

        # This time job "A" depends on two slow jobs, while job "B" depends only on the faster of
        # the two. Job "B" should be completed before job "A".
        # There is no clear requirement on which worker should take job "A", so we stay silent on that.
        job_slow_1 = queue.enqueue(fixtures.rpush,
                                   args=[key, "slow_1", True, 0.5],
                                   job_id='slow_1')
        job_slow_2 = queue.enqueue(fixtures.rpush,
                                   args=[key, "slow_2", True, 0.75],
                                   job_id='slow_2')
        job_A = queue.enqueue(fixtures.rpush,
                              args=[key, "A", False],
                              depends_on=['slow_1', 'slow_2'])
        job_B = queue.enqueue(fixtures.rpush,
                              args=[key, "B", True],
                              depends_on=['slow_1'])
        fixtures.burst_two_workers(queue)
        time.sleep(1)
        jobs_completed = [v.decode() for v in self.testconn.lrange(key, 0, 3)]
        self.assertEqual(queue.count, 0)
        self.assertTrue(
            all(job.is_finished
                for job in [job_slow_1, job_slow_2, job_A, job_B]))
        self.assertEqual(jobs_completed,
                         ["slow_1:w1", "B:w1", "slow_2:w2", "A"])
Code Example #41
File: utils.py Project: trodery/rqmonitor
def empty_queue(queue_id):
    """
    :param queue_id: Queue ID/name to delete
    :return: None

    As no specific exception is raised for below method
    we are using general Exception class for now
    """
    def attach_rq_queue_prefix(queue_id):
        return Queue.redis_queue_namespace_prefix + queue_id

    try:
        queue_instance = Queue.from_queue_key(attach_rq_queue_prefix(queue_id))
        queue_instance.empty()
    except Exception as e:
        raise ActionFailed
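ActionFailed is rqmonitor's own exception type; to run the snippet in isolation you would need a stand-in such as this minimal sketch:

class ActionFailed(Exception):
    """Raised when a queue action (empty, delete) cannot be completed."""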
Code Example #42
    def test_job_with_dependents_delete_all_with_saved(self):
        """job.delete() deletes itself from Redis. Dependents need to be
        deleted explictely. Without a save, the dependent job is never saved
        into redis. The delete method will get and pass a NoSuchJobError.
        """
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = Job.create(func=fixtures.say_hello, depends_on=job)
        job2.register_dependency()
        job2.save()

        job.delete(delete_dependents=True)
        self.assertFalse(self.testconn.exists(job.key))
        self.assertFalse(self.testconn.exists(job.dependents_key))
        self.assertFalse(self.testconn.exists(job2.key))

        self.assertNotIn(job.id, queue.get_job_ids())
Code Example #43
    def test_fetch_dependencies_watches(self):
        queue = Queue(connection=self.testconn)
        dependency_job = queue.enqueue(fixtures.say_hello)
        dependent_job = Job.create(func=fixtures.say_hello,
                                   depends_on=dependency_job)

        dependent_job.register_dependency()
        dependent_job.save()

        with self.testconn.pipeline() as pipeline:
            dependent_job.fetch_dependencies(watch=True, pipeline=pipeline)

            pipeline.multi()

            with self.assertRaises(WatchError):
                self.testconn.set(dependency_job.id, 'somethingelsehappened')
                pipeline.touch(dependency_job.id)
                pipeline.execute()
Code Example #44
    def test_dependencies_finished_watches_job(self):
        queue = Queue(connection=self.testconn)

        dependency_job = queue.enqueue(fixtures.say_hello)

        dependent_job = Job.create(func=fixtures.say_hello)
        dependent_job._dependency_ids = [dependency_job.id]
        dependent_job.register_dependency()

        with self.testconn.pipeline() as pipeline:
            dependent_job.dependencies_are_met(pipeline=pipeline)

            dependency_job.set_status(JobStatus.FAILED, pipeline=self.testconn)
            pipeline.multi()

            with self.assertRaises(WatchError):
                pipeline.touch(Job.key_for(dependent_job.id))
                pipeline.execute()
Code Example #45
File: cli.py Project: testshastra/cloud-custodian
def queues():
    """Reeport on progress by queues."""
    conn = worker.connection
    failure_q = None

    def _repr(q):
        return "running:%d pending:%d finished:%d" % (StartedJobRegistry(
            q.name, conn).count, q.count, FinishedJobRegistry(q.name,
                                                              conn).count)

    for q in Queue.all(conn):
        if q.name == 'failed':
            failure_q = q
            continue
        click.echo("%s %s" % (q.name, _repr(q)))
    if failure_q:
        click.echo(
            click.style(failure_q.name, fg='red') + ' %s' % _repr(failure_q))
Code Example #46
File: test_job.py Project: luisbc92/rq
    def test_create_and_cancel_job_fails_already_canceled(self):
        """Ensure job.cancel() fails on already canceld job"""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello, job_id='fake_job_id')
        self.assertEqual(1, len(queue.get_jobs()))

        # First cancel should be fine
        cancel_job(job.id)
        self.assertEqual(0, len(queue.get_jobs()))
        registry = CanceledJobRegistry(connection=self.testconn, queue=queue)
        self.assertIn(job, registry)
        self.assertEqual(job.get_status(), JobStatus.CANCELED)

        # Second cancel should fail
        self.assertRaisesRegex(
            InvalidJobOperation,
            r'Cannot cancel already canceled job: fake_job_id', cancel_job,
            job.id)
Code Example #47
    def test_job_with_dependents_delete_parent_with_saved(self):
        """job.delete() deletes itself from Redis but not dependents. If the
        dependent job was saved, it will remain in redis."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = Job.create(func=fixtures.say_hello, depends_on=job)
        job2.register_dependency()
        job2.save()

        job.delete()
        self.assertFalse(self.testconn.exists(job.key))
        self.assertFalse(self.testconn.exists(job.dependents_key))

        # By default, dependents are not deleted; the dependent job remains
        # in Redis only because it was saved above.
        self.assertTrue(self.testconn.exists(job2.key))

        self.assertNotIn(job.id, queue.get_job_ids())
Code Example #48
    def test_job_with_dependents_delete_parent(self):
        """job.delete() deletes itself from Redis but not dependents.
        Wthout a save, the dependent job is never saved into redis. The delete
        method will get and pass a NoSuchJobError.
        """
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = Job.create(func=fixtures.say_hello, depends_on=job)
        job2.register_dependency()

        job.delete()
        self.assertFalse(self.testconn.exists(job.key))
        self.assertFalse(self.testconn.exists(job.dependents_key))

        # By default, dependents are not deleted; here the dependent job was
        # never saved, so its key does not exist in Redis.
        self.assertFalse(self.testconn.exists(job2.key))

        self.assertNotIn(job.id, queue.get_job_ids())
Code Example #49
File: test_job.py Project: luisbc92/rq
    def test_create_and_cancel_job_enqueue_dependents(self):
        """Ensure job.cancel() works properly with enqueue_dependents=True"""
        queue = Queue(connection=self.testconn)
        dependency = queue.enqueue(fixtures.say_hello)
        dependent = queue.enqueue(fixtures.say_hello, depends_on=dependency)

        self.assertEqual(1, len(queue.get_jobs()))
        self.assertEqual(1, len(queue.deferred_job_registry))
        cancel_job(dependency.id, enqueue_dependents=True)
        self.assertEqual(1, len(queue.get_jobs()))
        self.assertEqual(0, len(queue.deferred_job_registry))
        registry = CanceledJobRegistry(connection=self.testconn, queue=queue)
        self.assertIn(dependency, registry)
        self.assertEqual(dependency.get_status(), JobStatus.CANCELED)
        self.assertIn(dependent, queue.get_jobs())
        self.assertEqual(dependent.get_status(), JobStatus.QUEUED)
        # If job is deleted, it's also removed from CanceledJobRegistry
        dependency.delete()
        self.assertNotIn(dependency, registry)
Code Example #50
    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
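The tl() helper comes from rq-scheduler's test utilities; it just decodes the byte strings returned by zrange so they compare cleanly against job ids. A sketch:

def tl(list_of_bytes):
    """Decode a list of Redis byte strings to text."""
    return [b.decode('utf-8') for b in list_of_bytes]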
Code Example #51
File: utils.py Project: trodery/rqmonitor
def empty_registry(registry_name, queue_name, connection=None):
    """Empties a specific registry for a specific queue, Not in RQ, implemented
            here for performance reasons
    """
    redis_connection = resolve_connection(connection)

    queue_instance = Queue.from_queue_key(Queue.redis_queue_namespace_prefix +
                                          queue_name)

    registry_instance = None
    if registry_name == 'failed':
        registry_instance = queue_instance.failed_job_registry
    elif registry_name == 'started':
        registry_instance = queue_instance.started_job_registry
    elif registry_name == 'scheduled':
        registry_instance = queue_instance.scheduled_job_registry
    elif registry_name == 'deferred':
        registry_instance = queue_instance.deferred_job_registry
    elif registry_name == 'finished':
        registry_instance = queue_instance.finished_job_registry

    script = """
        local prefix = "{0}"
        local q = KEYS[1]
        local count = 0
        while true do
            local job_id, score = unpack(redis.call("zpopmin", q))
            if job_id == nil or score == nil then
                break
            end

            -- Delete the relevant keys
            redis.call("del", prefix..job_id)
            redis.call("del", prefix..job_id..":dependents")
            count = count + 1
        end
        return count
    """.format(
        registry_instance.job_class.redis_job_namespace_prefix).encode("utf-8")
    script = redis_connection.register_script(script)
    return script(keys=[registry_instance.key])
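The Lua script pops every (job_id, score) pair from the registry's sorted set and deletes the matching job hash and its :dependents key server-side, avoiding one round trip per job. A hedged usage sketch (assumes an rq connection has already been set up, since resolve_connection(None) falls back to the current one):

# Empty the failed-job registry of the 'default' queue and report the count.
deleted = empty_registry('failed', 'default')
print('removed {} jobs'.format(deleted))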
Code Example #52
    def test_clean_registries_with_serializer(self):
        """clean_registries() cleans Started and Finished job registries (with serializer)."""

        queue = Queue(connection=self.testconn, serializer=JSONSerializer)

        finished_job_registry = FinishedJobRegistry(connection=self.testconn,
                                                    serializer=JSONSerializer)
        self.testconn.zadd(finished_job_registry.key, {'foo': 1})

        started_job_registry = StartedJobRegistry(connection=self.testconn,
                                                  serializer=JSONSerializer)
        self.testconn.zadd(started_job_registry.key, {'foo': 1})

        failed_job_registry = FailedJobRegistry(connection=self.testconn,
                                                serializer=JSONSerializer)
        self.testconn.zadd(failed_job_registry.key, {'foo': 1})

        clean_registries(queue)
        self.assertEqual(self.testconn.zcard(finished_job_registry.key), 0)
        self.assertEqual(self.testconn.zcard(started_job_registry.key), 0)
        self.assertEqual(self.testconn.zcard(failed_job_registry.key), 0)
Code Example #53
def test_pull_stat_res_ok():
    from qeez_stats.utils import save_packets_to_stat
    stat_id = CFG['STAT_CALC_FN']
    qeez_token = get_token()
    res_dc = {
        b'7:6:5:4:3:2:1': b'2,3,1:6.5:4',
        b'1:2:3:4:5:6:7': b'1:2:3',
    }
    save_packets_to_stat(qeez_token, res_dc, redis_conn=None)

    job = queues.enqueue_stat_calc(stat_id, qeez_token, redis_conn=None)
    assert isinstance(job, Job)
    assert job.id

    queue = Queue(name=job.origin, connection=job.connection)
    worker = SimpleWorker([queue], connection=queue.connection)
    worker.work(burst=True)

    res = queues.pull_stat_res(stat_id, qeez_token, redis_conn=None)
    assert isinstance(res, float)
    assert res == 123.1
Code Example #54
File: test_job.py Project: luisbc92/rq
    def test_execution_order_with_sole_dependency(self):
        queue = Queue(connection=self.testconn)
        key = 'test_job:job_order'

        # When there are no dependencies, the two fast jobs ("A" and "B") run in the order enqueued.
        # Worker 1 will be busy with the slow job, so worker 2 will complete both fast jobs.
        job_slow = queue.enqueue(fixtures.rpush,
                                 args=[key, "slow", True, 0.5],
                                 job_id='slow_job')
        job_A = queue.enqueue(fixtures.rpush, args=[key, "A", True])
        job_B = queue.enqueue(fixtures.rpush, args=[key, "B", True])
        fixtures.burst_two_workers(queue)
        time.sleep(0.75)
        jobs_completed = [v.decode() for v in self.testconn.lrange(key, 0, 2)]
        self.assertEqual(queue.count, 0)
        self.assertTrue(
            all(job.is_finished for job in [job_slow, job_A, job_B]))
        self.assertEqual(jobs_completed, ["A:w2", "B:w2", "slow:w1"])
        self.testconn.delete(key)

        # When job "A" depends on the slow job, then job "B" finishes before "A".
        # There is no clear requirement on which worker should take job "A", so we stay silent on that.
        job_slow = queue.enqueue(fixtures.rpush,
                                 args=[key, "slow", True, 0.5],
                                 job_id='slow_job')
        job_A = queue.enqueue(fixtures.rpush,
                              args=[key, "A", False],
                              depends_on='slow_job')
        job_B = queue.enqueue(fixtures.rpush, args=[key, "B", True])
        fixtures.burst_two_workers(queue)
        time.sleep(0.75)
        jobs_completed = [v.decode() for v in self.testconn.lrange(key, 0, 2)]
        self.assertEqual(queue.count, 0)
        self.assertTrue(
            all(job.is_finished for job in [job_slow, job_A, job_B]))
        self.assertEqual(jobs_completed, ["B:w2", "slow:w1", "A"])
Code Example #55
def clear_failed_queue(name, port):
    '''
        Clear all the jobs in the failed queue.

        >>> from goodcrypto.oce.gpg_queue_settings import GPG_RQ, GPG_REDIS_PORT
        >>> clear_failed_queue(GPG_RQ, GPG_REDIS_PORT)
    '''

    redis_connection = Redis(REDIS_HOST, port)
    queue = Queue(name='failed', connection=redis_connection)
    job_ids = list(queue.get_job_ids())
    if len(job_ids) > 0:
        log.write('clearing {} failed jobs'.format(name))
        for job_id in job_ids:
            job = queue.fetch_job(job_id)
            if job is not None:
                job_dump = job.to_dict()
                log.write_and_flush('   {}\n\n'.format(job_dump))
            queue.remove(job_id)
Code Example #56
File: cli.py Project: testshastra/cloud-custodian
def failures():
    """Show any unexpected failures"""
    q = Queue('failed', connection=worker.connection)
    for i in q.get_jobs():
        click.echo("%s on %s" % (i.func_name, i.origin))
        click.echo(i.exc_info)
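Note that rq 1.0 removed the special 'failed' queue, so on modern rq the equivalent listing reads from FailedJobRegistry instead (a sketch that keeps the same worker.connection assumption):

from rq import Queue
from rq.registry import FailedJobRegistry

queue = Queue('default', connection=worker.connection)
registry = FailedJobRegistry(queue=queue)
for job_id in registry.get_job_ids():
    job = queue.job_class.fetch(job_id, connection=queue.connection)
    click.echo("%s on %s" % (job.func_name, job.origin))
    click.echo(job.exc_info)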
Code Example #57
File: utils.py Project: trodery/rqmonitor
def list_all_queues():
    """
    :return: Iterable for all available queue instances
    """
    return Queue.all()
Code Example #58
 def test_ttl_via_enqueue(self):
     ttl = 1
     queue = Queue(connection=self.testconn)
     job = queue.enqueue(fixtures.say_hello, ttl=ttl)
     self.assertEqual(job.get_ttl(), ttl)
Code Example #59
 def test_create_job_with_ttl_should_expire(self):
     """test if a job created with ttl expires [issue502]"""
     queue = Queue(connection=self.testconn)
     queue.enqueue(fixtures.say_hello, job_id="1234", ttl=1)
     time.sleep(1.1)
     self.assertEqual(0, len(queue.get_jobs()))
Code Example #60
 def test_create_job_with_ttl_should_have_ttl_after_enqueued(self):
     """test creating jobs with ttl and checks if get_jobs returns it properly [issue502]"""
     queue = Queue(connection=self.testconn)
     queue.enqueue(fixtures.say_hello, job_id="1234", ttl=10)
     job = queue.get_jobs()[0]
     self.assertEqual(job.ttl, 10)