コード例 #1
0
ファイル: test_job.py プロジェクト: friedcell/rq
 def test_job_access_within_job_function(self):
     """get_current_job() works inside a running job function."""
     queue = Queue()
     # fixtures.access_self calls get_current_job() and asserts on the result
     queue.enqueue(fixtures.access_self)
     worker = Worker([queue])
     worker.work(burst=True)
     # No job may have landed on the failed queue.
     assert get_failed_queue(self.testconn).count == 0
コード例 #2
0
    def test_all_queues(self):
        """Queue.all() reports exactly the queues that have seen a job."""
        first = Queue('first-queue')
        second = Queue('second-queue')
        third = Queue('third-queue')

        # A queue becomes visible only once something is enqueued on it.
        self.assertEqual(len(Queue.all()), 0)
        first.enqueue(say_hello)
        self.assertEqual(len(Queue.all()), 1)

        # The same holds when several queues are populated.
        second.enqueue(say_hello)
        third.enqueue(say_hello)
        names = [queue.name for queue in Queue.all()]
        self.assertEqual(len(Queue.all()), 3)

        # Every created queue name must be reported.
        for expected in ('first-queue', 'second-queue', 'third-queue'):
            self.assertTrue(expected in names)

        # Drain two of the queues; emptiness must not hide them.
        Worker([second, third]).work(burst=True)

        # Queue.all() should still report the (now empty) queues.
        self.assertEqual(len(Queue.all()), 3)
コード例 #3
0
ファイル: test_cli.py プロジェクト: luisbc92/rq
    def test_cli_enqueue_schedule_in(self):
        """rq enqueue -u <url> tests.fixtures.say_hello --schedule-in 1s"""
        queue = Queue(connection=self.connection)
        registry = ScheduledJobRegistry(queue=queue)
        worker = Worker(queue)
        scheduler = RQScheduler(queue, self.connection)

        # Sanity check: nothing queued or scheduled yet.
        self.assertTrue(len(queue) == 0)
        self.assertTrue(len(registry) == 0)

        runner = CliRunner()
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, 'tests.fixtures.say_hello',
            '--schedule-in', '10s'
        ])
        self.assert_normal_execution(result)

        scheduler.acquire_locks()
        scheduler.enqueue_scheduled_jobs()

        # The job is scheduled for the future, so it must not be queued yet.
        self.assertTrue(len(queue) == 0)
        self.assertTrue(len(registry) == 1)

        # A burst worker finds nothing to do before the scheduled time.
        self.assertFalse(worker.work(True))

        # Wait past the 10-second schedule-in window.
        sleep(11)

        scheduler.enqueue_scheduled_jobs()

        # Now the scheduler has moved the job from the registry to the queue.
        self.assertTrue(len(queue) == 1)
        self.assertTrue(len(registry) == 0)

        # And the worker can process it.
        self.assertTrue(worker.work(True))
コード例 #4
0
ファイル: worker.py プロジェクト: EonYang/rq_example
def main():
    """Run a blocking RQ worker on the high/default/low queues."""
    queue_names = ['high', 'default', 'low']
    worker = Worker(queue_names, connection=redis_client)
    worker.work()
コード例 #5
0
ファイル: test_job.py プロジェクト: luisbc92/rq
    def test_create_and_cancel_job_enqueue_dependents_in_registry(self):
        """Ensure job.cancel() works properly with enqueue_dependents=True and when the job is in a registry"""
        queue = Queue(connection=self.testconn)
        # dependent stays deferred until dependency (which will fail) is done.
        dependency = queue.enqueue(fixtures.raise_exc)
        dependent = queue.enqueue(fixtures.say_hello, depends_on=dependency)

        self.assertEqual(1, len(queue.get_jobs()))
        self.assertEqual(1, len(queue.deferred_job_registry))
        w = Worker([queue])
        # Run only the dependency; it raises and lands in the failed registry.
        w.work(burst=True, max_jobs=1)
        dependency.refresh()
        dependent.refresh()
        self.assertEqual(0, len(queue.get_jobs()))
        self.assertEqual(1, len(queue.deferred_job_registry))
        self.assertEqual(1, len(queue.failed_job_registry))
        # Cancelling the failed dependency must release its dependents.
        cancel_job(dependency.id, enqueue_dependents=True)
        dependency.refresh()
        dependent.refresh()
        # The dependent moved from deferred to the queue; the dependency moved
        # from the failed registry to the canceled registry.
        self.assertEqual(1, len(queue.get_jobs()))
        self.assertEqual(0, len(queue.deferred_job_registry))
        self.assertEqual(0, len(queue.failed_job_registry))
        self.assertEqual(1, len(queue.canceled_job_registry))
        registry = CanceledJobRegistry(connection=self.testconn, queue=queue)
        self.assertIn(dependency, registry)
        self.assertEqual(dependency.get_status(), JobStatus.CANCELED)
        self.assertNotIn(dependency, queue.failed_job_registry)
        self.assertIn(dependent, queue.get_jobs())
        self.assertEqual(dependent.get_status(), JobStatus.QUEUED)
        # If job is deleted, it's also removed from CanceledJobRegistry
        dependency.delete()
        self.assertNotIn(dependency, registry)
コード例 #6
0
ファイル: test_cli.py プロジェクト: luisbc92/rq
    def test_cli_enqueue_args(self):
        """rq enqueue -u <url> tests.fixtures.echo hello ':[1, {"key": "value"}]' json:=["abc"] nojson=def"""
        queue = Queue(connection=self.connection)
        self.assertTrue(queue.is_empty())

        runner = CliRunner()
        # The arguments exercise the CLI value syntax (per assertions below):
        # ':' parses JSON, ':@path' parses JSON from a file, '%' evaluates a
        # Python literal, 'k:=v' is a JSON keyword, 'k=v' a plain-string
        # keyword, and 'k=@path' reads raw file content.
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, 'tests.fixtures.echo', 'hello',
            ':[1, {"key": "value"}]', ':@tests/test.json', '%1, 2',
            'json:=[3.0, true]', 'nojson=abc', 'file=@tests/test.json'
        ])
        self.assert_normal_execution(result)

        # Grab the enqueued job's id straight from the Redis queue list.
        job_id = self.connection.lrange('rq:queue:default', 0,
                                        -1)[0].decode('ascii')

        worker = Worker(queue)
        worker.work(True)

        # tests.fixtures.echo returns its (args, kwargs) unchanged.
        args, kwargs = Job(job_id).result

        self.assertEqual(args, ('hello', [1, {
            'key': 'value'
        }], {
            "test": True
        }, (1, 2)))
        self.assertEqual(
            kwargs, {
                'json': [3.0, True],
                'nojson': 'abc',
                'file': '{\n    "test": true\n}\n'
            })
コード例 #7
0
ファイル: test_cli.py プロジェクト: luisbc92/rq
    def test_cli_enqueue_with_serializer(self):
        """rq enqueue -u <url> -S rq.serializers.JSONSerializer tests.fixtures.say_hello"""
        queue = Queue(connection=self.connection, serializer=JSONSerializer)
        self.assertTrue(queue.is_empty())

        runner = CliRunner()
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, '-S',
            'rq.serializers.JSONSerializer', 'tests.fixtures.say_hello'
        ])
        self.assert_normal_execution(result)

        # The CLI prints the generated job id; recover it from the output.
        prefix = 'Enqueued tests.fixtures.say_hello() with job-id \''
        suffix = '\'.\n'

        self.assertTrue(result.output.startswith(prefix))
        self.assertTrue(result.output.endswith(suffix))

        job_id = result.output[len(prefix):-len(suffix)]
        queue_key = 'rq:queue:default'
        # Exactly that job must be sitting on the default queue in Redis.
        self.assertEqual(self.connection.llen(queue_key), 1)
        self.assertEqual(
            self.connection.lrange(queue_key, 0, -1)[0].decode('ascii'),
            job_id)

        # Worker and Job must use the same serializer to round-trip the result.
        worker = Worker(queue, serializer=JSONSerializer)
        worker.work(True)
        self.assertEqual(
            Job(job_id, serializer=JSONSerializer).result,
            'Hi there, Stranger!')
コード例 #8
0
ファイル: test_queue.py プロジェクト: bradleyy/rq
    def test_all_queues(self):
        """Queue.all() lists every queue that has had a job enqueued.

        Uses assertEqual throughout: assertEquals is a deprecated alias
        (removed in Python 3.12).
        """
        q1 = Queue('first-queue')
        q2 = Queue('second-queue')
        q3 = Queue('third-queue')

        # Ensure a queue is added only once a job is enqueued
        self.assertEqual(len(Queue.all()), 0)
        q1.enqueue(say_hello)
        self.assertEqual(len(Queue.all()), 1)

        # Ensure this holds true for multiple queues
        q2.enqueue(say_hello)
        q3.enqueue(say_hello)
        names = [q.name for q in Queue.all()]
        self.assertEqual(len(Queue.all()), 3)

        # Verify names
        self.assertTrue('first-queue' in names)
        self.assertTrue('second-queue' in names)
        self.assertTrue('third-queue' in names)

        # Now empty two queues
        w = Worker([q2, q3])
        w.work(burst=True)

        # Queue.all() should still report the empty queues
        self.assertEqual(len(Queue.all()), 3)
コード例 #9
0
 def test_job_access_within_job_function(self):
     """A job function can retrieve its own job via get_current_job()."""
     job_queue = Queue()
     # The fixture calls get_current_job() and asserts it got the right job.
     job_queue.enqueue(fixtures.access_self)
     Worker([job_queue]).work(burst=True)
     # Nothing should have failed during the burst run.
     assert get_failed_queue(self.testconn).count == 0
コード例 #10
0
ファイル: test_job.py プロジェクト: nvie/rq
 def test_job_access_within_job_function(self):
     """The current job is accessible within the job function."""
     queue = Queue()
     # fixtures.access_self invokes get_current_job(); success means FINISHED.
     enqueued = queue.enqueue(fixtures.access_self)
     worker = Worker([queue])
     worker.work(burst=True)
     self.assertEqual(enqueued.get_status(), JobStatus.FINISHED)
コード例 #11
0
 def test_job_access_within_job_function(self):
     """get_current_job() resolves correctly inside the executing job."""
     work_queue = Queue()
     self_job = work_queue.enqueue(fixtures.access_self)
     # The fixture asserts internally; a finished status proves it passed.
     Worker([work_queue]).work(burst=True)
     self.assertEqual(self_job.get_status(), JobStatus.FINISHED)
コード例 #12
0
    def test_can_enqueue_job_if_dependency_is_deleted(self):
        """Enqueueing succeeds even after the dependency's data expired."""
        queue = Queue(connection=self.testconn)

        # result_ttl=0 makes the job's data vanish as soon as it finishes.
        parent = queue.enqueue(fixtures.say_hello, result_ttl=0)

        Worker([queue]).work(burst=True)

        # A dependent can still be enqueued against the now-deleted parent.
        assert queue.enqueue(fixtures.say_hello, depends_on=parent)
コード例 #13
0
ファイル: test_scheduler.py プロジェクト: zhouzheng0619/rq
    def test_work_burst(self):
        """worker.work() attaches a scheduler only when with_scheduler=True."""
        queue = Queue(connection=self.testconn)

        # Without the flag, no scheduler is created.
        worker = Worker(queues=[queue], connection=self.testconn)
        worker.work(burst=True, with_scheduler=False)
        self.assertIsNone(worker.scheduler)

        # With the flag, the worker gets a scheduler instance.
        worker = Worker(queues=[queue], connection=self.testconn)
        worker.work(burst=True, with_scheduler=True)
        self.assertIsNotNone(worker.scheduler)
コード例 #14
0
ファイル: fixtures.py プロジェクト: luisbc92/rq
def start_worker(queue_name, conn_kwargs, worker_name, burst):
    """
    Start a worker. We accept only serializable args, so that this can be
    executed via multiprocessing.
    """
    # Suppress the worker's stdout chatter while it runs
    # (thanks to <https://stackoverflow.com/a/28321717/14153673>).
    with open(os.devnull, 'w') as devnull, contextlib.redirect_stdout(devnull):
        worker = Worker([queue_name], name=worker_name,
                        connection=Redis(**conn_kwargs))
        worker.work(burst=burst)
コード例 #15
0
    def test_dependents_are_met_if_dependency_is_deleted(self):
        """A dependent's dependencies count as met once the parent is gone."""
        queue = Queue(connection=self.testconn)

        # result_ttl=0 deletes the parent's data right after it succeeds.
        parent = queue.enqueue(fixtures.say_hello, result_ttl=0)
        child = queue.enqueue(fixtures.say_hello, depends_on=parent)

        # Process exactly one job: the parent.
        Worker([queue]).work(burst=True, max_jobs=1)

        assert child.dependencies_are_met()
        assert child.get_status() == JobStatus.QUEUED
コード例 #16
0
ファイル: test_job.py プロジェクト: f0cker/rq
    def test_dependencies_are_met_at_execution_time(self):
        """Job C sees both of its parents finished by the time it executes."""
        queue = Queue(connection=self.testconn)

        # Two parents plus a child that depends on both by id.
        for parent_id in ("A", "B"):
            queue.enqueue(fixtures.say_hello, job_id=parent_id)
        job_c = queue.enqueue(fixtures.check_dependencies_are_met,
                              job_id="C", depends_on=["A", "B"])

        Worker([queue]).work(burst=True)

        # The fixture returns a truthy value only if dependencies were met.
        assert job_c.result
コード例 #17
0
    def test_work(self):
        """A non-burst worker with a scheduler runs past-due scheduled jobs."""
        queue = Queue(connection=self.testconn)
        worker = Worker(queues=[queue], connection=self.testconn)
        # Spawn a helper process that signals this process after a delay so
        # the blocking (burst=False) worker below eventually stops.
        p = Process(target=kill_worker, args=(os.getpid(), False, 5))

        p.start()
        # Scheduled in the past, so the scheduler enqueues it immediately.
        queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello)
        worker.work(burst=False, with_scheduler=True)
        p.join(1)
        self.assertIsNotNone(worker.scheduler)
        # The past-due job must have been picked up and finished.
        registry = FinishedJobRegistry(queue=queue)
        self.assertEqual(len(registry), 1)
コード例 #18
0
ファイル: pqbenchmark.py プロジェクト: vladignatyev/django-pq
def worker(worker_num, backend):
    """Run one benchmark worker against the given backend ('pq' or 'rq')."""
    import subprocess
    print('Worker %i started' % worker_num)
    if backend == 'pq':
        # django-pq worker runs via its management command.
        subprocess.call('django-admin.py pqworker benchmark -b', shell=True)
    elif backend == 'rq':
        # Imports deferred so the module loads without rq/redis installed.
        from rq.worker import Worker
        from redis import Redis
        from rq import Queue
        benchmark_queue = Queue('benchmark', connection=Redis())
        Worker(benchmark_queue, connection=Redis()).work(burst=False)
    print('Worker %i fin' % worker_num)
コード例 #19
0
class RedisWorker:
    """An RQ worker connected to a Redis server configured via env vars."""

    def __init__(self):
        load_dotenv(verbose=True)

        # Queues are polled in priority order: 'high' first, 'low' last.
        listen = ['high', 'default', 'low']
        redis_host = os.getenv("REDIS_HOST", "localhost")
        # Environment variables are strings; redis-py expects an int port.
        redis_port = int(os.getenv("REDIS_PORT", "6379"))
        # Normalize an empty password to None so redis-py does not attempt
        # AUTH against a server with no password configured.
        redis_password = os.getenv("REDIS_PASSWORD") or None
        conn = Redis(host=redis_host, port=redis_port, password=redis_password, db=0)
        with Connection(conn):
            self.worker = Worker(map(Queue, listen))

    def work(self):
        """Block and process jobs until the worker is stopped."""
        self.worker.work()
コード例 #20
0
ファイル: pqbenchmark.py プロジェクト: vladignatyev/django-pq
def worker(worker_num, backend):
    """Launch a single benchmark worker for the selected queue backend."""
    import subprocess

    print("Worker %i started" % worker_num)
    if backend == "pq":
        # django-pq: delegate to the management command.
        subprocess.call("django-admin.py pqworker benchmark -b", shell=True)
    elif backend == "rq":
        # Lazy imports keep the module importable without rq/redis.
        from rq.worker import Worker
        from redis import Redis
        from rq import Queue

        rq_queue = Queue("benchmark", connection=Redis())
        rq_worker = Worker(rq_queue, connection=Redis())
        rq_worker.work(burst=False)
    print("Worker %i fin" % worker_num)
コード例 #21
0
ファイル: pqbenchmark.py プロジェクト: oinopion/django-pq
def worker(worker_num, backend):
    """Run one benchmark worker using either the 'pq' or 'rq' backend."""
    import subprocess

    print('Worker %i started' % worker_num)
    if backend == 'pq':
        # django-pq worker, launched via its management command.
        subprocess.call(
            'django-admin.py pqworker benchmark -b', shell=True)
    elif backend == 'rq':
        # Imports deferred so the module loads without rq/redis installed.
        from rq.worker import Worker
        from redis import Redis
        from rq import Queue

        q = Queue('benchmark', connection=Redis())
        w = Worker(q, connection=Redis())
        # burst=False: block and keep processing until externally stopped.
        w.work(burst=False)
    print('Worker %i fin' % worker_num)
    return
コード例 #22
0
    def test_requeue_with_serializer(self):
        """FailedJobRegistry.requeue works properly (with serializer)"""
        queue = Queue(connection=self.testconn, serializer=JSONSerializer)
        # failure_ttl keeps the failed job around long enough to requeue it.
        job = queue.enqueue(div_by_zero, failure_ttl=5)

        worker = Worker([queue], serializer=JSONSerializer)
        worker.work(burst=True)

        # div_by_zero raised, so the job must land in the failed registry.
        registry = FailedJobRegistry(connection=worker.connection,
                                     serializer=JSONSerializer)
        self.assertTrue(job in registry)

        # Requeue by job id.
        registry.requeue(job.id)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
        # Requeueing resets the execution timestamps.
        self.assertEqual(job.started_at, None)
        self.assertEqual(job.ended_at, None)

        # Running again fails again, putting the job back in the registry.
        worker.work(burst=True)
        self.assertTrue(job in registry)

        # Should also work with job instance
        registry.requeue(job)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # requeue_job should work the same way
        requeue_job(job.id,
                    connection=self.testconn,
                    serializer=JSONSerializer)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # And so does job.requeue()
        job.requeue()
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
コード例 #23
0
ファイル: fixtures.py プロジェクト: luisbc92/rq
def burst_two_workers(queue, timeout=2, tries=5, pause=0.1):
    """
    Get two workers working simultaneously in burst mode, on a given queue.
    Return after both workers have finished handling jobs, up to a fixed timeout
    on the worker that runs in another process.
    """
    external = start_worker_process(queue.name, worker_name='w1', burst=True)
    local = Worker(queue, name='w2')
    pending = queue.jobs
    if pending:
        # Give the external worker process time to pick up the first job, so
        # tests can control which worker takes which job.
        first_job = pending[0]
        attempts = 0
        while attempts < tries and not first_job.is_started:
            time.sleep(pause)
            attempts += 1
    # The in-process worker can start now.
    local.work(burst=True)
    external.join(timeout)
コード例 #24
0
ファイル: test_cli.py プロジェクト: luisbc92/rq
    def test_requeue_with_serializer(self):
        """rq requeue -u <url> -S <serializer> --all"""
        connection = Redis.from_url(self.redis_url)
        queue = Queue('requeue',
                      connection=connection,
                      serializer=JSONSerializer)
        registry = queue.failed_job_registry

        runner = CliRunner()

        # Three jobs that all raise, so all three end up failed.
        job = queue.enqueue(div_by_zero)
        job2 = queue.enqueue(div_by_zero)
        job3 = queue.enqueue(div_by_zero)

        worker = Worker([queue], serializer=JSONSerializer)
        worker.work(burst=True)

        self.assertIn(job, registry)
        self.assertIn(job2, registry)
        self.assertIn(job3, registry)

        # Requeue a single job by id; the serializer is passed via -S.
        result = runner.invoke(main, [
            'requeue', '-u', self.redis_url, '--queue', 'requeue', '-S',
            'rq.serializers.JSONSerializer', job.id
        ])
        self.assert_normal_execution(result)

        # Only the first specified job is requeued
        self.assertNotIn(job, registry)
        self.assertIn(job2, registry)
        self.assertIn(job3, registry)

        result = runner.invoke(main, [
            'requeue', '-u', self.redis_url, '--queue', 'requeue', '-S',
            'rq.serializers.JSONSerializer', '--all'
        ])
        self.assert_normal_execution(result)
        # With --all flag, all failed jobs are requeued
        self.assertNotIn(job2, registry)
        self.assertNotIn(job3, registry)
コード例 #25
0
ファイル: test_registry.py プロジェクト: nvie/rq
    def test_requeue(self):
        """FailedJobRegistry.requeue works properly"""
        queue = Queue(connection=self.testconn)
        # failure_ttl keeps the failed job around long enough to requeue it.
        job = queue.enqueue(div_by_zero, failure_ttl=5)

        worker = Worker([queue])
        worker.work(burst=True)

        # div_by_zero raised, so the job must be in the failed registry.
        registry = FailedJobRegistry(connection=worker.connection)
        self.assertTrue(job in registry)

        # Requeue by job id.
        registry.requeue(job.id)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        # Running again fails again, putting the job back in the registry.
        worker.work(burst=True)
        self.assertTrue(job in registry)

        # Should also work with job instance
        registry.requeue(job)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # requeue_job should work the same way
        requeue_job(job.id, connection=self.testconn)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # And so does job.requeue()
        job.requeue()
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
コード例 #26
0
ファイル: test_cli.py プロジェクト: nvie/rq
    def test_requeue(self):
        """rq requeue -u <url> --all"""
        connection = Redis.from_url(self.redis_url)
        queue = Queue('requeue', connection=connection)
        registry = queue.failed_job_registry

        runner = CliRunner()

        # Three jobs that all raise, so all three end up failed.
        job = queue.enqueue(div_by_zero)
        job2 = queue.enqueue(div_by_zero)
        job3 = queue.enqueue(div_by_zero)

        worker = Worker([queue])
        worker.work(burst=True)

        self.assertIn(job, registry)
        self.assertIn(job2, registry)
        self.assertIn(job3, registry)

        # Requeue a single failed job by its id.
        result = runner.invoke(
            main,
            ['requeue', '-u', self.redis_url, '--queue', 'requeue', job.id]
        )
        self.assert_normal_execution(result)

        # Only the first specified job is requeued
        self.assertNotIn(job, registry)
        self.assertIn(job2, registry)
        self.assertIn(job3, registry)

        result = runner.invoke(
            main,
            ['requeue', '-u', self.redis_url, '--queue', 'requeue', '--all']
        )
        self.assert_normal_execution(result)
        # With --all flag, all failed jobs are requeued
        self.assertNotIn(job2, registry)
        self.assertNotIn(job3, registry)
コード例 #27
0
ファイル: worker.py プロジェクト: kevincon/utilityknife
import os

try:
    # Python 2
    import urlparse
except ImportError:
    # Python 3: urllib.parse exposes the same urlparse()/uses_netloc API.
    import urllib.parse as urlparse

from redis import Redis
from rq import Queue, Connection
from rq.worker import Worker

# Queues are polled in priority order: 'high' first, 'low' last.
listen = ['high', 'default', 'low']

redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')

# Register the redis scheme so the parser fills in hostname/port/password.
urlparse.uses_netloc.append('redis')
url = urlparse.urlparse(redis_url)
conn = Redis(host=url.hostname, port=url.port, db=0, password=url.password)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(map(Queue, listen))
        worker.work()
コード例 #28
0
 def run_worker(self, options):
     """Start a worker on the configured queue and process jobs."""
     with Connection(self._get_connection()):
         worker = Worker([options.queue_name])
         worker.work()