コード例 #1
0
ファイル: worker.py プロジェクト: EonYang/rq_example
def main():
    """Run a blocking RQ worker over the high/default/low queues."""
    queue_names = ['high', 'default', 'low']
    rq_worker = Worker(queue_names, connection=redis_client)
    rq_worker.work()
コード例 #2
0
ファイル: test_cli.py プロジェクト: luisbc92/rq
    def test_cli_enqueue_with_serializer(self):
        """rq enqueue -u <url> -S rq.serializers.JSONSerializer tests.fixtures.say_hello"""
        # The queue must use the same serializer the CLI is told to use,
        # otherwise the enqueued payload could not be read back below.
        queue = Queue(connection=self.connection, serializer=JSONSerializer)
        self.assertTrue(queue.is_empty())

        runner = CliRunner()
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, '-S',
            'rq.serializers.JSONSerializer', 'tests.fixtures.say_hello'
        ])
        self.assert_normal_execution(result)

        # The CLI output looks like:
        #   Enqueued tests.fixtures.say_hello() with job-id '<id>'.
        # so the job id is recovered by stripping this fixed prefix/suffix.
        prefix = 'Enqueued tests.fixtures.say_hello() with job-id \''
        suffix = '\'.\n'

        self.assertTrue(result.output.startswith(prefix))
        self.assertTrue(result.output.endswith(suffix))

        job_id = result.output[len(prefix):-len(suffix)]
        # Verify the job landed on the default queue's underlying Redis list.
        queue_key = 'rq:queue:default'
        self.assertEqual(self.connection.llen(queue_key), 1)
        self.assertEqual(
            self.connection.lrange(queue_key, 0, -1)[0].decode('ascii'),
            job_id)

        # Drain the queue in burst mode, then check the stored result
        # (read back through the same JSON serializer).
        worker = Worker(queue, serializer=JSONSerializer)
        worker.work(True)
        self.assertEqual(
            Job(job_id, serializer=JSONSerializer).result,
            'Hi there, Stranger!')
コード例 #3
0
 def test_job_access_within_job_function(self):
     """The current job is accessible within the job function."""
     queue = Queue()
     # access_self calls get_current_job() and asserts
     queue.enqueue(fixtures.access_self)
     worker = Worker([queue])
     worker.work(burst=True)
コード例 #4
0
ファイル: test_cli.py プロジェクト: luisbc92/rq
    def test_cli_enqueue_schedule_in(self):
        """rq enqueue -u <url> tests.fixtures.say_hello --schedule-in 1s"""
        queue = Queue(connection=self.connection)
        registry = ScheduledJobRegistry(queue=queue)
        worker = Worker(queue)
        scheduler = RQScheduler(queue, self.connection)

        # Nothing enqueued or scheduled yet.
        self.assertTrue(len(queue) == 0)
        self.assertTrue(len(registry) == 0)

        runner = CliRunner()
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, 'tests.fixtures.say_hello',
            '--schedule-in', '10s'
        ])
        self.assert_normal_execution(result)

        scheduler.acquire_locks()
        scheduler.enqueue_scheduled_jobs()

        # The job is scheduled 10s in the future, so it sits in the
        # ScheduledJobRegistry and is not yet on the queue.
        self.assertTrue(len(queue) == 0)
        self.assertTrue(len(registry) == 1)

        # A burst worker therefore finds nothing to do.
        self.assertFalse(worker.work(True))

        # Wait past the 10s schedule point...
        sleep(11)

        scheduler.enqueue_scheduled_jobs()

        # ...after which the scheduler moves the job onto the queue.
        self.assertTrue(len(queue) == 1)
        self.assertTrue(len(registry) == 0)

        # Now the burst worker actually processes a job.
        self.assertTrue(worker.work(True))
コード例 #5
0
ファイル: test_cli.py プロジェクト: luisbc92/rq
    def test_cli_enqueue_args(self):
        """rq enqueue -u <url> tests.fixtures.echo hello ':[1, {"key": "value"}]' json:=["abc"] nojson=def"""
        queue = Queue(connection=self.connection)
        self.assertTrue(queue.is_empty())

        # Exercises the CLI argument grammar (behavior pinned by the
        # assertions at the bottom of this test):
        #   ':<json>'   positional parsed as JSON
        #   ':@<path>'  positional loaded from a JSON file
        #   '%<expr>'   positional evaluated as a Python literal
        #   'k:=<json>' keyword parsed as JSON
        #   'k=<str>'   keyword as a plain string
        #   'k=@<path>' keyword read from a file as raw text
        runner = CliRunner()
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, 'tests.fixtures.echo', 'hello',
            ':[1, {"key": "value"}]', ':@tests/test.json', '%1, 2',
            'json:=[3.0, true]', 'nojson=abc', 'file=@tests/test.json'
        ])
        self.assert_normal_execution(result)

        # Grab the enqueued job's id straight off the Redis queue list.
        job_id = self.connection.lrange('rq:queue:default', 0,
                                        -1)[0].decode('ascii')

        worker = Worker(queue)
        worker.work(True)

        # tests.fixtures.echo evidently returns its (args, kwargs) as-is,
        # which is what makes the parsed values observable here.
        args, kwargs = Job(job_id).result

        self.assertEqual(args, ('hello', [1, {
            'key': 'value'
        }], {
            "test": True
        }, (1, 2)))
        self.assertEqual(
            kwargs, {
                'json': [3.0, True],
                'nojson': 'abc',
                'file': '{\n    "test": true\n}\n'
            })
コード例 #6
0
ファイル: test_queue.py プロジェクト: mindis/rq
    def test_all_queues(self):
        """Queue.all() reports every queue that has had a job enqueued,
        including queues that have since been drained."""
        q1 = Queue('first-queue')
        q2 = Queue('second-queue')
        q3 = Queue('third-queue')

        # A queue is registered only once a job is enqueued on it.
        # NOTE: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(len(Queue.all()), 0)
        q1.enqueue(say_hello)
        self.assertEqual(len(Queue.all()), 1)

        # Ensure this holds true for multiple queues.
        q2.enqueue(say_hello)
        q3.enqueue(say_hello)
        all_queues = Queue.all()  # fetch once instead of twice
        self.assertEqual(len(all_queues), 3)

        # Verify names
        names = [q.name for q in all_queues]
        self.assertTrue('first-queue' in names)
        self.assertTrue('second-queue' in names)
        self.assertTrue('third-queue' in names)

        # Now empty two queues
        w = Worker([q2, q3])
        w.work(burst=True)

        # Queue.all() should still report the empty queues
        self.assertEqual(len(Queue.all()), 3)
コード例 #7
0
    def test_job_execution(self):
        """Job is removed from StartedJobRegistry after execution."""
        registry = StartedJobRegistry(connection=self.testconn)
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # A successful job enters the registry while running and leaves it
        # once finished.
        job = queue.enqueue(say_hello)
        self.assertTrue(job.is_queued)

        worker.prepare_job_execution(job)
        self.assertTrue(job.is_started)
        self.assertIn(job.id, registry.get_job_ids())

        worker.perform_job(job, queue)
        self.assertTrue(job.is_finished)
        self.assertNotIn(job.id, registry.get_job_ids())

        # A failing job is likewise removed from the registry afterwards.
        failing_job = queue.enqueue(div_by_zero)

        worker.prepare_job_execution(failing_job)
        self.assertIn(failing_job.id, registry.get_job_ids())

        worker.perform_job(failing_job, queue)
        self.assertNotIn(failing_job.id, registry.get_job_ids())
コード例 #8
0
 def test_job_access_within_job_function(self):
     """The current job is accessible within the job function."""
     queue = Queue()
     enqueued = queue.enqueue(fixtures.access_self)
     worker = Worker([queue])
     worker.work(burst=True)
     # access_self calls get_current_job() and executes successfully
     self.assertEqual(enqueued.get_status(), JobStatus.FINISHED)
コード例 #9
0
    def test_info_only_workers(self):
        """rq info -u <url> --only-workers (-W)"""
        runner = CliRunner()
        # Fresh Redis: no workers and no queues reported.
        result = runner.invoke(
            main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 0 queue', result.output)

        # One queue with a job, still no workers.
        queue = Queue(connection=self.connection)
        queue.enqueue(say_hello)
        result = runner.invoke(
            main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 1 queues', result.output)

        foo_queue = Queue(name='foo', connection=self.connection)
        foo_queue.enqueue(say_hello)

        bar_queue = Queue(name='bar', connection=self.connection)
        bar_queue.enqueue(say_hello)

        # register_birth() registers the workers in Redis without running any
        # jobs, which appears to be enough for `rq info` to count them.
        worker = Worker([foo_queue, bar_queue], connection=self.connection)
        worker.register_birth()

        worker_2 = Worker([foo_queue, bar_queue], connection=self.connection)
        worker_2.register_birth()
        worker_2.set_state(WorkerStatus.BUSY)

        # Restrict the report to the 'foo' and 'bar' queues.
        result = runner.invoke(
            main,
            ['info', 'foo', 'bar', '-u', self.redis_url, '--only-workers'])

        self.assert_normal_execution(result)
        self.assertIn('2 workers, 2 queues', result.output)

        # --by-queue groups the worker listing per queue name.
        result = runner.invoke(main, [
            'info', 'foo', 'bar', '--by-queue', '-u', self.redis_url,
            '--only-workers'
        ])

        self.assert_normal_execution(result)
        # Ensure both queues' workers are shown
        self.assertIn('foo:', result.output)
        self.assertIn('bar:', result.output)
        self.assertIn('2 workers, 2 queues', result.output)
コード例 #10
0
    def test_can_enqueue_job_if_dependency_is_deleted(self):
        """Enqueueing with depends_on succeeds even after the dependency's
        job data has been deleted."""
        queue = Queue(connection=self.testconn)

        # result_ttl=0 removes the job's data as soon as it completes.
        dependency_job = queue.enqueue(fixtures.say_hello, result_ttl=0)

        worker = Worker([queue])
        worker.work(burst=True)

        # The dependent enqueue must still return a truthy job.
        assert queue.enqueue(fixtures.say_hello, depends_on=dependency_job)
コード例 #11
0
    def __init__(self):
        """Build an RQ worker from environment-driven Redis settings."""
        load_dotenv(verbose=True)

        # Queues are drained in the order listed below.
        queue_names = ['high', 'default', 'low']
        host = os.getenv("REDIS_HOST", "localhost")
        port = os.getenv("REDIS_PORT", "6379")
        password = os.getenv("REDIS_PASSWORD", "")
        connection = Redis(host=host, port=port, password=password, db=0)
        with Connection(connection):
            self.worker = Worker([Queue(name) for name in queue_names])
コード例 #12
0
ファイル: fixtures.py プロジェクト: luisbc92/rq
def start_worker(queue_name, conn_kwargs, worker_name, burst):
    """
    Start a worker. We accept only serializable args, so that this can be
    executed via multiprocessing.
    """
    # Suppress the worker's stdout so it does not pollute the test output
    # (approach from <https://stackoverflow.com/a/28321717/14153673>).
    with open(os.devnull, 'w') as devnull, \
            contextlib.redirect_stdout(devnull):
        worker = Worker(
            [queue_name],
            name=worker_name,
            connection=Redis(**conn_kwargs),
        )
        worker.work(burst=burst)
コード例 #13
0
    def test_requeue_with_serializer(self):
        """FailedJobRegistry.requeue works properly (with serializer)"""
        queue = Queue(connection=self.testconn, serializer=JSONSerializer)
        job = queue.enqueue(div_by_zero, failure_ttl=5)

        worker = Worker([queue], serializer=JSONSerializer)
        worker.work(burst=True)

        # The job failed, so it lands in the failed-job registry.
        registry = FailedJobRegistry(connection=worker.connection,
                                     serializer=JSONSerializer)
        self.assertTrue(job in registry)

        # Requeue by job id: out of the registry, back on the queue...
        registry.requeue(job.id)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
        # ...with its execution timestamps reset.
        self.assertEqual(job.started_at, None)
        self.assertEqual(job.ended_at, None)

        # Running again fails again, putting the job back in the registry.
        worker.work(burst=True)
        self.assertTrue(job in registry)

        # Should also work with job instance
        registry.requeue(job)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # requeue_job should work the same way
        requeue_job(job.id,
                    connection=self.testconn,
                    serializer=JSONSerializer)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # And so does job.requeue()
        job.requeue()
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
コード例 #14
0
    def test_dependents_are_met_if_dependency_is_deleted(self):
        """A dependency deleted after completion still counts as met."""
        queue = Queue(connection=self.testconn)

        # result_ttl=0 deletes the parent's job data right after it runs.
        parent = queue.enqueue(fixtures.say_hello, result_ttl=0)
        child = queue.enqueue(fixtures.say_hello, depends_on=parent)

        worker = Worker([queue])
        # Run exactly one job: the parent, which is then deleted.
        worker.work(burst=True, max_jobs=1)

        assert child.dependencies_are_met()
        assert child.get_status() == JobStatus.QUEUED
コード例 #15
0
ファイル: test_job.py プロジェクト: f0cker/rq
    def test_dependencies_are_met_at_execution_time(self):
        """Dependencies A and B are seen as met when C actually runs."""
        queue = Queue(connection=self.testconn)

        for parent_id in ("A", "B"):
            queue.enqueue(fixtures.say_hello, job_id=parent_id)
        dependent = queue.enqueue(
            fixtures.check_dependencies_are_met,
            job_id="C",
            depends_on=["A", "B"],
        )

        worker = Worker([queue])
        worker.work(burst=True)

        # check_dependencies_are_met ran after A and B and returned truthy.
        assert dependent.result
コード例 #16
0
    def test_work(self):
        """A non-burst worker with the scheduler enabled runs a past-due job."""
        queue = Queue(connection=self.testconn)
        worker = Worker(queues=[queue], connection=self.testconn)
        # NOTE(review): kill_worker presumably signals this process after ~5s
        # so the non-burst worker below eventually stops — confirm against the
        # fixture's definition.
        p = Process(target=kill_worker, args=(os.getpid(), False, 5))

        p.start()
        # Scheduled far in the past (2019-01-01 UTC), so the worker's
        # scheduler should enqueue it immediately.
        queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello)
        worker.work(burst=False, with_scheduler=True)
        p.join(1)
        # with_scheduler=True must have attached a scheduler to the worker.
        self.assertIsNotNone(worker.scheduler)
        registry = FinishedJobRegistry(queue=queue)
        self.assertEqual(len(registry), 1)
コード例 #17
0
ファイル: test_registry.py プロジェクト: zachgoulet/rq
    def test_job_deletion(self):
        """Ensure job is removed from StartedJobRegistry when deleted."""
        started_registry = StartedJobRegistry(connection=self.testconn)
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        job = queue.enqueue(say_hello)
        self.assertTrue(job.is_queued)

        # Beginning execution places the job in the started-job registry.
        worker.prepare_job_execution(job)
        self.assertIn(job.id, started_registry.get_job_ids())

        # Deleting the job must take it back out of that registry.
        job.delete()
        self.assertNotIn(job.id, started_registry.get_job_ids())
コード例 #18
0
ファイル: pqbenchmark.py プロジェクト: vladignatyev/django-pq
def worker(worker_num, backend):
    """Run one benchmark worker against the selected queue *backend*."""
    import subprocess
    print('Worker %i started' % worker_num)
    if backend == 'pq':
        # django-pq's worker runs as a management command in a subshell.
        subprocess.call('django-admin.py pqworker benchmark -b', shell=True)
    elif backend == 'rq':
        from rq.worker import Worker
        from redis import Redis
        from rq import Queue
        benchmark_queue = Queue('benchmark', connection=Redis())
        rq_worker = Worker(benchmark_queue, connection=Redis())
        rq_worker.work(burst=False)
    print('Worker %i fin' % worker_num)
コード例 #19
0
ファイル: test_registry.py プロジェクト: zcmander/rq
    def test_jobs_are_put_in_registry(self):
        """Completed jobs are added to FinishedJobRegistry."""
        self.assertEqual(self.registry.get_job_ids(), [])
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # A successfully executed job's id shows up in the registry.
        ok_job = queue.enqueue(say_hello)
        worker.perform_job(ok_job, queue)
        self.assertEqual(self.registry.get_job_ids(), [ok_job.id])

        # A failed job must not be added; the registry is unchanged.
        failed_job = queue.enqueue(div_by_zero)
        worker.perform_job(failed_job, queue)
        self.assertEqual(self.registry.get_job_ids(), [ok_job.id])
コード例 #20
0
    def test_run_maintenance_tasks(self, mocked):
        """scheduler.acquire_locks() is called only when scheduled is enabled"""
        # `mocked` patches the call being counted — per the docstring, the
        # scheduler's acquire_locks().
        queue = Queue(connection=self.testconn)
        worker = Worker(queues=[queue], connection=self.testconn)

        # No scheduler attached: maintenance never calls the mock.
        worker.run_maintenance_tasks()
        self.assertEqual(mocked.call_count, 0)

        # Scheduler attached but last_cleaned_at is None (first run):
        # still not called.
        worker.last_cleaned_at = None
        worker.scheduler = RQScheduler([queue], connection=self.testconn)
        worker.run_maintenance_tasks()
        self.assertEqual(mocked.call_count, 0)

        # Scheduler attached and a previous cleanup recorded: called once.
        worker.last_cleaned_at = datetime.now()
        worker.run_maintenance_tasks()
        self.assertEqual(mocked.call_count, 1)
コード例 #21
0
    def test_work_fails(self):
        """A non-importable job ends up on the failed queue even when a
        Sentry handler is registered."""
        queue = Queue()
        failed_queue = get_failed_queue()

        # Enqueue a job whose import path cannot be resolved.
        queue.enqueue('_non.importable.job')
        self.assertEqual(queue.count, 1)

        worker = Worker([queue])
        register_sentry(FakeSentry(), worker)

        worker.work(burst=True)

        # The job moved from the work queue to the failed queue.
        self.assertEqual(failed_queue.count, 1)
        self.assertEqual(queue.count, 0)
コード例 #22
0
ファイル: test_registry.py プロジェクト: zachgoulet/rq
    def test_jobs_are_put_in_registry(self):
        """Completed jobs are added to FinishedJobRegistry."""
        self.assertEqual(self.registry.get_job_ids(), [])
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # A successfully executed job is recorded in the registry.
        finished_job = queue.enqueue(say_hello)
        worker.perform_job(finished_job, queue)
        self.assertEqual(self.registry.get_job_ids(), [finished_job.id])

        # Deleting the finished job removes it from the registry again.
        self.assertEqual(finished_job.get_status(), JobStatus.FINISHED)
        finished_job.delete()
        self.assertEqual(self.registry.get_job_ids(), [])

        # A failed job never enters FinishedJobRegistry.
        failing_job = queue.enqueue(div_by_zero)
        worker.perform_job(failing_job, queue)
        self.assertEqual(self.registry.get_job_ids(), [])
コード例 #23
0
ファイル: fixtures.py プロジェクト: luisbc92/rq
def burst_two_workers(queue, timeout=2, tries=5, pause=0.1):
    """
    Get two workers working simultaneously in burst mode, on a given queue.
    Return after both workers have finished handling jobs, up to a fixed timeout
    on the worker that runs in another process.
    """
    # First worker runs in a separate process.
    w1 = start_worker_process(queue.name, worker_name='w1', burst=True)
    w2 = Worker(queue, name='w2')
    jobs = queue.jobs
    if jobs:
        # Poll until the subprocess worker has picked up the first job (or we
        # run out of tries). This is helpful in tests where we want to control
        # which worker takes which job.
        first_job = jobs[0]
        for _ in range(tries):
            if first_job.is_started:
                break
            time.sleep(pause)
    # Only then start the in-process worker.
    w2.work(burst=True)
    w1.join(timeout)
コード例 #24
0
ファイル: test_registry.py プロジェクト: zachgoulet/rq
    def test_worker_handle_job_failure(self):
        """Failed jobs are added to FailedJobRegistry"""
        q = Queue(connection=self.testconn)

        w = Worker([q])
        registry = FailedJobRegistry(connection=w.connection)

        timestamp = current_timestamp()

        # NOTE(review): failure_ttl=5 is passed in both enqueues below, so the
        # "default failure ttl" comment looks stale; the first assertion only
        # bounds the registry score from above — confirm intent upstream.
        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        # job is added to FailedJobRegistry with default failure ttl
        self.assertIn(job.id, registry.get_job_ids())
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + DEFAULT_FAILURE_TTL + 5)

        # job is added to FailedJobRegistry with specified ttl
        # (score should be roughly timestamp + 5, hence < timestamp + 7).
        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + 7)
コード例 #25
0
ファイル: test_cli.py プロジェクト: luisbc92/rq
    def test_requeue_with_serializer(self):
        """rq requeue -u <url> -S <serializer> --all"""
        connection = Redis.from_url(self.redis_url)
        queue = Queue('requeue',
                      connection=connection,
                      serializer=JSONSerializer)
        registry = queue.failed_job_registry

        runner = CliRunner()

        # Three jobs that will all fail (division by zero).
        job = queue.enqueue(div_by_zero)
        job2 = queue.enqueue(div_by_zero)
        job3 = queue.enqueue(div_by_zero)

        worker = Worker([queue], serializer=JSONSerializer)
        worker.work(burst=True)

        # After the burst run, all three sit in the failed-job registry.
        self.assertIn(job, registry)
        self.assertIn(job2, registry)
        self.assertIn(job3, registry)

        # Requeue one job by id via the CLI; -S must match the serializer
        # the queue was created with.
        result = runner.invoke(main, [
            'requeue', '-u', self.redis_url, '--queue', 'requeue', '-S',
            'rq.serializers.JSONSerializer', job.id
        ])
        self.assert_normal_execution(result)

        # Only the first specified job is requeued
        self.assertNotIn(job, registry)
        self.assertIn(job2, registry)
        self.assertIn(job3, registry)

        result = runner.invoke(main, [
            'requeue', '-u', self.redis_url, '--queue', 'requeue', '-S',
            'rq.serializers.JSONSerializer', '--all'
        ])
        self.assert_normal_execution(result)
        # With --all flag, all failed jobs are requeued
        self.assertNotIn(job2, registry)
        self.assertNotIn(job3, registry)
コード例 #26
0
ファイル: test_retry.py プロジェクト: retouchee/rq
    def test_retry_interval(self):
        """Retries with intervals are scheduled"""
        connection = self.testconn
        queue = Queue(connection=connection)
        job = queue.enqueue(div_by_zero, retry=Retry(max=1, interval=5))

        worker = Worker([queue])
        scheduled_registry = queue.scheduled_job_registry
        # A job configured to retry with an interval is scheduled for later,
        # not put straight back onto the queue.
        queue.empty()
        worker.handle_job_failure(job, queue)
        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.SCHEDULED)
        self.assertEqual(job.retries_left, 0)
        self.assertEqual(len(scheduled_registry), 1)
        self.assertEqual(queue.job_ids, [])

        # The retry should land roughly 5 seconds from now.
        scheduled_time = scheduled_registry.get_scheduled_time(job)
        now = datetime.now(timezone.utc)
        self.assertTrue(
            now + timedelta(seconds=4)
            < scheduled_time
            < now + timedelta(seconds=6))
コード例 #27
0
ファイル: updater.py プロジェクト: produvia/kryptos
def start_worker():
    """Launch a blocking RQ worker on the "updates" queue."""
    with Connection(CONN):
        log.info("Starting update worker")
        updates_worker = Worker("updates")
        updates_worker.work()
コード例 #28
0
def connect_worker():
    """Start a blocking worker over every queue named in `listen`."""
    with Connection(conn):
        Worker([Q(name) for name in listen]).work()
コード例 #29
0
ファイル: tasks.py プロジェクト: yishenggudou/hadooptools
def GetWorksByQueueName(name, host=None, port=None):
    """Return all RQ workers registered on `default_redis`.

    NOTE: Worker.all() is not filtered by queue — every registered worker is
    returned, not only those listening on `name`.
    """
    from rq.worker import Worker
    # Kept for any side effects of queue creation/lookup; its return value
    # was never actually used by Worker.all().
    GetQueue(name, host, port)
    # Worker.all is a classmethod — no need to build a throwaway Worker
    # instance just to call it (the original did exactly that).
    return Worker.all(connection=default_redis)
コード例 #30
0
import time
from redis import Redis
from rq import Queue, Connection
from rq.worker import HerokuWorker as Worker

# Names of the queues this worker listens on.
listen = ['high', 'default', 'low']

conn = Redis(host='redis', port=6379, db=0)

if __name__ == '__main__':
    # Run a Heroku-aware worker over all listed queues, blocking forever.
    with Connection(conn):
        worker = Worker([Queue(name) for name in listen])
        worker.work()