Example #1
 def test_cancel(self):
     """job.cancel() deletes itself & dependents mapping from Redis."""
     job = Job.create(func=say_hello)
     job2 = Job.create(func=say_hello, depends_on=job)
     job2.register_dependency()
     job.cancel()
     self.assertFalse(self.testconn.exists(job.key))
     self.assertFalse(self.testconn.exists(job.dependents_key))
Example #2
 def test_get_job_ttl(self):
     """Getting job TTL."""
     ttl = 1
     job = Job.create(func=fixtures.say_hello, ttl=ttl)
     job.save()
     self.assertEqual(job.get_ttl(), ttl)
     job = Job.create(func=fixtures.say_hello)
     job.save()
     self.assertEqual(job.get_ttl(), None)
Example #3
    def test_failure_ttl_is_persisted(self):
        """Ensure job.failure_ttl is set and restored properly"""
        job = Job.create(func=fixtures.say_hello, args=('Lionel',), failure_ttl=15)
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.failure_ttl, 15)

        job = Job.create(func=fixtures.say_hello, args=('Lionel',))
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.failure_ttl, None)
Example #4
    def test_result_ttl_is_persisted(self):
        """Ensure that job's result_ttl is set properly"""
        job = Job.create(func=fixtures.say_hello, args=('Lionel',), result_ttl=10)
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, 10)

        job = Job.create(func=fixtures.say_hello, args=('Lionel',))
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, None)
Example #5
 def test_get_result_ttl(self):
     """Getting job result TTL."""
     job_result_ttl = 1
     default_ttl = 2
     job = Job.create(func=fixtures.say_hello, result_ttl=job_result_ttl)
     job.save()
     self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), job_result_ttl)
     self.assertEqual(job.get_result_ttl(), job_result_ttl)
     job = Job.create(func=fixtures.say_hello)
     job.save()
     self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), default_ttl)
     self.assertEqual(job.get_result_ttl(), None)
Example #6
    def test_description_is_persisted(self):
        """Ensure that job's custom description is set properly"""
        job = Job.create(func=say_hello, args=('Lionel',), description=u'Say hello!')
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.description, u'Say hello!')

        # Ensure job description is constructed from function call string
        job = Job.create(func=say_hello, args=('Lionel',))
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.description, "tests.fixtures.say_hello('Lionel')")
Example #7
    def test_persistence_of_parent_job(self):
        """Storing jobs with parent job, either instance or key."""
        parent_job = Job.create(func=fixtures.some_calculation)
        parent_job.save()
        job = Job.create(func=fixtures.some_calculation, depends_on=parent_job)
        job.save()
        stored_job = Job.fetch(job.id)
        self.assertEqual(stored_job._dependency_id, parent_job.id)
        self.assertEqual(stored_job.dependency, parent_job)

        job = Job.create(func=fixtures.some_calculation, depends_on=parent_job.id)
        job.save()
        stored_job = Job.fetch(job.id)
        self.assertEqual(stored_job._dependency_id, parent_job.id)
        self.assertEqual(stored_job.dependency, parent_job)
Example #8
    def test_fetch_many(self):
        """Fetching many jobs at once."""
        data = {
            'func': fixtures.some_calculation,
            'args': (3, 4),
            'kwargs': dict(z=2),
            'connection': self.testconn,
        }
        job = Job.create(**data)
        job.save()

        job2 = Job.create(**data)
        job2.save()

        jobs = Job.fetch_many([job.id, job2.id, 'invalid_id'], self.testconn)
        self.assertEqual(jobs, [job, job2, None])
Example #9
    def test_enqueue_dependents_on_multiple_queues(self):
        """Enqueueing dependent jobs on multiple queues pushes jobs in the queues
        and removes them from DeferredJobRegistry for each different queue."""
        q_1 = Queue("queue_1")
        q_2 = Queue("queue_2")
        parent_job = Job.create(func=say_hello)
        parent_job.save()
        job_1 = q_1.enqueue(say_hello, depends_on=parent_job)
        job_2 = q_2.enqueue(say_hello, depends_on=parent_job)

        # Each queue has its own DeferredJobRegistry
        registry_1 = DeferredJobRegistry(q_1.name, connection=self.testconn)
        self.assertEqual(
            set(registry_1.get_job_ids()),
            set([job_1.id])
        )
        registry_2 = DeferredJobRegistry(q_2.name, connection=self.testconn)
        self.assertEqual(
            set(registry_2.get_job_ids()),
            set([job_2.id])
        )

        # After dependents are enqueued, job_1 should be in queue_1 and
        # job_2 should be in queue_2
        self.assertEqual(q_1.job_ids, [])
        self.assertEqual(q_2.job_ids, [])
        q_1.enqueue_dependents(parent_job)
        q_2.enqueue_dependents(parent_job)
        self.assertEqual(set(q_1.job_ids), set([job_1.id]))
        self.assertEqual(set(q_2.job_ids), set([job_2.id]))
        self.assertFalse(self.testconn.exists(parent_job.dependents_key))

        # DeferredJobRegistry should also be empty
        self.assertEqual(registry_1.get_job_ids(), [])
        self.assertEqual(registry_2.get_job_ids(), [])
Example #10
    def test_work_is_unreadable(self):
        """Unreadable jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()

        self.assertEqual(failed_q.count, 0)
        self.assertEqual(q.count, 0)

        # NOTE: We have to fake this enqueueing for this test case.
        # What we're simulating here is a call to a function that is not
        # importable from the worker process.
        job = Job.create(func=div_by_zero, args=(3,))
        job.save()
        data = self.testconn.hget(job.key, 'data')
        invalid_data = data.replace(b'div_by_zero', b'nonexisting')
        assert data != invalid_data
        self.testconn.hset(job.key, 'data', invalid_data)

        # We use the low-level internal function to enqueue any data (bypassing
        # validity checks)
        q.push_job_id(job.id)

        self.assertEqual(q.count, 1)

        # All set, we're going to process it
        w = Worker([q])
        w.work(burst=True)   # should silently pass
        self.assertEqual(q.count, 0)
        self.assertEqual(failed_q.count, 1)
Example #11
    def test_persistence_of_parent_job(self):
        """Storing jobs with parent job, either instance or key."""
        parent_job = Job.create(func=fixtures.some_calculation,
                                connection=self.testconn)
        parent_job.save()
        job = Job.create(func=fixtures.some_calculation, depends_on=parent_job,
                         connection=self.testconn,)
        job.save()
        stored_job = self.conn.get_job(job.id)
        self.assertEqual(stored_job.parent_ids[0], parent_job.id)

        job = Job.create(func=fixtures.some_calculation,
                         depends_on=parent_job.id, connection=self.testconn)
        job.save()
        stored_job = self.conn.get_job(job.id)
        self.assertEqual(stored_job.parent_ids[0], parent_job.id)
Example #12
    def test_register_dependency(self):
        """Ensure dependency registration works properly."""

        finished_job = Job.create(func=fixtures.say_hello, status='finished')
        finished_job.save()
        job1 = Job.create(func=fixtures.say_hello)
        job1.save()
        job2 = Job.create(func=fixtures.say_hello, depends_on=[job1, finished_job])
        job2.register_dependencies([job1])
        self.assertEqual(
            list(map(as_text, self.testconn.smembers(Job.dependents_key_for(job1.id)))),
            [job2.id]
        )
        # Should not register itself as being dependent on a finished job
        self.assertEqual(self.testconn.smembers(Job.dependents_key_for(finished_job.id)), set())
        self.assertEqual(self.testconn.smembers(Job.dependents_key_for(job2.id)), set())
Example #13
    def test_create_job_from_callable_class(self):
        """Creation of jobs using a callable class specifier."""
        kallable = fixtures.CallableObject()
        job = Job.create(func=kallable)

        self.assertEqual(job.func, kallable.__call__)
        self.assertEqual(job.instance, kallable)
Example #14
    def test_work_is_unreadable(self):
        """Unreadable jobs are put on the failed job registry."""
        q = Queue()
        self.assertEqual(q.count, 0)

        # NOTE: We have to fake this enqueueing for this test case.
        # What we're simulating here is a call to a function that is not
        # importable from the worker process.
        job = Job.create(func=div_by_zero, args=(3,), origin=q.name)
        job.save()

        job_data = job.data
        invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
        assert job_data != invalid_data
        self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))

        # We use the low-level internal function to enqueue any data (bypassing
        # validity checks)
        q.push_job_id(job.id)

        self.assertEqual(q.count, 1)

        # All set, we're going to process it
        w = Worker([q])
        w.work(burst=True)   # should silently pass
        self.assertEqual(q.count, 0)

        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
Example #15
 def test_register_dependency(self):
     """Test that jobs updates the correct job dependents."""
     job = Job.create(func=say_hello)
     job._dependency_id = 'id'
     job.save()
     job.register_dependency()
     self.assertEqual(as_text(self.testconn.spop('rq:job:id:dependents')), job.id)
Example #16
def bulk_invoke(func, args, nargs):
    """Bulk invoke a function via queues

    Uses internal implementation details of rq.
    """
    # for comparison, simplest thing that works
    # for i in nargs:
    #    argv = list(args)
    #    argv.append(i)
    #    func.delay(*argv)

    # closure attributes differ between CPython and PyPy; sniff to detect
    for closure in func.delay.func_closure:
        if getattr(closure.cell_contents, 'queue', None):
            ctx = closure.cell_contents
            break
    q = Queue(ctx.queue, connection=connection)
    argv = list(args)
    argv.append(None)
    job = Job.create(
        func, args=argv, connection=connection,
        description="bucket-%s" % func.func_name,
        origin=q.name, status=JobStatus.QUEUED, timeout=ctx.timeout,
        result_ttl=0, ttl=ctx.ttl)

    for n in chunks(nargs, 100):
        job.created_at = datetime.utcnow()
        with connection.pipeline() as pipe:
            for s in n:
                argv[-1] = s
                job._id = unicode(uuid4())
                job.args = argv
                q.enqueue_job(job, pipeline=pipe)
            pipe.execute()
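
For context, a hedged usage sketch of the helper above: bulk_invoke expects an rq-style delayed function whose delay closure carries the queue context, a tuple of fixed arguments, and an iterable of values appended one at a time as the trailing argument. The names below (process_bucket, account_id, region, bucket_names) are hypothetical, not from the original source.

# Hypothetical call site (illustrative names only): enqueue one job per
# bucket, with each bucket name appended as the trailing argument.
bulk_invoke(process_bucket, (account_id, region), bucket_names)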
Example #17
 def create_job(self, status=None, depends_on=None):
     """
     Creates a Job object with the given job ID.
     """
     args = (self.instance,)
     return Job.create(self.func, args=args, id=self.id, connection=self.connection,
                       depends_on=depends_on, status=status)
Example #18
    def test_enqueue_waitlist(self):
        """Enqueueing a waitlist pushes all jobs in waitlist to queue"""
        q = Queue()
        parent_job = Job.create(func=say_hello)
        parent_job.save()
        job_1 = Job.create(func=say_hello, dependency=parent_job)
        job_1.save()
        job_1.register_dependency()
        job_2 = Job.create(func=say_hello, dependency=parent_job)
        job_2.save()
        job_2.register_dependency()

        # After waitlist is enqueued, job_1 and job_2 should be in queue
        self.assertEqual(q.job_ids, [])
        q.enqueue_waitlist(parent_job)
        self.assertEqual(q.job_ids, [job_1.id, job_2.id])
        self.assertFalse(self.testconn.exists(parent_job.waitlist_key))
Example #19
    def create_job(self, *args, **kwargs):
        if 'origin' not in kwargs:
            kwargs['origin'] = str(uuid4())
        if 'func' not in kwargs and len(args) == 0:
            kwargs['func'] = 'tests.fixtures.say_hello'

        job = Job.create(connection=self.conn, *args, **kwargs)
        return job
Example #20
    def test_description_is_persisted(self):
        """Ensure that job's custom description is set properly"""
        job = Job.create(connection=self.testconn, func=fixtures.say_hello,
                         args=('Lionel',), description='Say hello!')
        job.save()
        job = self.conn.get_job(job.id)
        self.assertEqual(job.description, 'Say hello!')

        # Ensure job description is constructed from function call string
        job = Job.create(connection=self.testconn, func=fixtures.say_hello,
                         args=('Lionel',))
        job.save()
        job = self.conn.get_job(job.id)
        if PY2:
            self.assertEqual(job.description, "tests.fixtures.say_hello(u'Lionel')")
        else:
            self.assertEqual(job.description, "tests.fixtures.say_hello('Lionel')")
Example #21
    def test_enqueue_dependents(self):
        """Enqueueing the dependent jobs pushes all jobs in the depends set to the queue."""
        q = Queue()
        parent_job = Job.create(func=say_hello)
        parent_job.save()
        job_1 = Job.create(func=say_hello, depends_on=parent_job)
        job_1.save()
        job_1.register_dependency()
        job_2 = Job.create(func=say_hello, depends_on=parent_job)
        job_2.save()
        job_2.register_dependency()

        # After dependents are enqueued, job_1 and job_2 should be in the queue
        self.assertEqual(q.job_ids, [])
        q.enqueue_dependents(parent_job)
        self.assertEqual(set(q.job_ids), set([job_1.id, job_2.id]))
        self.assertFalse(self.testconn.exists(parent_job.dependents_key))
Example #22
    def test_create_job_from_string_function(self):
        """Creation of jobs using string specifier."""
        job = Job.create(func='tests.fixtures.say_hello', args=('World',))

        # Job data is set
        self.assertEqual(job.func, fixtures.say_hello)
        self.assertIsNone(job.instance)
        self.assertEqual(job.args, ('World',))
Example #23
    def test_requeue_sets_status_to_queued(self):
        """Requeueing a job should set its status back to QUEUED."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))
        get_failed_queue().requeue(job.id)

        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
Example #24
    def test_quarantine_preserves_timeout(self):
        """Quarantine preserves job timeout."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.timeout = 200
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))

        self.assertEqual(job.timeout, 200)
Example #25
    def setUp(self):
        super(TestRQCli, self).setUp()
        db_num = self.testconn.connection_pool.connection_kwargs['db']
        self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num
        self.connection = Redis.from_url(self.redis_url)

        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.save()
Example #26
    def setUp(self):
        super(TestRQCli, self).setUp()
        db_num = self.testconn.connection_pool.connection_kwargs['db']
        self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num

        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))  # noqa
Example #27
    def test_create_instance_method_job(self):
        """Creation of jobs for instance methods."""
        n = fixtures.Number(2)
        job = Job.create(func=n.div, args=(4,))

        # Job data is set
        self.assertEqual(job.func, n.div)
        self.assertEqual(job.instance, n)
        self.assertEqual(job.args, (4,))
Example #28
    def test_create_instance_method_job(self):
        """Creation of jobs for instance methods."""
        c = Calculator(2)
        job = Job.create(func=c.calculate, args=(3, 4))

        # Job data is set
        self.assertEqual(job.func, c.calculate)
        self.assertEqual(job.instance, c)
        self.assertEqual(job.args, (3, 4))
Example #29
    def test_fetching_unreadable_data(self):
        """Fetching fails on unreadable data."""
        # Set up
        job = Job.create(func=some_calculation, args=(3, 4), kwargs=dict(z=2))
        job.save()

        # Just replace the data hkey with some random noise
        self.testconn.hset(job.key, "data", "this is no pickle string")
        with self.assertRaises(UnpickleError):
            job.refresh()
Example #30
    def test_fetching_unreadable_data(self):
        """Fetching fails on unreadable data."""
        # Set up
        job = Job.create(some_calculation, 3, 4, z=2)
        job.save()

        # Just replace the data hkey with some random noise
        self.testconn.hset(job.key, 'data', 'this is no pickle string')
        with self.assertRaises(UnpickleError):
            job.refresh()
Example #31
    def test_dependent_job_deletes_dependencies_key(self):
        """
        job.delete() deletes the job and its dependencies key from Redis,
        leaving the dependency job itself intact.
        """
        queue = Queue(connection=self.testconn)
        dependency_job = queue.enqueue(fixtures.say_hello)
        dependent_job = Job.create(func=fixtures.say_hello,
                                   depends_on=dependency_job)

        dependent_job.register_dependency()
        dependent_job.save()
        dependent_job.delete()

        self.assertTrue(self.testconn.exists(dependency_job.key))
        self.assertFalse(self.testconn.exists(dependent_job.dependencies_key))
        self.assertFalse(self.testconn.exists(dependent_job.key))
Example #32
    def test_fetch_dependencies_raises_if_dependency_deleted(self):
        queue = Queue(connection=self.testconn)
        dependency_job = queue.enqueue(fixtures.say_hello)
        dependent_job = Job.create(func=fixtures.say_hello, depends_on=dependency_job)

        dependent_job.register_dependency()
        dependent_job.save()

        dependency_job.delete()

        self.assertNotIn(
            dependency_job.id,
            [job.id for job in dependent_job.fetch_dependencies(
                pipeline=self.testconn
            )]
        )
Example #33
 def enqueue_job(self, job_id: Union[UUID, str], user: str) -> Job:
     # check if job already exists
     try:
         job = Job.fetch(str(job_id), connection=self.redis_conn)
     except NoSuchJobError:
         job = Job.create(
             self.job_func,
             args=(job_id, user),
             id=str(job_id),
             result_ttl=0,
             timeout="10m",
             failure_ttl=3600 * 24 * 14,
             connection=self.redis_conn,
         )
         self.q.enqueue_job(job)
     return job
Example #34
    def test_enqueue_job_with_multiple_finished_dependencies(self):

        parent_jobs = [Job.create(func=say_hello) for _ in range(2)]

        for job in parent_jobs:
            job._status = JobStatus.FINISHED
            job.save()

        q = Queue()
        with patch('rq.queue.Job.create', new=MultipleDependencyJob.create):
            job = q.enqueue(say_hello,
                            depends_on=parent_jobs[0],
                            _dependency_ids=[job.id for job in parent_jobs])
            self.assertEqual(job.get_status(), JobStatus.QUEUED)
            self.assertEqual(q.job_ids, [job.id])
            self.assertEqual(job.fetch_dependencies(), parent_jobs)
Example #35
    def test_store_then_fetch(self):
        """Store, then fetch."""
        job = Job.create(func=fixtures.some_calculation,
                         timeout='1h',
                         args=(3, 4),
                         kwargs=dict(z=2))
        job.save()

        job2 = Job.fetch(job.id)
        self.assertEqual(job.func, job2.func)
        self.assertEqual(job.args, job2.args)
        self.assertEqual(job.kwargs, job2.kwargs)
        self.assertEqual(job.timeout, job2.timeout)

        # Jobs with the same ID compare equal
        self.assertEqual(job, job2)
Example #36
    def test_enqueue_job_with_dependency_and_timeout(self):
        """Jobs remember their timeout when enqueued as a dependency."""
        # Job with unfinished dependency is not immediately enqueued
        parent_job = Job.create(func=say_hello)
        parent_job.save()
        q = Queue()
        job = q.enqueue_call(say_hello, depends_on=parent_job, timeout=123)
        self.assertEqual(q.job_ids, [])
        self.assertEqual(job.timeout, 123)

        # Jobs dependent on finished jobs are immediately enqueued
        parent_job.set_status(JobStatus.FINISHED)
        parent_job.save()
        job = q.enqueue_call(say_hello, depends_on=parent_job, timeout=123)
        self.assertEqual(q.job_ids, [job.id])
        self.assertEqual(job.timeout, 123)
Example #37
    def test_job_is_unimportable(self):
        """Jobs that cannot be imported throw exception on access."""
        job = Job.create(func=fixtures.say_hello, args=('Lionel', ))
        job.save()

        # Now slightly modify the job to make it unimportable (this is
        # equivalent to a worker not having the most up-to-date source code
        # and unable to import the function)
        job_data = job.data
        unimportable_data = job_data.replace(b'say_hello', b'nay_hello')

        self.testconn.hset(job.key, 'data', zlib.compress(unimportable_data))

        job.refresh()
        with self.assertRaises(AttributeError):
            job.func  # accessing the func property should fail
Example #38
    def test_cleanup(self):
        """Test that jobs and results are expired properly."""
        job = Job.create(func=fixtures.say_hello)
        job.save()

        # Jobs with negative TTLs don't expire
        job.cleanup(ttl=-1)
        self.assertEqual(self.testconn.ttl(job.key), -1)

        # Jobs with positive TTLs are eventually deleted
        job.cleanup(ttl=100)
        self.assertEqual(self.testconn.ttl(job.key), 100)

        # Jobs with 0 TTL are immediately deleted
        job.cleanup(ttl=0)
        self.assertRaises(NoSuchJobError, Job.fetch, job.id, self.testconn)
Example #39
    def test_compressed_job_data_handling(self):
        """Jobs handle both compressed and uncompressed data"""

        job = Job.create(func=fixtures.say_hello, args=('Lionel',))
        job.save()

        # Job data is stored in compressed format
        job_data = job.data
        self.assertEqual(
            zlib.compress(job_data),
            self.testconn.hget(job.key, 'data')
        )

        self.testconn.hset(job.key, 'data', job_data)
        job.refresh()
        self.assertEqual(job.data, job_data)
Example #40
    def test_job_deletion(self):
        """Ensure job.delete() removes itself from FailedQueue."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.timeout = 200
        job.save()

        job.set_status(JobStatus.FAILED)

        failed_queue = get_failed_queue()
        failed_queue.quarantine(job, Exception('Some fake error'))

        self.assertTrue(job.id in failed_queue.get_job_ids())

        job.delete()
        self.assertFalse(job.id in failed_queue.get_job_ids())
Example #41
    def test_get_scheduled_time(self):
        """get_scheduled_time() returns job's scheduled datetime"""
        queue = Queue(connection=self.testconn)
        registry = ScheduledJobRegistry(queue=queue)

        job = Job.create('myfunc', connection=self.testconn)
        job.save()
        dt = datetime(2019, 1, 1, tzinfo=timezone.utc)
        registry.schedule(job, datetime(2019, 1, 1, tzinfo=timezone.utc))
        self.assertEqual(registry.get_scheduled_time(job), dt)
        # get_scheduled_time() should also work with job ID
        self.assertEqual(registry.get_scheduled_time(job.id), dt)

        # registry.get_scheduled_time() raises NoSuchJobError if
        # job.id is not found
        self.assertRaises(NoSuchJobError, registry.get_scheduled_time, '123')
Example #42
    def test_requeue_job(self):
        """Requeueing existing jobs."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.save()
        get_failed_queue().quarantine(job,
                                      Exception('Some fake error'))  # noqa

        self.assertEqual(Queue.all(), [get_failed_queue()])  # noqa
        self.assertEqual(get_failed_queue().count, 1)

        requeued_job = get_failed_queue().requeue(job.id)

        self.assertEqual(get_failed_queue().count, 0)
        self.assertEqual(Queue('fake').count, 1)
        self.assertEqual(requeued_job.origin, job.origin)
Example #43
    def test_enqueue_job_with_dependency(self):
        """Jobs are enqueued only when their dependencies are finished."""
        # Job with unfinished dependency is not immediately enqueued
        parent_job = Job.create(func=say_hello)
        q = Queue()
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        self.assertEqual(q.job_ids, [])
        self.assertEqual(job.get_status(), JobStatus.DEFERRED)

        # Jobs dependent on finished jobs are immediately enqueued
        parent_job.set_status(JobStatus.FINISHED)
        parent_job.save()
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        self.assertEqual(q.job_ids, [job.id])
        self.assertEqual(job.timeout, Queue.DEFAULT_TIMEOUT)
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
Example #44
    def test_queues_delete_multiple(self):
        some_queues = ['q1', 'q2', 'q3', 'q4']
        some_queues_instances = []
        for queue in some_queues:
            some_queues_instances.append(Queue(name=queue))

        for queue in some_queues_instances:
            job = Job.create(func=fixtures.some_calculation,
                             args=(3, 4),
                             kwargs=dict(z=2))
            queue.enqueue_job(job)

        response = self.client.post('/queues/empty/all')

        self.assertEqual(response.status_code, HTTP_OK)
        for queue in some_queues_instances:
            self.assertEqual(queue.is_empty(), True)
Example #45
    def test_enqueue_sets_metadata(self):
        """Enqueueing job onto queues modifies meta data."""
        q = Queue()
        job = Job.create(func=say_hello,
                         args=('Nick', ),
                         kwargs=dict(foo='bar'))

        # Preconditions
        self.assertIsNone(job.origin)
        self.assertIsNone(job.enqueued_at)

        # Action
        q.enqueue_job(job)

        # Postconditions
        self.assertEqual(job.origin, q.name)
        self.assertIsNotNone(job.enqueued_at)
Example #46
    def test_job_with_dependents_delete_all_with_saved(self):
        """job.delete() deletes itself from Redis. Dependents need to be
        deleted explictely. Without a save, the dependent job is never saved
        into redis. The delete method will get and pass a NoSuchJobError.
        """
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = Job.create(func=fixtures.say_hello, depends_on=job)
        job2.register_dependency()
        job2.save()

        job.delete(delete_dependents=True)
        self.assertFalse(self.testconn.exists(job.key))
        self.assertFalse(self.testconn.exists(job.dependents_key))
        self.assertFalse(self.testconn.exists(job2.key))

        self.assertNotIn(job.id, queue.get_job_ids())
Example #47
def create_job(paths, args):
    job_id = get_hash(paths, args)
    try:
        job = Job.fetch(job_id, connection=redis)
        return get_response(job, 200)
    except NoSuchJobError:
        job = Job.create(run_task,
                         id=job_id,
                         args=[paths, args],
                         timeout=WORKER_TIMEOUT,
                         ttl=WORKER_TTL,
                         result_ttl=WORKER_RESULT_TTL,
                         failure_ttl=WORKER_FAILURE_TTL,
                         connection=redis)
        queue = Queue(connection=redis)
        queue.enqueue_job(job)
        return get_response(job, 201)
Example #48
 def _create_job(self, func, args=None, kwargs=None, commit=True,
                 result_ttl=None, ttl=None, id=None, description=None,
                 queue_name=None, timeout=None):
     """
     Creates an RQ job and saves it to Redis.
     """
     if args is None:
         args = ()
     if kwargs is None:
         kwargs = {}
     job = Job.create(func, args=args, connection=self.connection,
                      kwargs=kwargs, result_ttl=result_ttl, ttl=ttl, id=id,
                      description=description, timeout=timeout)
     job.origin = queue_name or self.queue_name
     if commit:
         job.save()
     return job
Example #49
def create_scheduling_job(
    asset_id: int,
    start_of_schedule: datetime,
    end_of_schedule: datetime,
    belief_time: datetime,
    resolution: timedelta = DEFAULT_RESOLUTION,
    soc_at_start: Optional[float] = None,
    soc_targets: Optional[pd.Series] = None,
    udi_event_ea: Optional[str] = None,
    enqueue: bool = True,
) -> Job:
    """Supporting quick retrieval of the scheduling job, the job id is the unique entity address of the UDI event.
    That means one event leads to one job (i.e. actions are event driven).

    Target SOC values should be indexed by their due date. For example, for quarter-hourly targets between 5 and 6 AM:
    >>> df = pd.Series(data=[1, 2, 2.5, 3], index=pd.date_range(datetime(2010,1,1,5), datetime(2010,1,1,6), freq=timedelta(minutes=15), closed="right"))
    >>> print(df)
        2010-01-01 05:15:00    1.0
        2010-01-01 05:30:00    2.0
        2010-01-01 05:45:00    2.5
        2010-01-01 06:00:00    3.0
        Freq: 15T, dtype: float64
    """
    job = Job.create(
        make_schedule,
        kwargs=dict(
            asset_id=asset_id,
            start=start_of_schedule,
            end=end_of_schedule,
            belief_time=belief_time,
            resolution=resolution,
            soc_at_start=soc_at_start,
            soc_targets=soc_targets,
        ),
        id=udi_event_ea,
        connection=current_app.queues["scheduling"].connection,
        result_ttl=int(
            current_app.config.get(
                "FLEXMEASURES_PLANNING_TTL", timedelta(-1)
            ).total_seconds()
        ),  # NB job.cleanup docs say a negative number of seconds means persist forever
    )
    if enqueue:
        current_app.queues["scheduling"].enqueue_job(job)
    return job
Example #50
def submit():
    params = _get_form_params("name", "url")
    if any([_is_missing(p) for p in params]):
        return "Parameters missing..."
    name, url = params

    job = Job.create(count_dollars, (url, ),
                     meta={
                         "name": name,
                         "tag": "tag" + str(uuid.uuid4()).replace('-', '')
                     },
                     connection=conn,
                     ttl=500,
                     failure_ttl=500)
    q.enqueue_job(job)
    time.sleep(1)

    return redirect("/jobs", code=302)
Example #51
    def test_dependencies_finished_watches_job(self):
        queue = Queue(connection=self.testconn)

        dependency_job = queue.enqueue(fixtures.say_hello)

        dependent_job = Job.create(func=fixtures.say_hello)
        dependent_job._dependency_ids = [dependency_job.id]
        dependent_job.register_dependency()

        with self.testconn.pipeline() as pipeline:
            dependent_job.dependencies_are_met(pipeline=pipeline)

            dependency_job.set_status(JobStatus.FAILED, pipeline=self.testconn)
            pipeline.multi()

            with self.assertRaises(WatchError):
                pipeline.touch(Job.key_for(dependent_job.id))
                pipeline.execute()
Example #52
    def test_fetch_dependencies_watches(self):
        queue = Queue(connection=self.testconn)
        dependency_job = queue.enqueue(fixtures.say_hello)
        dependent_job = Job.create(func=fixtures.say_hello,
                                   depends_on=dependency_job)

        dependent_job.register_dependency()
        dependent_job.save()

        with self.testconn.pipeline() as pipeline:
            dependent_job.fetch_dependencies(watch=True, pipeline=pipeline)

            pipeline.multi()

            with self.assertRaises(WatchError):
                self.testconn.set(dependency_job.id, 'somethingelsehappened')
                pipeline.touch(dependency_job.id)
                pipeline.execute()
Example #53
    def test_job_with_dependents_delete_parent_with_saved(self):
        """job.delete() deletes itself from Redis but not dependents. If the
        dependent job was saved, it will remain in Redis."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = Job.create(func=fixtures.say_hello, depends_on=job)
        job2.register_dependency()
        job2.save()

        job.delete()
        self.assertFalse(self.testconn.exists(job.key))
        self.assertFalse(self.testconn.exists(job.dependents_key))

        # By default, dependents are not deleted; the dependent job is in
        # Redis only if it was saved!
        self.assertTrue(self.testconn.exists(job2.key))

        self.assertNotIn(job.id, queue.get_job_ids())
Example #54
    def handle(self, *args, **options):
        redis_conn = django_rq.get_connection()

        task_name = "xkw_doc_task"

        q = Queue(task_name, connection=redis_conn)
        q.empty()
        all_job_ids = set(q.job_ids)  # materialize once; a generator would be exhausted after the first membership test
        locks = BaseSpiderTask.objects.filter(task_name=task_name, status=1)
        locks.update(status=0)
        tasks = BaseSpiderTask.objects.filter(task_name=task_name, status=0)
        for task in tasks:
            if str(task.id) not in all_job_ids:
                job = Job.create(id="%s" % task.id,
                                 func=lambda x: True,
                                 connection=redis_conn)
                q.enqueue_job(job)
        print("\ncurrent judge task count:[%s]\n" % q.count)
Example #55
    def test_create_typical_job(self):
        """Creation of jobs for function calls."""
        job = Job.create(some_calculation, 3, 4, z=2)

        # Jobs have a random UUID
        self.assertIsNotNone(job.id)
        self.assertIsNotNone(job.created_at)
        self.assertIsNotNone(job.description)

        # Job data is set...
        self.assertEqual(job.func, some_calculation)
        self.assertEqual(job.args, (3, 4))
        self.assertEqual(job.kwargs, {'z': 2})

        # ...but metadata is not
        self.assertIsNone(job.origin)
        self.assertIsNone(job.enqueued_at)
        self.assertIsNone(job.return_value)
Example #56
    def test_job_with_dependents_delete_parent(self):
        """job.delete() deletes itself from Redis but not dependents.
        Without a save, the dependent job is never saved into Redis. The delete
        method will get and pass a NoSuchJobError.
        """
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = Job.create(func=fixtures.say_hello, depends_on=job)
        job2.register_dependency()

        job.delete()
        self.assertFalse(self.testconn.exists(job.key))
        self.assertFalse(self.testconn.exists(job.dependents_key))

        # By default, dependents are not deleted; the dependent job is in
        # Redis only if it was saved!
        self.assertFalse(self.testconn.exists(job2.key))

        self.assertNotIn(job.id, queue.get_job_ids())
Example #57
    def test_custom_meta_is_rewriten_by_save_meta(self):
        """New meta data can be stored by save_meta."""
        job = Job.create(func=fixtures.say_hello, args=('Lionel', ))
        job.save()
        serialized = job.to_dict()

        job.meta['foo'] = 'bar'
        job.save_meta()

        raw_meta = self.testconn.hget(job.key, 'meta')
        self.assertEqual(loads(raw_meta)['foo'], 'bar')

        job2 = Job.fetch(job.id)
        self.assertEqual(job2.meta['foo'], 'bar')

        # nothing else was changed
        serialized2 = job2.to_dict()
        serialized2.pop('meta')
        self.assertDictEqual(serialized, serialized2)
Example #58
 def _create_job(self, func, args=None, kwargs=None, commit=True,
                 result_ttl=None):
     """
     Creates an RQ job and saves it to Redis.
     """
     if func.__module__ == '__main__':
         raise ValueError(
                 'Functions from the __main__ module cannot be processed '
                 'by workers.')
     if args is None:
         args = ()
     if kwargs is None:
         kwargs = {}
     job = Job.create(func, args=args, connection=self.connection,
                      kwargs=kwargs, result_ttl=result_ttl)
     job.origin = self.queue_name
     if commit:
         job.save()
     return job
Example #59
    def test_create_typical_job(self):
        """Creation of jobs for function calls."""
        job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2))

        # Jobs have a random UUID
        self.assertIsNotNone(job.id)
        self.assertIsNotNone(job.created_at)
        self.assertIsNotNone(job.description)
        self.assertIsNone(job.instance)

        # Job data is set...
        self.assertEqual(job.func, fixtures.some_calculation)
        self.assertEqual(job.args, (3, 4))
        self.assertEqual(job.kwargs, {'z': 2})

        # ...but metadata is not
        self.assertIsNone(job.origin)
        self.assertIsNone(job.enqueued_at)
        self.assertIsNone(job.result)
Example #60
    def test_job_access_within_job_function(self):
        """The current job is accessible within the job function."""
        # Outside of RQ, get_current_job() returns None
        self.assertIsNone(get_current_job())

        # Executing the job function from within the job works (and in
        # this case leads to the job ID being returned)
        job = Job.create(func=access_self)
        job.save()
        id = job.perform()
        self.assertEqual(job.id, id)
        self.assertEqual(job.func, access_self)

        # Ensure that get_current_job also works from within synchronous jobs
        queue = Queue(async=False)
        job = queue.enqueue(access_self)
        id = job.perform()
        self.assertEqual(job.id, id)
        self.assertEqual(job.func, access_self)
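
Taken together, the examples above repeat one core sequence: Job.create builds the job, job.save() persists it to Redis, Queue.enqueue_job pushes the prepared job, and Job.fetch reads it back. Below is a minimal end-to-end sketch of that sequence, assuming only a local Redis server and an installed rq; 'tests.fixtures.say_hello' stands in for any function importable by a worker.

from redis import Redis
from rq import Queue
from rq.job import Job

connection = Redis()
queue = Queue(connection=connection)

# Build and persist a job without enqueueing it yet
job = Job.create(func='tests.fixtures.say_hello', args=('World',),
                 connection=connection)
job.save()

# Push the prepared job onto its queue (bypasses Queue.enqueue's own Job.create)
queue.enqueue_job(job)

# Round-trip: fetch the stored job back by its ID
fetched = Job.fetch(job.id, connection=connection)
assert fetched.id == job.id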