""" from .settings import QUEUES_LIST return get_redis_connection(QUEUES_LIST[index]['connection_config']) def get_queue(name='default', default_timeout=None, async=None): """ Returns an rq Queue using parameters defined in ``RQ_QUEUES`` """ from .settings import QUEUES # If async is provided, use it, otherwise, get it from the configuration if async is None: async = QUEUES[name].get('ASYNC', True) return Queue(name, default_timeout=default_timeout, connection=get_connection(name), async=async) def get_queue_by_index(index): """ Returns an rq Queue using parameters defined in ``QUEUES_LIST`` """ from .settings import QUEUES_LIST config = QUEUES_LIST[int(index)] if config['name'] == 'failed': return FailedQueue(connection=get_redis_connection(config['connection_config'])) return Queue(config['name'], connection=get_redis_connection(config['connection_config']), async=config.get('ASYNC', True))
def test_create_job_with_ttl_should_have_ttl_after_enqueued(self):
    """test creating jobs with ttl and checks if get_jobs returns it properly [issue502]"""
    queue = Queue(connection=self.testconn)
    queue.enqueue(fixtures.say_hello, job_id="1234", ttl=10)
    job = queue.get_jobs()[0]
    self.assertEqual(job.ttl, 10)
def test_create_job_with_ttl_should_expire(self):
    """test if a job created with ttl expires [issue502]"""
    queue = Queue(connection=self.testconn)
    queue.enqueue(fixtures.say_hello, job_id="1234", ttl=1)
    time.sleep(1.1)
    self.assertEqual(0, len(queue.get_jobs()))
def test_enqueue_job_async_status_finished(self):
    queue = Queue(is_async=False)
    job = Job.create(func=fixtures.say_hello)
    job = queue.enqueue_job(job)
    self.assertEqual(job.result, 'Hi there, Stranger!')
    self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_ttl_via_enqueue(self):
    ttl = 1
    queue = Queue(connection=self.testconn)
    job = queue.enqueue(fixtures.say_hello, ttl=ttl)
    self.assertEqual(job.get_ttl(), ttl)
def test_jobs_pagination_non_overlap(self):
    q1 = Queue('q1')
    q2 = Queue('q2')
    for i in range(12):
        job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2))
        q1.enqueue_job(job)
    for i in range(13):
        job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2))
        q2.enqueue_job(job)
    self.assertEqual(q1.count + q2.count, 25)

    query_string1 = {
        'start': 0,
        'length': 10,
        'draw': 1,
        'queues[]': ['q1', 'q2'],
        'jobstatus[]': ['queued', 'failed'],
    }
    query_string2 = {
        'start': 10,
        'length': 10,
        'draw': 2,
        'queues[]': ['q1', 'q2'],
        'jobstatus[]': ['queued', 'failed'],
    }
    query_string3 = {
        'start': 20,
        'length': 10,
        'draw': 3,
        'queues[]': ['q1', 'q2'],
        'jobstatus[]': ['queued', 'failed'],
    }

    # First page: jobs 0-9.
    response1 = self.client.get('/jobs', query_string=query_string1)
    response1_json = json.loads(response1.data.decode('utf-8'))
    self.assertEqual(response1_json['draw'], 1)
    self.assertEqual(response1_json['recordsTotal'], 25)
    self.assertEqual(response1_json['recordsFiltered'], 25)
    self.assertEqual(len(response1_json['data']), 10)
    data1 = response1_json['data']
    data1_ids = [job['job_info']['job_id'] for job in data1]

    # Second page: jobs 10-19, which must not repeat any first-page job.
    response2 = self.client.get('/jobs', query_string=query_string2)
    response2_json = json.loads(response2.data.decode('utf-8'))
    self.assertEqual(response2_json['draw'], 2)
    self.assertEqual(response2_json['recordsTotal'], 25)
    self.assertEqual(response2_json['recordsFiltered'], 25)
    self.assertEqual(len(response2_json['data']), 10)
    data2 = response2_json['data']
    data2_ids = [job['job_info']['job_id'] for job in data2]

    for job_id in data1_ids:
        self.assertNotIn(job_id, data2_ids)
    for job_id in data2_ids:
        self.assertNotIn(job_id, data1_ids)

    # Third page: the remaining 5 jobs, disjoint from the earlier pages.
    response3 = self.client.get('/jobs', query_string=query_string3)
    response3_json = json.loads(response3.data.decode('utf-8'))
    self.assertEqual(response3_json['draw'], 3)
    self.assertEqual(response3_json['recordsTotal'], 25)
    self.assertEqual(response3_json['recordsFiltered'], 25)
    self.assertEqual(len(response3_json['data']), 5)
    data3 = response3_json['data']

    for job_data in data2:
        self.assertNotIn(job_data, data3)
    for job_data in data1:
        self.assertNotIn(job_data, data2)
def test_job_access_within_synchronous_job_function(self):
    queue = Queue(is_async=False)
    queue.enqueue(fixtures.access_self)
def test_job_access_within_job_function(self):
    """The current job is accessible within the job function."""
    q = Queue()
    q.enqueue(fixtures.access_self)  # access_self calls get_current_job() and asserts
    w = Worker([q])
    w.work(burst=True)
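# A hedged sketch of what a fixture like ``access_self`` could look like; the
# real helper lives in rq's test fixtures module, and this version only
# illustrates the behaviour the two tests above depend on.
from rq import get_current_job


def access_self():
    # get_current_job() returns the Job currently being executed, whether it
    # runs inside a Worker or inline via Queue(is_async=False).
    assert get_current_job() is not None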
def _add_jobs_to_queue(self, queue_name, num):
    queue = Queue(queue_name, connection=FakeStrictRedis())
    for _ in range(num):
        queue.enqueue(self._dummy_func)
def failures():
    """Show any unexpected failures"""
    q = Queue('failed', connection=worker.connection)
    for i in q.get_jobs():
        click.echo("%s on %s" % (i.func_name, i.origin))
        click.echo(i.exc_info)
def spawn_worker(self, job):
    connection = Connection()
    worker = Worker(connection, job, self.log)
    worker_pid = os.fork()
    if worker_pid == 0:
        # We are in the child: run the worker and never return to the arbiter loop.
        try:
            self.log.info("Booting worker with pid: %s", os.getpid())
            worker.init_process()
            sys.exit(0)
        except SystemExit:
            raise
        except:
            self.log.exception("Exception in worker process:\n%s",
                               traceback.format_exc())
            sys.exit(-1)
    else:
        # We are in the parent: remember the child so it can be tracked later.
        self.WORKERS[worker_pid] = worker

@property
def pid(self):
    return os.getpid()


if __name__ == '__main__':
    print("PID: %r" % (os.getpid(),))
    with Connection():
        q_builds = Queue('builds')
        arbiter = Arbiter([q_builds], number_of_processes=4)
        arbiter.run()
def enqueue(function, *args, **kwargs):
    setup_rq_connection()
    # Routing options are popped from kwargs so they are not forwarded to the task.
    queue = kwargs.pop('queue', 'default')
    timeout = kwargs.pop('timeout', 3600)
    return Queue(queue).enqueue(function, *args, timeout=timeout, **kwargs)
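# A short usage sketch of the helper above; ``process_upload`` is a
# hypothetical task defined only for illustration, and the 'high' queue and
# 600-second timeout are example values rather than required settings.
def process_upload(upload_id):
    print('processing upload %s' % upload_id)


job = enqueue(process_upload, 42, queue='high', timeout=600)
print(job.id)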