def test_finished_jobs(self):
    """The finished-jobs view must list a job placed in FinishedJobRegistry.

    NOTE(review): a second definition of this same test method appears later
    in the file; in a class body the later one shadows this one — confirm
    whether the duplicate is intentional.
    """
    target_queue = get_queue('django_rq_test')
    target_index = get_queue_index('django_rq_test')

    # Enqueue a job and register it as finished with a short (2s) TTL.
    finished_job = target_queue.enqueue(access_self)
    finished_registry = FinishedJobRegistry(
        target_queue.name, target_queue.connection)
    finished_registry.add(finished_job, 2)

    # The view for this queue index should surface exactly that job.
    page = self.client.get(reverse('rq_finished_jobs', args=[target_index]))
    self.assertEqual(page.context['jobs'], [finished_job])
def test_finished_jobs(self):
    """Verify the finished-jobs page renders jobs from FinishedJobRegistry.

    NOTE(review): this duplicates an earlier method of the same name; in a
    class body this later definition shadows the earlier one — confirm the
    duplication is intentional.
    """
    name = 'django_rq_test'
    queue, index = get_queue(name), get_queue_index(name)

    # Place one finished job (2-second TTL) into the registry by hand.
    job = queue.enqueue(access_self)
    FinishedJobRegistry(queue.name, queue.connection).add(job, 2)

    # Fetch the page and check the job shows up in the template context.
    url = reverse('rq_finished_jobs', args=[index])
    response = self.client.get(url)
    self.assertEqual(response.context['jobs'], [job])
def perform_job(self, job):
    """Performs the actual work of a job. Will/should only be called
    inside the work horse's process.

    This override injects the worker's MATLAB engine into the job before
    executing it, then stages all Redis state changes on a pipeline so the
    success bookkeeping (result save, registry moves, cleanup) is applied
    atomically. Returns True on success, False if the job raised.
    """
    self.prepare_job_execution(job)
    # NOTE(review): _pipeline() is a private redis-py API — presumably an
    # older redis-py version; verify against the pinned dependency.
    with self.connection._pipeline() as pipeline:
        started_job_registry = StartedJobRegistry(job.origin, self.connection)
        try:
            logging.debug('perform_job in sw')
            # Attach the shared MATLAB engine to the job object itself ...
            job.matlab_engine = self.matlab_engine
            logging.debug('pj engine:' + str(self.matlab_engine))
            # logging.debug('pj args,kwargs:'+str(job._args)+','+str(job._kwargs))
            # ... and also pass it to the job callable: prepended as the
            # first positional arg when positional args exist, otherwise
            # injected as a 'matlab_engine' keyword when kwargs exist.
            # NOTE(review): a job called with no args and no kwargs gets the
            # engine only via job.matlab_engine, not as a parameter — confirm
            # that is intended.
            if len(job._args) > 0:
                new_args = (self.matlab_engine, ) + job._args
                logging.debug('tg pj new args:' + str(new_args))
                job._args = new_args
            elif len(job._kwargs) > 0:
                job._kwargs['matlab_engine'] = self.matlab_engine
                logging.debug('tg pj new kwargs:' + str(job._kwargs))
            # Run the job body under the timeout guard (death penalty).
            with self.death_penalty_class(
                    job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                rv = job.perform()

            # Pickle the result in the same try-except block since we need
            # to use the same exc handling when pickling fails
            job._result = rv

            self.set_current_job_id(None, pipeline=pipeline)

            # result_ttl == 0 means "discard result": skip saving entirely.
            # Positive TTL keeps it for that many seconds; negative keeps it
            # forever (warned about below).
            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl != 0:
                job.ended_at = utcnow()
                job._status = JobStatus.FINISHED
                job.save(pipeline=pipeline)

                finished_job_registry = FinishedJobRegistry(
                    job.origin, self.connection)
                finished_job_registry.add(job, result_ttl, pipeline)

            job.cleanup(result_ttl, pipeline=pipeline)
            started_job_registry.remove(job, pipeline=pipeline)

            # Apply every staged command in one shot.
            pipeline.execute()
        except Exception:
            # Mark the job failed and pull it out of the started registry;
            # best-effort execute (a dead Redis connection here must not mask
            # the original job exception).
            job.set_status(JobStatus.FAILED, pipeline=pipeline)
            started_job_registry.remove(job, pipeline=pipeline)
            try:
                pipeline.execute()
            except Exception:
                pass
            self.handle_exception(job, *sys.exc_info())
            return False

    # Success-path logging only — `rv` and `result_ttl` are always bound
    # here because the exception path returned above.
    if rv is None:
        self.log.info('Job OK')
    else:
        self.log.info('Job OK, result = {0!r}'.format(yellow(
            text_type(rv))))

    if result_ttl == 0:
        self.log.info('Result discarded immediately')
    elif result_ttl > 0:
        self.log.info('Result is kept for {0} seconds'.format(result_ttl))
    else:
        self.log.warning(
            'Result will never expire, clean up result key manually')
    return True