Example 1
0
    def perform_job(self, job):
        """Perform the actual work of a job; return True on success, False on failure.

        Will/should only be called inside the work horse's process.

        Flow: first pipeline marks the worker busy and the job STARTED; the
        job is then executed under a timeout "death penalty" context with
        start/end profiling hooks; a second pipeline persists the result and
        FINISHED status atomically.  Any exception is delegated to
        ``handle_exception`` and the job is marked FAILED immediately.
        """
        # NOTE(review): function-scope import, presumably to avoid a circular
        # import between the worker and admin modules — confirm.
        from  ..admin import timethis, save_profile
        # First pipeline: atomically record that this worker picked up the job.
        with self.connection._pipeline() as pipeline:
            # Keep the worker alive a bit past the job timeout (180s default + 60s slack).
            self.heartbeat((job.timeout or 180) + 60, pipeline=pipeline)
            self.set_state('busy', pipeline=pipeline)
            self.set_current_job_id(job.id, pipeline=pipeline)
            job.set_status(Status.STARTED, pipeline=pipeline)
            pipeline.execute()
        # Second pipeline: queue all post-run bookkeeping, executed only on success.
        with self.connection._pipeline() as pipeline:
            try:
                # Enforce the job timeout while the user code runs.
                with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    save_profile('start', time.time(), job.id)
                    rv = job.perform()
                # Pickle the result in the same try-except block since we need to
                # use the same exc handling when pickling fails
                job._result = rv
                job._status = Status.FINISHED
                self.set_current_job_id(None, pipeline=pipeline)
                result_ttl = job.get_ttl(self.default_result_ttl)
                # result_ttl == 0 means "discard immediately" — skip saving.
                if result_ttl != 0:
                    job.save(pipeline=pipeline)
                job.cleanup(result_ttl, pipeline=pipeline)
                job.push_status(status=Status.FINISHED, pipeline=pipeline)
                # All queued commands hit Redis here, timed for profiling.
                with timethis('result save', jid=job.id):
                    pipeline.execute()
                save_profile('end', time.time(), job.id)

            except Exception:
                # Use the public setter here, to immediately update Redis
                self.handle_exception(job, *sys.exc_info())
                job.set_status(Status.FAILED)
                job.push_status()
                return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s', rv)

        # result_ttl: 0 = discard, >0 = expire after N seconds, <0 = keep forever.
        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True
Example 2
0
    def perform_job(self, job):
        """Perform the actual work of a job; return True on success, False on failure.

        Will/should only be called inside the work horse's process.

        On success the result and FINISHED status are persisted through a
        single Redis pipeline; on failure the job is marked FAILED and the
        exception info is handed to ``handle_exception``.
        """
        self.procline('Processing %s from %s since %s' %
                      (job.func_name, job.origin, time.time()))

        try:
            # I have DISABLED the time limit!
            rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            job._result = rv
            job.set_status(JobStatus.FINISHED, pipeline=None)
            job.ended_at = times.now()

            # result_ttl == 0 means "discard immediately" — skip saving.
            result_ttl = job.get_ttl(self.default_result_ttl)
            pipeline = self.connection._pipeline()
            if result_ttl != 0:
                job.save(pipeline=pipeline)
            job.cleanup(result_ttl, pipeline=pipeline)
            pipeline.execute()

        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit / KeyboardInterrupt
            # can still terminate the work horse instead of being recorded as
            # a job failure.
            # Use the public setter here, to immediately update Redis
            job.set_status(JobStatus.FAILED, pipeline=None)

            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' %
                          (rq.worker.yellow(rq.compat.text_type(rv)), ))

        # result_ttl: 0 = discard, >0 = expire after N seconds, <0 = keep forever.
        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning(
                'Result will never expire, clean up result key manually.')

        return True
Example 3
0
    def perform_job(self, job):
        """Perform the actual work of a job; return True on success, False on failure.

        Will/should only be called inside the work horse's process.

        On success the result and FINISHED status are persisted through a
        single Redis pipeline; on failure the job is marked FAILED and the
        exception info is handed to ``handle_exception``.
        """
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            # I have DISABLED the time limit!
            rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            job._result = rv
            job.set_status(JobStatus.FINISHED, pipeline=None)
            job.ended_at = times.now()

            # result_ttl == 0 means "discard immediately" — skip saving.
            result_ttl = job.get_ttl(self.default_result_ttl)
            pipeline = self.connection._pipeline()
            if result_ttl != 0:
                job.save(pipeline=pipeline)
            job.cleanup(result_ttl, pipeline=pipeline)
            pipeline.execute()

        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit / KeyboardInterrupt
            # can still terminate the work horse instead of being recorded as
            # a job failure.
            # Use the public setter here, to immediately update Redis
            job.set_status(JobStatus.FAILED, pipeline=None)

            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (rq.worker.yellow(rq.compat.text_type(rv)),))

        # result_ttl: 0 = discard, >0 = expire after N seconds, <0 = keep forever.
        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True