Example #1
File: job.py Project: wangyibing/rq
 def get_id(self):  # noqa
     """The job ID for this job instance. Generates an ID lazily the
     first time the ID is requested.
     """
     if self._id is None:
         self._id = text_type(uuid4())
     return self._id
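The snippet above shows rq's lazy ID pattern: the UUID is generated on first access and then cached. A minimal self-contained sketch of the same idea, using the standard library's str in place of six's text_type (the LazyId stub is hypothetical, not rq's Job class):

    import uuid

    class LazyId(object):
        """Hypothetical stub illustrating the lazy ID pattern above."""
        def __init__(self):
            self._id = None

        def get_id(self):
            # Generate the UUID only on first access, then cache it.
            if self._id is None:
                self._id = str(uuid.uuid4())
            return self._id

    obj = LazyId()
    assert obj.get_id() == obj.get_id()  # the ID is stable once generated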
Example #2
    def __init__(self, id=None, connection=None):
        from .connections import resolve_connection
        self.connection = resolve_connection(connection)

        if id is not None:
            self._id = id
        else:
            self._id = text_type(uuid4())

        self.created_at = utcnow()
        self._data = UNEVALUATED
        self._func_name = UNEVALUATED
        self._instance = UNEVALUATED
        self._args = UNEVALUATED
        self._kwargs = UNEVALUATED
        self.description = None
        self.origin = None
        self.enqueued_at = None
        self.started_at = None
        self.ended_at = None
        self._result = None
        self.exc_info = None
        self.timeout = None
        self.result_ttl = None
        self.ttl = None
        self._status = None
        self._parent_ids = []
        self.meta = {}
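This constructor combines the same ID logic with an UNEVALUATED sentinel for fields that are deserialized lazily from Redis. A hedged, standalone sketch of those two ideas (MiniJob and the sentinel object here are stand-ins, not rq's real classes):

    import uuid
    from datetime import datetime

    UNEVALUATED = object()  # stand-in sentinel: "not yet loaded from Redis"

    class MiniJob(object):
        def __init__(self, id=None):
            # Keep a caller-supplied id, otherwise generate one eagerly.
            self._id = id if id is not None else str(uuid.uuid4())
            self.created_at = datetime.utcnow()
            self._data = UNEVALUATED  # identity check tells "unset" from None

    job = MiniJob(id='custom-id')
    assert job._id == 'custom-id'
    assert MiniJob()._data is UNEVALUATED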
Example #3
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job)

        with self.connection._pipeline() as pipeline:
            started_job_registry = StartedJobRegistry(job.origin, self.connection)

            try:
                with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()  # execute the job

                # update the job's state in Redis

                # Pickle the result in the same try-except block since we need
                # to use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.ended_at = utcnow()
                    job._status = JobStatus.FINISHED
                    job.save(pipeline=pipeline)

                    finished_job_registry = FinishedJobRegistry(job.origin, self.connection)
                    finished_job_registry.add(job, result_ttl, pipeline)

                job.cleanup(result_ttl, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)  # remove the job from the started-job registry

                pipeline.execute()

            except Exception:
                job.set_status(JobStatus.FAILED, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)
                try:
                    pipeline.execute()
                except Exception:
                    # Ensure that custom exception handlers are called
                    # even if Redis is down
                    pass
                self.handle_exception(job, *sys.exc_info())
                return False

        self.log.info(green('Job OK'))
        if rv:
            log_result = "{0!r}".format(as_text(text_type(rv)))
            self.log.debug('Result: {0}'.format(yellow(log_result)))

        if result_ttl == 0:
            self.log.info('Result discarded immediately')
        elif result_ttl > 0:
            self.log.info('Result is kept for {0} seconds'.format(result_ttl))
        else:
            self.log.warning('Result will never expire, clean up result key manually')

        return True
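This and the following perform_job variants all end with the same result_ttl convention: 0 discards the result immediately, a positive value keeps it for that many seconds, and a negative value keeps it forever. A tiny sketch of that decision table on its own:

    def describe_result_ttl(result_ttl):
        # Mirrors the logging convention used throughout these examples.
        if result_ttl == 0:
            return 'Result discarded immediately'
        elif result_ttl > 0:
            return 'Result is kept for {0} seconds'.format(result_ttl)
        else:
            return 'Result will never expire, clean up result key manually'

    assert describe_result_ttl(0) == 'Result discarded immediately'
    assert describe_result_ttl(500) == 'Result is kept for 500 seconds'
    assert describe_result_ttl(-1).startswith('Result will never expire')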
Example #4
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job)

        with self.connection._pipeline() as pipeline:
            started_job_registry = StartedJobRegistry(job.origin, self.connection)

            try:
                with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()

                # Pickle the result in the same try-except block since we need
                # to use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.ended_at = utcnow()
                    job.set_status(JobStatus.FINISHED, pipeline=pipeline)
                    job.save(pipeline=pipeline)

                    finished_job_registry = FinishedJobRegistry(job.origin, self.connection)
                    finished_job_registry.add(job, result_ttl, pipeline)

                job.cleanup(result_ttl, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)

                pipeline.execute()

            except Exception:
                job.set_status(JobStatus.FAILED, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)
                self.set_current_job_id(None, pipeline=pipeline)
                try:
                    pipeline.execute()
                except Exception:
                    # Ensure that custom exception handlers are called
                    # even if Redis is down
                    pass
                self.handle_exception(job, *sys.exc_info())
                return False

        self.log.info('{0}: {1} ({2})'.format(green(job.origin), blue('Job OK'), job.id))
        if rv:
            log_result = "{0!r}".format(as_text(text_type(rv)))
            self.log.debug('Result: {0}'.format(yellow(log_result)))

        if result_ttl == 0:
            self.log.info('Result discarded immediately')
        elif result_ttl > 0:
            self.log.info('Result is kept for {0} seconds'.format(result_ttl))
        else:
            self.log.warning('Result will never expire, clean up result key manually')

        return True
Example #5
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            with death_penalty_after(job.timeout or Queue.DEFAULT_TIMEOUT):
                job.started_at = times.now()
                rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            job._result = rv
            job._status = Status.FINISHED
            job.ended_at = times.now()

            keys = self.connection.hgetall(job._annotations)
            p = self.connection.pipeline()
            ingress = int(keys['ingress'])
            egress = int(keys['egress'])
            mean = float(keys['mean'])
            delta = job.ended_at - job.started_at
            delta = delta.seconds + (delta.microseconds / 1e6)
            mean = (mean + delta) / (max(1, ingress + egress))
            p.hincrby(job._annotations, 'ingress', amount=-1)
            p.hincrby(job._annotations, 'egress',  amount=1)
            p.hset(job._annotations, 'mean', mean)
            p.execute()

            result_ttl = job.get_ttl(self.default_result_ttl)
            pipeline = self.connection._pipeline()
            if result_ttl != 0:
                job.save(pipeline=pipeline)
            job.cleanup(result_ttl, pipeline=pipeline)
            pipeline.execute()

        except:
            # Use the public setter here, to immediately update Redis
            job.status = Status.FAILED
            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(text_type(rv)),))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True
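The timing arithmetic above, delta.seconds + (delta.microseconds / 1e6), predates timedelta.total_seconds() and agrees with it as long as the job ran for less than a day, since delta.seconds excludes the days component. A quick check:

    from datetime import timedelta

    delta = timedelta(seconds=3, microseconds=250000)
    manual = delta.seconds + (delta.microseconds / 1e6)
    assert abs(manual - delta.total_seconds()) < 1e-9  # equal when days == 0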
Example #6
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job)

        with self.connection._pipeline() as pipeline:
            started_job_registry = StartedJobRegistry(job.origin,
                                                      self.connection)

            try:
                with self.death_penalty_class(
                        job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()

                # Pickle the result in the same try-except block since we need
                # to use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.ended_at = utcnow()
                    job._status = Status.FINISHED
                    job.save(pipeline=pipeline)

                    finished_job_registry = FinishedJobRegistry(
                        job.origin, self.connection)
                    finished_job_registry.add(job, result_ttl, pipeline)

                job.cleanup(result_ttl, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)

                pipeline.execute()

            except Exception:
                job.set_status(Status.FAILED, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)
                pipeline.execute()
                self.handle_exception(job, *sys.exc_info())
                return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(text_type(rv)), ))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning(
                'Result will never expire, clean up result key manually.')

        return True
Example #7
    def perform_job(self, job, queue, heartbeat_ttl=None):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job, heartbeat_ttl)

        push_connection(self.connection)

        started_job_registry = StartedJobRegistry(job.origin,
                                                  self.connection,
                                                  job_class=self.job_class)

        try:
            job.started_at = utcnow()
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            with self.death_penalty_class(timeout, JobTimeoutException, job_id=job.id):
                rv = job.perform(self.workerKwargs)

            job.ended_at = utcnow()

            # Pickle the result in the same try-except block since we need
            # to use the same exc handling when pickling fails
            job._result = rv

            self.handle_job_success(job=job,
                                    queue=queue,
                                    started_job_registry=started_job_registry)
        except:
            job.ended_at = utcnow()
            self.handle_job_failure(job=job,
                                    started_job_registry=started_job_registry)
            self.handle_exception(job, *sys.exc_info())
            return False

        finally:
            pop_connection()

        self.log.info('{0}: {1} ({2})'.format(
            green(job.origin), blue('Job OK'), job.id))
        if rv is not None:
            log_result = "{0!r}".format(as_text(text_type(rv)))
            self.log.debug('Result: %s', yellow(log_result))

        if self.log_result_lifespan:
            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl == 0:
                self.log.info('Result discarded immediately')
            elif result_ttl > 0:
                self.log.info(
                    'Result is kept for {0} seconds'.format(result_ttl))
            else:
                self.log.warning(
                    'Result will never expire, clean up result key manually')

        return True
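This variant balances push_connection/pop_connection in a finally block so the connection stack always unwinds, even when the job raises. A sketch of that stack discipline (the stack and helpers here are simplified stand-ins for rq's connection helpers):

    _connection_stack = []  # simplified stand-in for rq's connection stack

    def push_connection(connection):
        _connection_stack.append(connection)

    def pop_connection():
        return _connection_stack.pop()

    def run_with_connection(connection, func):
        push_connection(connection)
        try:
            return func()
        finally:
            pop_connection()  # always unwinds, mirroring the finally: above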
Example #8
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """

        self.set_state('busy')
        self.set_current_job_id(job.id)
        self.heartbeat((job.timeout or 180) + 60)

        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        with self.connection._pipeline() as pipeline:
            try:
                job.set_status(Status.STARTED)
                with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()

                # Pickle the result in the same try-except block since we need to
                # use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.ended_at = utcnow()
                    job._status = Status.FINISHED
                    job.save(pipeline=pipeline)
                job.cleanup(result_ttl, pipeline=pipeline)

                pipeline.execute()

            except Exception:
                # Use the public setter here, to immediately update Redis
                job.set_status(Status.FAILED)
                self.handle_exception(job, *sys.exc_info())
                return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(text_type(rv)),))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True
Example #9
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """

        self.set_state('busy')
        self.set_current_job_id(job.id)
        self.heartbeat((job.timeout or 180) + 60)

        self.procline('Processing %s from %s since %s' %
                      (job.func_name, job.origin, time.time()))

        with self.connection._pipeline() as pipeline:
            try:
                with self.death_penalty_class(
                        job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()

                # Pickle the result in the same try-except block since we need to
                # use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.save(pipeline=pipeline)
                job.cleanup(result_ttl, pipeline=pipeline)

                pipeline.execute()

            except Exception as e:
                # Use the public setter here, to immediately update Redis
                job.set_status(Status.FAILED)
                self.handle_exception(job, *sys.exc_info())
                if isinstance(e, JobTimeoutException):
                    raise SystemExit(1)
                return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(text_type(rv)), ))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning(
                'Result will never expire, clean up result key manually.')

        return True
Example #10
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job)

        with self.connection._pipeline() as pipeline:
            started_job_registry = StartedJobRegistry(job.origin, self.connection)

            try:
                with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()

                # Pickle the result in the same try-except block since we need
                # to use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.ended_at = utcnow()
                    job._status = Status.FINISHED
                    job.save(pipeline=pipeline)

                    finished_job_registry = FinishedJobRegistry(job.origin, self.connection)
                    finished_job_registry.add(job, result_ttl, pipeline)

                job.cleanup(result_ttl, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)

                pipeline.execute()

            except Exception:
                job.set_status(Status.FAILED, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)
                pipeline.execute()
                self.handle_exception(job, *sys.exc_info())
                return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(text_type(rv)),))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True
Example #11
    def perform_job(self, job, queue):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job)

        push_connection(self.connection)

        started_job_registry = StartedJobRegistry(job.origin, self.connection)

        try:
            with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                rv = job.perform()

            job.ended_at = utcnow()

            # Pickle the result in the same try-except block since we need
            # to use the same exc handling when pickling fails
            job._result = rv

            self.handle_job_success(
                job=job,
                queue=queue,
                started_job_registry=started_job_registry
            )
        except Exception:
            self.handle_job_failure(
                job=job,
                started_job_registry=started_job_registry
            )
            self.handle_exception(job, *sys.exc_info())
            return False

        finally:
            pop_connection()

        self.log.info('{0}: {1} ({2})'.format(green(job.origin), blue('Job OK'), job.id))
        if rv is not None:
            log_result = "{0!r}".format(as_text(text_type(rv)))
            self.log.debug('Result: {0}'.format(yellow(log_result)))

        result_ttl = job.get_result_ttl(self.default_result_ttl)
        if result_ttl == 0:
            self.log.info('Result discarded immediately')
        elif result_ttl > 0:
            self.log.info('Result is kept for {0} seconds'.format(result_ttl))
        else:
            self.log.warning('Result will never expire, clean up result key manually')

        return True
Example #12
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            with death_penalty_after(job.timeout or 180):
                rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            job._result = rv
            job._status = Status.FINISHED
            job.ended_at = times.now()

            result_ttl = job.get_ttl(self.default_result_ttl)
            pipeline = self.connection._pipeline()
            if result_ttl != 0:
                job.save(pipeline=pipeline)
            job.cleanup(result_ttl, pipeline=pipeline)
            pipeline.execute()

        except:
            # Use the public setter here, to immediately update Redis
            job.status = Status.FAILED
            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(text_type(rv)),))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True
Example #13
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            # I have DISABLED the time limit!
            rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            job._result = rv
            self.set_state(Status.FINISHED)
            job.ended_at = times.now()

            result_ttl = job.get_ttl(self.default_result_ttl)
            pipeline = self.connection._pipeline()
            if result_ttl != 0:
                job.save(pipeline=pipeline)
            job.cleanup(result_ttl, pipeline=pipeline)
            pipeline.execute()

        except:
            # Use the public setter here, to immediately update Redis
            self.set_state(Status.FAILED)
            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(text_type(rv)),))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True
Example #14
    def at_requests(self, at):
        """
        {
            "desc": "AT request tasks entrance point. <br/> \
            It would provide at (linux command) feature to help you reserve a request event by your parameter.<br/> \
            It needs a json data have (url,method,auth,header,data) properties. <br/> \
            The auth and header are optional property. <br/> \
            Parameter:  at=20190502133000 it means when at that time the task would occurred. <br/> \
            The at parameter format should be: %Y%m%d%H%M%S",
            "mediaType": "application/json",
            "data": {
                        "url": ".....",
                        "method": "post",
                        "auth": "user:pass",
                        "header": {"apikey": "****"},
                        "data": {"parame1":"value1","parame2":"value2"}
                    }
        }
        """
        try:
            if len(at) != 14:
                raise Exception('Wrong datetime format, it should be %Y%m%d%H%M%S')
            at = datetime.datetime.strptime(at, "%Y%m%d%H%M%S")
            at = at.astimezone(pytz.utc)
#             at = pytz.timezone('America/Chicago').localize(at).astimezone(pytz.utc)
        except Exception as e:
            log.error("format parameter at error=%s", e)
            return jsonify({'message': 'Wrong parameter, it should be %Y%m%d%H%M%S', 'code': 400}), 400
        try:
            data = request.get_json()
            basic_auth = data.get('auth', None)
            if basic_auth:
                basic_auth = basic_auth.split(':')
                basic_auth = HTTPBasicAuth(basic_auth[0], basic_auth[1])
            header = data.get('header', None)
            job_id = text_type(uuid4())
            schedule_requests_task.schedule(at, data['url'], data['method'], basic_auth, header, \
                                            job_id=job_id, **data['data'])
        except Exception as e:
            log.error(e)
            return jsonify({'code': 400, 'message': str(e)}), 400

        return jsonify({'message': 'success', 'id': job_id})
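Assuming this handler is mounted at a route such as /at/<at> (the routing is not shown here, so the path is hypothetical), a client call matching the documented payload and the %Y%m%d%H%M%S timestamp format might look like:

    import requests  # hypothetical client call; the route path is assumed

    payload = {
        "url": "https://example.com/hook",
        "method": "post",
        "auth": "user:pass",
        "header": {"apikey": "****"},
        "data": {"parame1": "value1", "parame2": "value2"},
    }
    # 20190502133000 == 2019-05-02 13:30:00 in %Y%m%d%H%M%S format
    resp = requests.post("http://localhost:5000/at/20190502133000", json=payload)
    print(resp.json())  # expected: {'message': 'success', 'id': '<job uuid>'}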
Example #15
    def at_test(self, at):
        """
        Test schedule_requests_task execution test Flask-rq2 post fork feature
        """
        try:
            if len(at) != 14:
                raise Exception('Wrong datetime format, it should be %Y%m%d%H%M%S')
            at = datetime.datetime.strptime(at, "%Y%m%d%H%M%S")
            at = at.astimezone(pytz.utc)
        except Exception as e:
            log.error("format parameter at error=%s", e)
            return jsonify({'message': 'Wrong parameter, it should be %Y%m%d%H%M%S', 'code': 400}), 400
        try:
            data = request.get_json()
            name = data.get('name', None)
            job_id = text_type(uuid4())
            schedule_test_task.schedule(at, name, job_id=job_id)
        except Exception as e:
            log.error(e)
            return jsonify({'code': 400, 'message': str(e)}), 400

        return jsonify({'message': 'success', 'id': job_id})
Example #16
    def perform_job(self, job, *, loop=None):
        """Performs the actual work of a job."""

        yield from self.prepare_job_execution(job)

        pipe = self.connection.multi_exec()
        started_job_registry = StartedJobRegistry(job.origin, self.connection)

        try:
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            try:
                rv = yield from asyncio.wait_for(
                    job.perform(), timeout, loop=loop)
            except asyncio.TimeoutError as error:
                raise JobTimeoutException from error

            # Pickle the result in the same try-except block since we
            # need to use the same exc handling when pickling fails
            yield from self.set_current_job_id(None, pipeline=pipe)

            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl != 0:
                job.ended_at = utcnow()
                job._status = JobStatus.FINISHED
                yield from job.save(pipeline=pipe)

                finished_job_registry = FinishedJobRegistry(
                    job.origin, self.connection)
                yield from finished_job_registry.add(job, result_ttl, pipe)

            yield from job.cleanup(result_ttl, pipeline=pipe)
            yield from started_job_registry.remove(job, pipeline=pipe)

            yield from pipe.execute()

        except Exception:
            # TODO: if `pipe.execute()` throws exception
            # `ConnectionClosedError` and we try to add actions to the
            # pipeline which was already executed then line below will
            # throw "AssertionError: Pipeline already executed. Create
            # new one."
            yield from job.set_status(JobStatus.FAILED, pipeline=pipe)
            yield from started_job_registry.remove(job, pipeline=pipe)
            yield from self.set_current_job_id(None, pipeline=pipe)
            try:
                yield from pipe.execute()
            except Exception:
                # Ensure that custom exception handlers are called
                # even if Redis is down
                pass
            yield from self.handle_exception(job, *sys.exc_info())
            return False

        logger.info('%s: %s (%s)', green(job.origin), blue('Job OK'), job.id)
        if rv:
            log_result = "{!r}".format(as_text(text_type(rv)))
            logger.debug('Result: %s', yellow(log_result))

        if result_ttl == 0:
            logger.info('Result discarded immediately')
        elif result_ttl > 0:
            logger.info('Result is kept for %s seconds', result_ttl)
        else:
            logger.warning(
                'Result will never expire, clean up result key manually')

        return True
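The asyncio variant above replaces the death-penalty context manager with asyncio.wait_for and re-raises the timeout as the library's JobTimeoutException. The same pattern in a self-contained modern async/await sketch (the exception class is a stand-in, and the loop= argument is dropped, as it was removed in Python 3.10):

    import asyncio

    class JobTimeoutException(Exception):  # stand-in for rq's exception
        pass

    async def slow_job():
        await asyncio.sleep(10)

    async def run_with_timeout():
        try:
            return await asyncio.wait_for(slow_job(), timeout=0.01)
        except asyncio.TimeoutError as error:
            raise JobTimeoutException from error

    # asyncio.run(run_with_timeout())  # raises JobTimeoutException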
Example #17
File: fixtures.py Project: nvie/rq
def say_hello_unicode(name=None):
    """A job with a single argument and a return value."""
    return text_type(say_hello(name))  # noqa
Example #18
    def __init__(self):
        super(WordSetCreate, self).__init__()

        # id to use for django-rq job for form processing
        self.job_id = text_type(uuid.uuid4())
Example #19
    def perform_job(self, job, *, loop=None):
        """Performs the actual work of a job."""

        yield from self.prepare_job_execution(job)

        pipe = self.connection.multi_exec()
        started_job_registry = StartedJobRegistry(job.origin, self.connection)

        try:
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            try:
                rv = yield from asyncio.wait_for(job.perform(),
                                                 timeout,
                                                 loop=loop)
            except asyncio.TimeoutError as error:
                raise JobTimeoutException from error

            # Pickle the result in the same try-except block since we
            # need to use the same exc handling when pickling fails
            yield from self.set_current_job_id(None, pipeline=pipe)

            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl != 0:
                job.ended_at = utcnow()
                job._status = JobStatus.FINISHED
                yield from job.save(pipeline=pipe)

                finished_job_registry = FinishedJobRegistry(
                    job.origin, self.connection)
                yield from finished_job_registry.add(job, result_ttl, pipe)

            yield from job.cleanup(result_ttl, pipeline=pipe)
            yield from started_job_registry.remove(job, pipeline=pipe)

            yield from pipe.execute()

        except Exception:
            # TODO: if `pipe.execute()` throws exception
            # `ConnectionClosedError` and we try to add actions to the
            # pipeline which was already executed then line below will
            # throw "AssertionError: Pipeline already executed. Create
            # new one."
            yield from job.set_status(JobStatus.FAILED, pipeline=pipe)
            yield from started_job_registry.remove(job, pipeline=pipe)
            yield from self.set_current_job_id(None, pipeline=pipe)
            try:
                yield from pipe.execute()
            except Exception:
                # Ensure that custom exception handlers are called
                # even if Redis is down
                pass
            yield from self.handle_exception(job, *sys.exc_info())
            return False

        logger.info('%s: %s (%s)', green(job.origin), blue('Job OK'), job.id)
        if rv:
            log_result = "{!r}".format(as_text(text_type(rv)))
            logger.debug('Result: %s', yellow(log_result))

        if result_ttl == 0:
            logger.info('Result discarded immediately')
        elif result_ttl > 0:
            logger.info('Result is kept for %s seconds', result_ttl)
        else:
            logger.warning(
                'Result will never expire, clean up result key manually')

        return True
Example #20
    def perform_job(self, job, queue):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job)

        with self.connection._pipeline() as pipeline:

            push_connection(self.connection)

            started_job_registry = StartedJobRegistry(job.origin, self.connection)

            try:
                with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()

                # Pickle the result in the same try-except block since we need
                # to use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.ended_at = utcnow()
                    job.set_status(JobStatus.FINISHED, pipeline=pipeline)
                    job.save(pipeline=pipeline)

                    finished_job_registry = FinishedJobRegistry(job.origin,
                                                                self.connection)
                    finished_job_registry.add(job, result_ttl, pipeline)

                queue.enqueue_dependents(job, pipeline=pipeline)
                job.cleanup(result_ttl, pipeline=pipeline,
                            remove_from_queue=False)
                started_job_registry.remove(job, pipeline=pipeline)

                pipeline.execute()

            except Exception:
                self.handle_job_failure(
                    job=job,
                    started_job_registry=started_job_registry,
                    pipeline=pipeline
                )
                try:
                    pipeline.execute()
                except Exception:
                    # Ensure that custom exception handlers are called
                    # even if Redis is down
                    pass
                self.handle_exception(job, *sys.exc_info())
                return False

            finally:
                pop_connection()

        self.log.info('{0}: {1} ({2})'.format(green(job.origin), blue('Job OK'), job.id))
        if rv is not None:
            if type(rv).__name__ == 'unicode':
                rv = rv.encode('utf8')

            log_result = "{0!r}".format(as_text(text_type(rv)))
            self.log.debug('Result: {0}'.format(yellow(log_result)))

        if result_ttl == 0:
            self.log.info('Result discarded immediately')
        elif result_ttl > 0:
            self.log.info('Result is kept for {0} seconds'.format(result_ttl))
        else:
            self.log.warning('Result will never expire, clean up result key manually')

        return True
Example #21
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job)

        with self.connection._pipeline() as pipeline:
            started_job_registry = StartedJobRegistry(job.origin,
                                                      self.connection)

            try:
                logging.debug('perform_job in sw')
                job.matlab_engine = self.matlab_engine
                logging.debug('pj engine:' + str(self.matlab_engine))
                #   logging.debug('pj args,kwargs:'+str(job._args)+','+str(job._kwargs))
                if len(job._args) > 0:
                    new_args = (self.matlab_engine, ) + job._args
                    logging.debug('tg pj  new args:' + str(new_args))
                    job._args = new_args
                elif len(job._kwargs) > 0:
                    job._kwargs['matlab_engine'] = self.matlab_engine
                    logging.debug('tg pj new kwargs:' + str(job._kwargs))
                with self.death_penalty_class(
                        job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()
                # Pickle the result in the same try-except block since we need
                # to use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.ended_at = utcnow()
                    job._status = JobStatus.FINISHED
                    job.save(pipeline=pipeline)

                    finished_job_registry = FinishedJobRegistry(
                        job.origin, self.connection)
                    finished_job_registry.add(job, result_ttl, pipeline)

                job.cleanup(result_ttl, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)

                pipeline.execute()

            except Exception:
                job.set_status(JobStatus.FAILED, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)
                try:
                    pipeline.execute()
                except Exception:
                    pass
                self.handle_exception(job, *sys.exc_info())
                return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = {0!r}'.format(yellow(
                text_type(rv))))

        if result_ttl == 0:
            self.log.info('Result discarded immediately')
        elif result_ttl > 0:
            self.log.info('Result is kept for {0} seconds'.format(result_ttl))
        else:
            self.log.warning(
                'Result will never expire, clean up result key manually')

        return True
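The MATLAB-engine variant above injects the engine by rewriting the job's stored arguments: it prepends the engine to the positional args if any exist, otherwise it passes the engine as a keyword argument. The branching in isolation:

    def inject_engine(engine, args, kwargs):
        # Prepend to positional args when present; otherwise use a kwarg,
        # matching the branching in the example above.
        if len(args) > 0:
            args = (engine,) + tuple(args)
        elif len(kwargs) > 0:
            kwargs = dict(kwargs, matlab_engine=engine)
        return args, kwargs

    assert inject_engine('eng', (1,), {}) == (('eng', 1), {})
    assert inject_engine('eng', (), {'x': 1})[1]['matlab_engine'] == 'eng'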