Example #1
    def dequeue_job_and_maintain_ttl(self, timeout):

        result = None
        qnames = self.queue_names()

        yield from self.set_state(WorkerStatus.IDLE)
        logger.info('')
        logger.info('*** Listening on %s...', green(', '.join(qnames)))

        while True:
            yield from self.heartbeat()

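            # dequeue_any blocks until a job arrives or `timeout` elapses; on
            # DequeueTimeout the loop restarts, so heartbeat() keeps refreshing
            # the worker's TTL while it stays idle.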
            try:
                result = yield from self.queue_class.dequeue_any(
                    self.queues, timeout, connection=self.connection)
                if result is not None:
                    job, queue = result
                    logger.info('%s: %s (%s)', green(queue.name),
                                blue(job.description), job.id)

                break
            except DequeueTimeout:
                pass

        yield from self.heartbeat()
        return result
Example #3
    def work(self, burst=False):  # noqa
        """Starts the work loop.

        Pops and performs all jobs on the current list of queues.  When all
        queues are empty, block and wait for new jobs to arrive on any of the
        queues, unless `burst` mode is enabled.

        The return value indicates whether any jobs were processed.
        """
        self._install_signal_handlers()

        did_perform_work = False
        self.register_birth()
        self.log.info("RQ worker started, version %s" % rq.version.VERSION)
        self.state = "starting"
        try:
            while True:
                if self.stopped:
                    self.log.info("Stopping on request.")
                    break
                self.state = "idle"
                qnames = self.queue_names()
                self.procline("Listening on %s" % ",".join(qnames))
                self.log.info("")
                self.log.info("*** Listening on %s..." % green(", ".join(qnames)))
                wait_for_job = not burst
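                # Non-blocking poll: dequeue_any is called with a falsy timeout
                # and the loop sleeps 100 ms between attempts while
                # status_callback() allows it, instead of blocking on Redis.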
                try:
                    result = Queue.dequeue_any(self.queues, False, connection=self.connection)
                    while not result and wait_for_job and self.status_callback():
                        time.sleep(0.1)
                        result = Queue.dequeue_any(self.queues, False, connection=self.connection)
                    if result is None:
                        break
                except StopRequested:
                    break
                except UnpickleError as e:
                    msg = "*** Ignoring unpickleable data on %s." % green(e.queue.name)
                    self.log.warning(msg)
                    self.log.debug("Data follows:")
                    self.log.debug(e.raw_data)
                    self.log.debug("End of unreadable data.")
                    self.failed_queue.push_job_id(e.job_id)
                    continue

                self.state = "busy"

                job, queue = result
                self.log.info("%s: %s (%s)" % (green(queue.name), blue(job.description), job.id))

                self.fork_and_perform_job(job)

                did_perform_work = True
        finally:
            if not self.is_horse:
                self.register_death()
        return did_perform_work
Example #4
    def dispatch(self, burst=False):  # noqa
        self._install_signal_handlers()

        did_perform_work = False
        self.register_birth()
        self.log.info('RQ dispatcher started, version %s' % __version__)
        self.state = 'starting'
        qnames = self.queue_names()
        self.procline('Listening on %s' % ','.join(qnames))
        self.log.info('')
        self.log.info('*** Listening on %s...' % green(', '.join(qnames)))
        try:
            while True:
                if self.stopped:
                    self.log.info('Stopping on request.')
                    break
                self.state = 'idle'
                wait_for_job = not burst

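                # In burst mode wait_for_job is False, so dequeue_any returns
                # immediately once all queues are empty instead of blocking.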
                try:
                    result = WaitingQueue.dequeue_any(
                        self.queues, wait_for_job, connection=self.connection)
                except StopRequested:
                    break
                except UnpickleError as e:
                    msg = '*** Ignoring unpickleable data on %s.' % green(
                        e.queue.name)
                    self.log.warning(msg)
                    self.log.debug('Data follows:')
                    self.log.debug(e.raw_data)
                    self.log.debug('End of unreadable data.')

                    self.failed_queue.push_job_id(e.job_id)
                    continue
                else:
                    for job, queue in result:
                        self.state = 'busy'

                        self.log.info(
                            '%s: %s (%s)' %
                            (green(queue.name), blue(job.description), job.id))

                        self.fork_and_perform_job(job)

                        did_perform_work = True

                    time.sleep(1)
        finally:
            if not self.is_horse:
                self.register_death()
        return did_perform_work
Example #6
    def dequeue_job_and_maintain_ttl(self, timeout):
        if self._stop_requested:
            raise StopRequested()

        result = None
        while True:
            if self._stop_requested:
                raise StopRequested()

            self.heartbeat()

            if self.gevent_pool.full():
                self.set_state(WorkerStatus.BUSY)
                self.log.warning("RQ GEVENT worker greenlet pool empty current size %s", self.gevent_pool.size)

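            # Wait for a free greenlet slot, re-checking every 100 ms whether a
            # shutdown was requested so stopping is not delayed by a full pool.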
            while self.gevent_pool.full():
                gevent.sleep(0.1)
                if self._stop_requested:
                    raise StopRequested()

            try:
                result = self.queue_class.dequeue_any(self.queues, timeout, connection=self.connection)
                self.set_state(WorkerStatus.IDLE)
                if result is not None:
                    job, queue = result
                    self.log.info('%s: %s (%s)' % (green(queue.name),
                                  blue(job.description), job.id))
                break
            except DequeueTimeout:
                pass

        self.heartbeat()
        return result
Example #7
    def dequeue_job_and_maintain_ttl(self, timeout):
        if self._stopped:
            raise StopRequested()

        result = None
        while True:
            if self._stopped:
                raise StopRequested()

            self.heartbeat()

            while self.gevent_pool.full():
                gevent.sleep(0.1)
                if self._stopped:
                    raise StopRequested()

            try:
                result = self.queue_class.dequeue_any(self.queues, timeout, connection=self.connection)
                if result is not None:
                    job, queue = result
                    self.log.info('%s: %s (%s)' % (green(queue.name),
                                  blue(job.description), job.id))
                break
            except DequeueTimeout:
                pass

        self.heartbeat()
        return result
Example #8
    def dequeue_job_and_maintain_ttl(self, timeout):
        if self._stopped:
            raise StopRequested()

        result = None
        while True:
            if self._stopped:
                raise StopRequested()

            self.heartbeat()

            while self.gevent_pool.full():
                gevent.sleep(0.1)
                if self._stopped:
                    raise StopRequested()

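            # Dequeue with a short 5-second block; when no timeout was
            # requested (as in burst mode) and nothing is queued, wait for all
            # running greenlets to finish before leaving the loop.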
            try:
                result = self.queue_class.dequeue_any(
                    self.queues, 5, connection=self.connection)
                if result is None and timeout is None:
                    self.gevent_pool.join()
                if result is not None:
                    job, queue = result
                    self.log.info(
                        '%s: %s (%s)' %
                        (green(queue.name), blue(job.description), job.id))
                break
            except DequeueTimeout:
                pass

        self.heartbeat()
        return result
Example #9
    def dequeue_job_and_maintain_ttl(self, timeout):
        if self._stop_requested:
            raise StopRequested()

        result = None

        while True:
            self.heartbeat()
            # yield to greenlets already in the pool until a slot frees up
            while not self.gevent_pool.free_count() > 0:
                gevent.sleep(0)

            try:
                result = self.queue_class.dequeue_any(
                    self.queues, timeout, connection=self.connection)
                if result is None and timeout is None:
                    self.gevent_pool.join()
                if result is not None:
                    job, queue = result
                    self.log.info(
                        '%s: %s (%s)' %
                        (green(queue.name), blue(job.description), job.id))
                break
            except DequeueTimeout:
                pass

        self.heartbeat()
        return result
Example #10
 def test_command_queues_default(self):
     """
     Ensure that the default queue is used when no arguments are provided
     to the `rqworker` command
     """
     call_command('rqworker', burst=True)
     self.assertIn("*** Listening on {}...\n".format(green("default")),
                   self.logger_output.getvalue())
Example #11
 def test_command_queues_same_connection_default_port(self):
     """
     Ensure that the `rqworker` command correctly parses the queues list
     """
     queues = ['default', 'django_rq_test']
     call_command('rqworker', *queues, burst=True)
     self.assertIn("*** Listening on {}...\n".format(
         green(", ".join(queues))),
         self.logger_output.getvalue())
Example #12
    def perform_job(self, job, queue, heartbeat_ttl=None):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job, heartbeat_ttl)

        push_connection(self.connection)

        started_job_registry = StartedJobRegistry(job.origin,
                                                  self.connection,
                                                  job_class=self.job_class)

        try:
            job.started_at = utcnow()
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            with self.death_penalty_class(timeout, JobTimeoutException, job_id=job.id):
                rv = job.perform(self.workerKwargs)

            job.ended_at = utcnow()

            # Pickle the result in the same try-except block since we need
            # to use the same exc handling when pickling fails
            job._result = rv

            self.handle_job_success(job=job,
                                    queue=queue,
                                    started_job_registry=started_job_registry)
        except:
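            # Bare except: even BaseException subclasses mark the job as
            # failed and run the registered exception handlers.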
            job.ended_at = utcnow()
            self.handle_job_failure(job=job,
                                    started_job_registry=started_job_registry)
            self.handle_exception(job, *sys.exc_info())
            return False

        finally:
            pop_connection()

        self.log.info('{0}: {1} ({2})'.format(
            green(job.origin), blue('Job OK'), job.id))
        if rv is not None:
            log_result = "{0!r}".format(as_text(text_type(rv)))
            self.log.debug('Result: %s', yellow(log_result))

        if self.log_result_lifespan:
            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl == 0:
                self.log.info('Result discarded immediately')
            elif result_ttl > 0:
                self.log.info(
                    'Result is kept for {0} seconds'.format(result_ttl))
            else:
                self.log.warning(
                    'Result will never expire, clean up result key manually')

        return True
Example #13
    def dequeue_job_and_maintain_ttl(self, timeout):
        if self._stop_requested:
            raise StopRequested()

        result = None

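        # Only report IDLE (and re-log the listening banner) once every
        # greenlet has finished.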
        if not self.gevent_greenlets and self.get_state() != WorkerStatus.IDLE:
            qnames = self.queue_names()
            self.set_state(WorkerStatus.IDLE)
            self.procline('Listening on {0}'.format(','.join(qnames)))
            self.log.info('')
            self.log.info('*** Listening on {0}...'.format(
                green(', '.join(qnames))))

        while True:
            if self._stop_requested:
                raise StopRequested()

            self.heartbeat()

            while self.gevent_pool.full():
                gevent.sleep(0.1)
                if self._stop_requested:
                    raise StopRequested()

            try:
                result = self.queue_class.dequeue_any(
                    self.queues, timeout, connection=self.connection)
                if result is not None:
                    job, queue = result
                    self.log.info('{0}: {1} ({2})'.format(
                        green(queue.name), blue(job.description), job.id))

                break
            except DequeueTimeout:
                pass

        self.heartbeat()
        return result
Example #15
        def job_done(child):
            self.gevent_greenlets.remove(child)
            self.did_perform_work = True
            self.heartbeat()

            if not self.gevent_greenlets and self.get_state() != WorkerStatus.IDLE:
                qnames = self.queue_names()
                self.set_state(WorkerStatus.IDLE)
                self.procline('Listening on {0}'.format(','.join(qnames)))
                self.log.info('')
                self.log.info('*** Listening on {0}...'.format(green(', '.join(qnames))))

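            # A successfully finished job releases any jobs that depend on it.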
            if job.get_status() == JobStatus.FINISHED:
                queue.enqueue_dependents(job)
Example #18
    def perform_job(self, job, *, loop=None):
        """Performs the actual work of a job."""

        yield from self.prepare_job_execution(job)

        pipe = self.connection.multi_exec()
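        # multi_exec() opens an aioredis MULTI/EXEC transaction; all the
        # pipelined writes below are committed atomically by pipe.execute().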
        started_job_registry = StartedJobRegistry(job.origin, self.connection)

        try:
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            try:
                rv = yield from asyncio.wait_for(job.perform(),
                                                 timeout,
                                                 loop=loop)
            except asyncio.TimeoutError as error:
                raise JobTimeoutException from error

            # Pickle the result in the same try-except block since we
            # need to use the same exc handling when pickling fails
            yield from self.set_current_job_id(None, pipeline=pipe)

            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl != 0:
                job.ended_at = utcnow()
                job._status = JobStatus.FINISHED
                yield from job.save(pipeline=pipe)

                finished_job_registry = FinishedJobRegistry(
                    job.origin, self.connection)
                yield from finished_job_registry.add(job, result_ttl, pipe)

            yield from job.cleanup(result_ttl, pipeline=pipe)
            yield from started_job_registry.remove(job, pipeline=pipe)

            yield from pipe.execute()

        except Exception:
            # TODO: if `pipe.execute()` raises `ConnectionClosedError` and we
            # then add commands to the already-executed pipeline, the lines
            # below raise "AssertionError: Pipeline already executed. Create
            # new one."
            yield from job.set_status(JobStatus.FAILED, pipeline=pipe)
            yield from started_job_registry.remove(job, pipeline=pipe)
            yield from self.set_current_job_id(None, pipeline=pipe)
            try:
                yield from pipe.execute()
            except Exception:
                # Ensure that custom exception handlers are called
                # even if Redis is down
                pass
            yield from self.handle_exception(job, *sys.exc_info())
            return False

        logger.info('%s: %s (%s)', green(job.origin), blue('Job OK'), job.id)
        if rv:
            log_result = "{!r}".format(as_text(text_type(rv)))
            logger.debug('Result: %s', yellow(log_result))

        if result_ttl == 0:
            logger.info('Result discarded immediately')
        elif result_ttl > 0:
            logger.info('Result is kept for %s seconds', result_ttl)
        else:
            logger.warning(
                'Result will never expire, clean up result key manually')

        return True