Example #1
def discard_all(connection=None,
        connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
    """Discard all waiting tasks.

    This will ignore all tasks waiting for execution, and they will
    be deleted from the messaging server.

    :returns: the number of tasks discarded.

    """
    consumer = TaskConsumer(connection=connection)
    try:
        return consumer.discard_all()
    finally:
        consumer.close()
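
A minimal usage sketch (not from the source): the connection class is borrowed
from Example #2 and is only an assumption about which broker connection is in
use; when a connection is passed in explicitly, closing it remains the
caller's responsibility.

connection = DjangoAMQPConnection()  # assumed connection class, see Example #2
try:
    discarded = discard_all(connection=connection)
    print("Discarded %d waiting task(s)" % discarded)
finally:
    connection.close()  # discard_all() only closes the consumer it creates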
Example #2
def discard_all():
    """Discard all waiting tasks.

    This will ignore all tasks waiting for execution, and they will
    be deleted from the messaging server.

    :returns: the number of tasks discarded.

    :rtype: int

    """
    amqp_connection = DjangoAMQPConnection()
    consumer = TaskConsumer(connection=amqp_connection)
    discarded_count = consumer.discard_all()
    amqp_connection.close()
    return discarded_count
Example #3
    def reset_connection(self):
        """Reset the AMQP connection, and reinitialize the
        :class:`celery.messaging.TaskConsumer` instance.

        Resets the task consumer in :attr:`task_consumer`.

        """
        if self.task_consumer:
            self.task_consumer.connection.close()
        amqp_connection = DjangoAMQPConnection()
        self.task_consumer = TaskConsumer(connection=amqp_connection)
        self.task_consumer_it = self.task_consumer.iterqueue(infinite=True)
Example #4
    def get_consumer(self, connection=None,
            connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
        """Get a celery task message consumer.

        :rtype: :class:`celery.messaging.TaskConsumer`

        Please be sure to close the AMQP connection when you're done
        with this object, e.g.:

            >>> consumer = self.get_consumer()
            >>> # do something with consumer
            >>> consumer.connection.close()

        """
        connection = connection or self.establish_connection(connect_timeout)
        return TaskConsumer(connection=connection, exchange=self.exchange,
                            routing_key=self.routing_key)
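
Following the docstring's advice, a hedged sketch of the close-when-done
pattern; ``publisher_like`` is a hypothetical stand-in for whatever object
defines ``get_consumer()``, and ``discard_all()`` is used only because it is
a consumer operation already shown in the examples above.

consumer = publisher_like.get_consumer()
try:
    consumer.discard_all()  # any work with the consumer goes here
finally:
    consumer.connection.close()  # close the AMQP connection when done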
Example #5
def _discard(connection):
    consumer = TaskConsumer(connection=connection)
    try:
        return consumer.discard_all()
    finally:
        consumer.close()
Example #6
class WorkController(object):
    """Executes tasks waiting in the task queue.

    :param concurrency: see :attr:`concurrency`.

    :param logfile: see :attr:`logfile`.

    :param loglevel: see :attr:`loglevel`.

    :param queue_wakeup_after: see :attr:`queue_wakeup_after`.


    .. attribute:: concurrency

        The number of simultaneous processes doing work (default:
        :const:`celery.conf.DAEMON_CONCURRENCY`)

    .. attribute:: loglevel

        The loglevel used (default: :const:`logging.INFO`)

    .. attribute:: logfile

        The logfile used, if no logfile is specified it uses ``stderr``
        (default: :const:`celery.conf.DAEMON_LOG_FILE`).

    .. attribute:: queue_wakeup_after

        How long the daemon sleeps when the queue is empty before waking
        up to check for more work
        (default: :const:`celery.conf.QUEUE_WAKEUP_AFTER`).

    .. attribute:: empty_msg_emit_every

        How often the daemon emits the ``"Waiting for queue..."`` message.
        If this is ``None``, the message will never be logged.
        (default: :const:`celery.conf.EMPTY_MSG_EMIT_EVERY`)

    .. attribute:: logger

        The :class:`logging.Logger` instance used for logging.

    .. attribute:: pool

        The :class:`multiprocessing.Pool` instance used.

    .. attribute:: task_consumer

        The :class:`celery.messaging.TaskConsumer` instance used.

    """
    loglevel = logging.ERROR
    concurrency = DAEMON_CONCURRENCY
    logfile = DAEMON_LOG_FILE
    queue_wakeup_after = QUEUE_WAKEUP_AFTER
    empty_msg_emit_every = EMPTY_MSG_EMIT_EVERY

    def __init__(self, concurrency=None, logfile=None, loglevel=None,
            queue_wakeup_after=None, is_detached=False):
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or self.concurrency
        self.logfile = logfile or self.logfile
        self.queue_wakeup_after = queue_wakeup_after or \
                                    self.queue_wakeup_after
        self.logger = setup_logger(loglevel, logfile)
        self.pool = TaskPool(self.concurrency, logger=self.logger)
        self.task_consumer = None
        self.task_consumer_it = None
        self.is_detached = is_detached
        self.reset_connection()

    def reset_connection(self):
        """Reset the AMQP connection, and reinitialize the
        :class:`celery.messaging.TaskConsumer` instance.

        Resets the task consumer in :attr:`task_consumer`.

        """
        if self.task_consumer:
            self.task_consumer.connection.close()
        amqp_connection = DjangoAMQPConnection()
        self.task_consumer = TaskConsumer(connection=amqp_connection)
        self.task_consumer_it = self.task_consumer.iterqueue(infinite=True)

    def connection_diagnostics(self):
        """Diagnose the AMQP connection, and reset connection if
        necessary."""
        connection = self.task_consumer.backend.channel.connection

        if not connection:
            self.logger.info(
                    "AMQP Connection has died, restoring connection.")
            self.reset_connection()

    def receive_message(self):
        """Receive the next message from the message broker.

        Tries to reset the AMQP connection if it is not available.
        Raises :exc:`EmptyQueue` if no message is waiting on the queue.

        :rtype: :class:`carrot.messaging.Message` instance.

        """
        message = self.task_consumer_it.next()
        if not message:
            raise EmptyQueue()
        return message

    def process_task(self, message):
        """Process task message by passing it to the pool of workers."""
        task = TaskWrapper.from_message(message, logger=self.logger)
        self.logger.info("Got task from broker: %s[%s]" % (
            task.task_name, task.task_id))
        self.logger.debug("Got a task: %s. Trying to execute it..." % task)

        result = task.execute_using_pool(self.pool, self.loglevel,
                                         self.logfile)

        self.logger.debug("Task %s has been executed asynchronously." % task)

        return result

    def execute_next_task(self):
        """Execute the next task on the queue using the multiprocessing pool.

        Catches all exceptions and logs them with level
        :const:`logging.CRITICAL`.

        Raises :exc:`EmptyQueue` exception if there is no message
        waiting on the queue.

        """
        self.process_task(self.receive_message())

    def schedule_retry_tasks(self):
        """Reschedule all requeued tasks waiting for retry."""
        pass


    def run(self):
        """Starts the workers main loop."""
        log_wait = lambda: self.logger.info("Waiting for queue...")
        ev_msg_waiting = EventTimer(log_wait, self.empty_msg_emit_every)

        self.pool.run()
        PeriodicWorkController().start()

        # If DEBUG logging is enabled, print the pool's child PIDs,
        # and when not running detached, sleep for a second before starting.
        if self.logger.isEnabledFor(logging.DEBUG):
            self.logger.debug("Pool child processes: [%s]" % (
                "|".join(map(str, self.pool.get_worker_pids()))))
            if not self.is_detached:
                time.sleep(1)

        while True:
            try:
                self.execute_next_task()
            except ValueError:
                # execute_next_task didn't return a r/name/id tuple,
                # probably because it got an exception.
                continue
            except EmptyQueue:
                ev_msg_waiting.tick()
                time.sleep(self.queue_wakeup_after)
                continue
            except UnknownTask, exc:
                self.logger.info("Unknown task ignored: %s" % (exc))
                continue
            except Exception, exc:
                self.logger.critical("Message queue raised %s: %s\n%s" % (
                             exc.__class__, exc, traceback.format_exc()))
                continue
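
A hypothetical driver for this class might look like the sketch below; the
argument values are illustrative only, and ``run()`` blocks, consuming tasks
until the process is interrupted.

import logging

worker = WorkController(concurrency=4,
                        loglevel=logging.INFO,
                        logfile="celeryd.log",
                        is_detached=False)
worker.run()  # starts the pool and the main consume loop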