class EventletCallback(object):
    """ Adapt an eventlet ``Event`` to a success/failure callback pair.

    One greenthread blocks in :meth:`wait` until another greenthread
    resolves the callback via :meth:`success` or :meth:`failure`.

    :param timeout: seconds to wait before giving up; raises
        ``eventlet.Timeout`` if no result arrives in time.  Previously
        hard-coded to 10; the default preserves that behaviour.
    """

    def __init__(self, timeout=10):
        self.timeout = timeout
        self.event = Event()

    def wait(self):
        """ Block until a result or exception is sent, or `timeout` elapses.

        Returns the value passed to :meth:`success`, or re-raises the
        exception passed to :meth:`failure`.
        """
        with eventlet.Timeout(self.timeout):
            return self.event.wait()

    def success(self, result):
        """ Resolve the waiting greenthread with ``result``. """
        self.event.send(result)

    def failure(self, exc):
        """ Resolve the waiting greenthread by raising ``exc`` in it. """
        self.event.send_exception(exc)
class QueueConsumer(SharedExtension, ProviderCollector, ConsumerMixin):
    """ Shared extension that consumes AMQP messages for its providers.

    Runs kombu's ``ConsumerMixin`` loop in a managed greenthread; providers
    register queues with it and receive messages via ``handle_message``.
    Message acks/requeues are stashed and performed from within the
    consumer thread on each ``drain_events`` iteration.
    """

    def __init__(self):
        # provider -> kombu consumer, populated in get_consumers()
        self._consumers = {}
        # messages delivered but not yet acked/requeued
        self._pending_messages = set()
        self._pending_ack_messages = []
        self._pending_requeue_messages = []
        # provider -> Event, sent once the consumer has been cancelled
        self._pending_remove_providers = {}

        # greenthread running the ConsumerMixin loop
        self._gt = None
        self._starting = False

        # fired (or errored) once consumers are ready to accept messages
        self._consumers_ready = Event()
        super(QueueConsumer, self).__init__()

    @property
    def amqp_uri(self):
        return self.container.config[AMQP_URI_CONFIG_KEY]

    @property
    def prefetch_count(self):
        # The prefetch_count should be larger than max_workers.
        # If prefetch_count <= max_workers there can be a deadlock between
        # drain_events and on_iteration (since msg.ack happens in it),
        # which slows down throughput.
        return self.container.max_workers + 1

    @property
    def accept(self):
        # serialization formats accepted from the broker
        return self.container.accept

    def _handle_thread_exited(self, gt):
        # linked to self._gt; propagates an unexpected consumer-thread death
        # to anyone blocked in start() waiting for _consumers_ready
        exc = None
        try:
            gt.wait()
        except Exception as e:
            exc = e

        if not self._consumers_ready.ready():
            self._consumers_ready.send_exception(exc)

    def setup(self):
        verify_amqp_uri(self.amqp_uri)

    def start(self):
        """ Spawn the consumer thread and block until it is ready. """
        if not self._starting:
            self._starting = True

            _log.debug('starting %s', self)
            self._gt = self.container.spawn_managed_thread(self.run)
            self._gt.link(self._handle_thread_exited)
        try:
            _log.debug('waiting for consumer ready %s', self)
            self._consumers_ready.wait()
        except QueueConsumerStopped:
            _log.debug('consumer was stopped before it started %s', self)
        except Exception as exc:
            _log.debug('consumer failed to start %s (%s)', self, exc)
        else:
            _log.debug('started %s', self)

    def stop(self):
        """ Stop the queue-consumer gracefully.

        Wait until the last provider has been unregistered and for
        the ConsumerMixin's greenthread to exit (i.e. until all pending
        messages have been acked or requeued and all consumers stopped).
        """
        if not self._consumers_ready.ready():
            _log.debug('stopping while consumer is starting %s', self)

            stop_exc = QueueConsumerStopped()

            # stopping before we have started successfully by brutally
            # killing the consumer thread as we don't have a way to hook
            # into the pre-consumption startup process
            self._gt.kill(stop_exc)

        self.wait_for_providers()

        try:
            _log.debug('waiting for consumer death %s', self)
            self._gt.wait()
        except QueueConsumerStopped:
            # raised by the kill() above; expected during early stop
            pass

        super(QueueConsumer, self).stop()
        _log.debug('stopped %s', self)

    def kill(self):
        """ Kill the queue-consumer.

        Unlike `stop()` any pending message ack or requeue-requests,
        requests to remove providers, etc are lost and the consume thread
        is asked to terminate as soon as possible.
        """
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self._gt is not None and not self._gt.dead:
            # we can't just kill the thread because we have to give
            # ConsumerMixin a chance to close the sockets properly.
            self._providers = set()
            self._pending_messages = set()
            self._pending_ack_messages = []
            self._pending_requeue_messages = []
            self._pending_remove_providers = {}
            self.should_stop = True
            try:
                self._gt.wait()
            except Exception as exc:
                # discard the exception since we're already being killed
                _log.warn('QueueConsumer %s raised `%s` during kill',
                          self, exc)

            super(QueueConsumer, self).kill()
            _log.debug('killed %s', self)

    def unregister_provider(self, provider):
        """ Cancel `provider`'s consumer and unregister it.

        Blocks until the consumer thread has actually cancelled the
        consumer (cancellation must happen inside the consumer thread).
        """
        if not self._consumers_ready.ready():
            # we cannot handle the situation where we are starting up and
            # want to remove a consumer at the same time
            # TODO: With the upcoming error handling mechanism, this needs
            # TODO: to be thought through again.
            self._last_provider_unregistered.send()
            return

        removed_event = Event()
        # we can only cancel a consumer from within the consumer thread
        self._pending_remove_providers[provider] = removed_event
        # so we will just register the consumer to be canceled
        removed_event.wait()

        super(QueueConsumer, self).unregister_provider(provider)

    def ack_message(self, message):
        # stash the ack; it is performed inside the consumer thread
        # by _process_pending_message_acks()
        _log.debug("stashing message-ack: %s", message)
        self._pending_messages.remove(message)
        self._pending_ack_messages.append(message)

    def requeue_message(self, message):
        # stash the requeue; performed inside the consumer thread
        _log.debug("stashing message-requeue: %s", message)
        self._pending_messages.remove(message)
        self._pending_requeue_messages.append(message)

    def _on_message(self, body, message):
        # first callback on every delivery; tracks in-flight messages
        _log.debug("received message: %s", message)
        self._pending_messages.add(message)

    def _cancel_consumers_if_requested(self):
        # runs inside the consumer thread (via on_iteration)
        provider_remove_events = self._pending_remove_providers.items()
        self._pending_remove_providers = {}

        for provider, removed_event in provider_remove_events:
            consumer = self._consumers.pop(provider)

            _log.debug('cancelling consumer [%s]: %s', provider, consumer)
            consumer.cancel()
            removed_event.send()

    def _process_pending_message_acks(self):
        # runs inside the consumer thread (via on_iteration); performs
        # stashed acks/requeues, yielding between each to stay cooperative
        messages = self._pending_ack_messages
        if messages:
            _log.debug('ack() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.ack()
                eventlet.sleep()

        messages = self._pending_requeue_messages
        if messages:
            _log.debug('requeue() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.requeue()
                eventlet.sleep()

    @property
    def connection(self):
        """ Provide the connection parameters for kombu's ConsumerMixin.

        The `Connection` object is a declaration of connection parameters
        that is lazily evaluated. It doesn't represent an established
        connection to the broker at this point.
        """
        heartbeat = self.container.config.get(HEARTBEAT_CONFIG_KEY,
                                              DEFAULT_HEARTBEAT)
        return Connection(self.amqp_uri, heartbeat=heartbeat)

    def get_consumers(self, consumer_cls, channel):
        """ Kombu callback to set up consumers.

        Called after any (re)connection to the broker.
        """
        _log.debug('setting up consumers %s', self)

        for provider in self._providers:
            callbacks = [self._on_message, provider.handle_message]

            consumer = consumer_cls(queues=[provider.queue],
                                    callbacks=callbacks,
                                    accept=self.accept)
            consumer.qos(prefetch_count=self.prefetch_count)

            self._consumers[provider] = consumer
        return self._consumers.values()

    def on_iteration(self):
        """ Kombu callback for each `drain_events` loop iteration."""
        self._cancel_consumers_if_requested()
        self._process_pending_message_acks()

        num_consumers = len(self._consumers)
        num_pending_messages = len(self._pending_messages)
        # stop once all consumers are cancelled and nothing is in flight
        if num_consumers + num_pending_messages == 0:
            _log.debug('requesting stop after iteration')
            self.should_stop = True

    def on_connection_error(self, exc, interval):
        # kombu callback; the mixin will retry the connection itself
        _log.warn("Error connecting to broker at {} ({}).\n"
                  "Retrying in {} seconds.".format(self.amqp_uri, exc,
                                                   interval))

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        """ Kombu callback when consumers are ready to accept messages.

        Called after any (re)connection to the broker.
        """
        if not self._consumers_ready.ready():
            _log.debug('consumer started %s', self)
            self._consumers_ready.send(None)

        # notify providers that opt in via an on_consume_ready attribute
        for provider in self._providers:
            try:
                callback = provider.on_consume_ready
            except AttributeError:
                pass
            else:
                callback()
class QueueConsumer(SharedExtension, ProviderCollector, ConsumerMixin):
    """ Shared extension that consumes AMQP messages for its providers.

    Variant that binds its configuration in ``setup()`` and caches a single
    kombu ``Connection``; also carries a lifted copy of kombu's ``consume``
    loop so stop requests are honoured without waiting on a drain timeout.
    """

    # bound in setup(); class-level None allows introspection before setup
    amqp_uri = None
    prefetch_count = None

    def __init__(self):
        # cached kombu Connection (see the `connection` property)
        self._connection = None

        # provider -> kombu consumer, populated in get_consumers()
        self._consumers = {}
        # messages delivered but not yet acked/requeued
        self._pending_messages = set()
        self._pending_ack_messages = []
        self._pending_requeue_messages = []
        # provider -> Event, sent once the consumer has been cancelled
        self._pending_remove_providers = {}

        # greenthread running the ConsumerMixin loop
        self._gt = None
        self._starting = False

        # fired (or errored) once consumers are ready to accept messages
        self._consumers_ready = Event()
        super(QueueConsumer, self).__init__()

    def _handle_thread_exited(self, gt):
        # linked to self._gt; propagates an unexpected consumer-thread death
        # to anyone blocked in start() waiting for _consumers_ready
        exc = None
        try:
            gt.wait()
        except Exception as e:
            exc = e

        if not self._consumers_ready.ready():
            self._consumers_ready.send_exception(exc)

    def setup(self):
        # bind container config now that the container is available
        self.amqp_uri = self.container.config[AMQP_URI_CONFIG_KEY]
        self.accept = self.container.accept
        self.prefetch_count = self.container.max_workers
        verify_amqp_uri(self.amqp_uri)

    def start(self):
        """ Spawn the consumer thread and block until it is ready. """
        if not self._starting:
            self._starting = True

            _log.debug('starting %s', self)
            self._gt = self.container.spawn_managed_thread(self.run,
                                                           protected=True)
            self._gt.link(self._handle_thread_exited)
        try:
            _log.debug('waiting for consumer ready %s', self)
            self._consumers_ready.wait()
        except QueueConsumerStopped:
            _log.debug('consumer was stopped before it started %s', self)
        except Exception as exc:
            _log.debug('consumer failed to start %s (%s)', self, exc)
        else:
            _log.debug('started %s', self)

    def stop(self):
        """ Stop the queue-consumer gracefully.

        Wait until the last provider has been unregistered and for
        the ConsumerMixin's greenthread to exit (i.e. until all pending
        messages have been acked or requeued and all consumers stopped).
        """
        if not self._consumers_ready.ready():
            _log.debug('stopping while consumer is starting %s', self)

            stop_exc = QueueConsumerStopped()

            # stopping before we have started successfully by brutally
            # killing the consumer thread as we don't have a way to hook
            # into the pre-consumption startup process
            self._gt.kill(stop_exc)

        self.wait_for_providers()

        try:
            _log.debug('waiting for consumer death %s', self)
            self._gt.wait()
        except QueueConsumerStopped:
            # raised by the kill() above; expected during early stop
            pass

        super(QueueConsumer, self).stop()
        _log.debug('stopped %s', self)

    def kill(self):
        """ Kill the queue-consumer.

        Unlike `stop()` any pending message ack or requeue-requests,
        requests to remove providers, etc are lost and the consume thread
        is asked to terminate as soon as possible.
        """
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self._gt is not None and not self._gt.dead:
            # we can't just kill the thread because we have to give
            # ConsumerMixin a chance to close the sockets properly.
            self._providers = set()
            self._pending_messages = set()
            self._pending_ack_messages = []
            self._pending_requeue_messages = []
            self._pending_remove_providers = {}
            self.should_stop = True
            try:
                self._gt.wait()
            except Exception as exc:
                # discard the exception since we're already being killed
                _log.warn('QueueConsumer %s raised `%s` during kill',
                          self, exc)

            super(QueueConsumer, self).kill()
            _log.debug('killed %s', self)

    def unregister_provider(self, provider):
        """ Cancel `provider`'s consumer and unregister it.

        Blocks until the consumer thread has actually cancelled the
        consumer (cancellation must happen inside the consumer thread).
        """
        if not self._consumers_ready.ready():
            # we cannot handle the situation where we are starting up and
            # want to remove a consumer at the same time
            # TODO: With the upcoming error handling mechanism, this needs
            # TODO: to be thought through again.
            self._last_provider_unregistered.send()
            return

        removed_event = Event()
        # we can only cancel a consumer from within the consumer thread
        self._pending_remove_providers[provider] = removed_event
        # so we will just register the consumer to be canceled
        removed_event.wait()

        super(QueueConsumer, self).unregister_provider(provider)

    def ack_message(self, message):
        # stash the ack; it is performed inside the consumer thread
        # by _process_pending_message_acks()
        _log.debug("stashing message-ack: %s", message)
        self._pending_messages.remove(message)
        self._pending_ack_messages.append(message)

    def requeue_message(self, message):
        # stash the requeue; performed inside the consumer thread
        _log.debug("stashing message-requeue: %s", message)
        self._pending_messages.remove(message)
        self._pending_requeue_messages.append(message)

    def _on_message(self, body, message):
        # first callback on every delivery; tracks in-flight messages
        _log.debug("received message: %s", message)
        self._pending_messages.add(message)

    def _cancel_consumers_if_requested(self):
        # runs inside the consumer thread (via on_iteration)
        provider_remove_events = self._pending_remove_providers.items()
        self._pending_remove_providers = {}

        for provider, removed_event in provider_remove_events:
            consumer = self._consumers.pop(provider)

            _log.debug('cancelling consumer [%s]: %s', provider, consumer)
            consumer.cancel()
            removed_event.send()

    def _process_pending_message_acks(self):
        # runs inside the consumer thread (via on_iteration); performs
        # stashed acks/requeues, yielding between each to stay cooperative
        messages = self._pending_ack_messages
        if messages:
            _log.debug('ack() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.ack()
                eventlet.sleep()

        messages = self._pending_requeue_messages
        if messages:
            _log.debug('requeue() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.requeue()
                eventlet.sleep()

    @property
    def connection(self):
        """ Kombu requirement """
        if self.amqp_uri is None:
            return  # don't cache a connection during introspection
        if self._connection is None:
            self._connection = Connection(self.amqp_uri)
        return self._connection

    def get_consumers(self, Consumer, channel):
        """ Kombu callback to set up consumers.

        Called after any (re)connection to the broker.
        """
        _log.debug('setting up consumers %s', self)

        for provider in self._providers:
            callbacks = [self._on_message, provider.handle_message]

            consumer = Consumer(queues=[provider.queue],
                                callbacks=callbacks,
                                accept=self.accept)
            consumer.qos(prefetch_count=self.prefetch_count)

            self._consumers[provider] = consumer
        return self._consumers.values()

    def on_iteration(self):
        """ Kombu callback for each `drain_events` loop iteration."""
        self._cancel_consumers_if_requested()
        self._process_pending_message_acks()

        num_consumers = len(self._consumers)
        num_pending_messages = len(self._pending_messages)
        # stop once all consumers are cancelled and nothing is in flight
        if num_consumers + num_pending_messages == 0:
            _log.debug('requesting stop after iteration')
            self.should_stop = True

    def on_connection_error(self, exc, interval):
        # kombu callback; the mixin will retry the connection itself
        _log.warn("Error connecting to broker at {} ({}).\n"
                  "Retrying in {} seconds.".format(self.amqp_uri, exc,
                                                   interval))

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        """ Kombu callback when consumers are ready to accept messages.

        Called after any (re)connection to the broker.
        """
        if not self._consumers_ready.ready():
            _log.debug('consumer started %s', self)
            self._consumers_ready.send(None)

        # notify providers that opt in via an on_consume_ready attribute
        for provider in self._providers:
            try:
                callback = provider.on_consume_ready
            except AttributeError:
                pass
            else:
                callback()

    def consume(self, limit=None, timeout=None, safety_interval=0.1,
                **kwargs):
        """ Lifted from Kombu.

        We switch the order of the `break` and `self.on_iteration()` to
        avoid waiting on a drain_events timeout before breaking the loop.
        """
        elapsed = 0
        with self.consumer_context(**kwargs) as (conn, channel, consumers):
            for i in limit and range(limit) or count():
                self.on_iteration()
                if self.should_stop:
                    break
                try:
                    conn.drain_events(timeout=safety_interval)
                except socket.timeout:
                    elapsed += safety_interval
                    # Excluding the following clause from coverage,
                    # as timeout never appears to be set - This method
                    # is a lift from kombu so will leave in place for now.
                    if timeout and elapsed >= timeout:  # pragma: no cover
                        raise
                except socket.error:
                    if not self.should_stop:
                        raise
                else:
                    yield
                    elapsed = 0
class QueueConsumer(DependencyProvider, ProviderCollector, ConsumerMixin):
    """ Dependency provider that consumes AMQP messages for its providers.

    Older, ``DependencyProvider``-based variant: configuration comes from
    private properties and the kombu ``Connection`` is cached on first use.
    """

    def __init__(self):
        super(QueueConsumer, self).__init__()
        # cached kombu Connection (see the `connection` property)
        self._connection = None

        # provider -> kombu consumer, populated in get_consumers()
        self._consumers = {}
        # messages delivered but not yet acked/requeued
        self._pending_messages = set()
        self._pending_ack_messages = []
        self._pending_requeue_messages = []
        # provider -> Event, sent once the consumer has been cancelled
        self._pending_remove_providers = {}

        # greenthread running the ConsumerMixin loop
        self._gt = None
        self._starting = False

        # fired (or errored) once consumers are ready to accept messages
        self._consumers_ready = Event()

    @property
    def _amqp_uri(self):
        return self.container.config[AMQP_URI_CONFIG_KEY]

    @property
    def _prefetch_count(self):
        return self.container.max_workers

    def _handle_thread_exited(self, gt):
        # linked to self._gt; propagates an unexpected consumer-thread death
        # to anyone blocked in start() waiting for _consumers_ready
        exc = None
        try:
            gt.wait()
        except Exception as e:
            exc = e

        if not self._consumers_ready.ready():
            self._consumers_ready.send_exception(exc)

    def start(self):
        """ Spawn the consumer thread and block until it is ready. """
        if not self._starting:
            self._starting = True

            _log.debug('starting %s', self)
            self._gt = self.container.spawn_managed_thread(
                self.run, protected=True)
            self._gt.link(self._handle_thread_exited)
        try:
            _log.debug('waiting for consumer ready %s', self)
            self._consumers_ready.wait()
        except QueueConsumerStopped:
            _log.debug('consumer was stopped before it started %s', self)
        except Exception as exc:
            _log.debug('consumer failed to start %s (%s)', self, exc)
        else:
            _log.debug('started %s', self)

    def stop(self):
        """ Stop the queue-consumer gracefully.

        Wait until the last provider has been unregistered and for
        the ConsumerMixin's greenthread to exit (i.e. until all pending
        messages have been acked or requeued and all consumers stopped).
        """
        if not self._consumers_ready.ready():
            _log.debug('stopping while consumer is starting %s', self)

            stop_exc = QueueConsumerStopped()

            # stopping before we have started successfully by brutally
            # killing the consumer thread as we don't have a way to hook
            # into the pre-consumption startup process
            self._gt.kill(stop_exc)

        self.wait_for_providers()

        try:
            _log.debug('waiting for consumer death %s', self)
            self._gt.wait()
        except QueueConsumerStopped:
            # raised by the kill() above; expected during early stop
            pass

        super(QueueConsumer, self).stop()
        _log.debug('stopped %s', self)

    def kill(self):
        """ Kill the queue-consumer.

        Unlike `stop()` any pending message ack or requeue-requests,
        requests to remove providers, etc are lost and the consume thread
        is asked to terminate as soon as possible.
        """
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self._gt and not self._gt.dead:
            # we can't just kill the thread because we have to give
            # ConsumerMixin a chance to close the sockets properly.
            self._providers = set()
            self._pending_messages = set()
            self._pending_ack_messages = []
            self._pending_requeue_messages = []
            self._pending_remove_providers = {}
            self.should_stop = True
            self._gt.wait()

            super(QueueConsumer, self).kill()
            _log.debug('killed %s', self)

    def unregister_provider(self, provider):
        """ Cancel `provider`'s consumer and unregister it.

        Blocks until the consumer thread has actually cancelled the
        consumer (cancellation must happen inside the consumer thread).
        """
        if not self._consumers_ready.ready():
            # we cannot handle the situation where we are starting up and
            # want to remove a consumer at the same time
            # TODO: With the upcoming error handling mechanism, this needs
            # TODO: to be thought through again.
            self._last_provider_unregistered.send()
            return

        removed_event = Event()
        # we can only cancel a consumer from within the consumer thread
        self._pending_remove_providers[provider] = removed_event
        # so we will just register the consumer to be canceled
        removed_event.wait()

        super(QueueConsumer, self).unregister_provider(provider)

    def ack_message(self, message):
        # stash the ack; it is performed inside the consumer thread
        # by _process_pending_message_acks()
        _log.debug("stashing message-ack: %s", message)
        self._pending_messages.remove(message)
        self._pending_ack_messages.append(message)

    def requeue_message(self, message):
        # stash the requeue; performed inside the consumer thread
        _log.debug("stashing message-requeue: %s", message)
        self._pending_messages.remove(message)
        self._pending_requeue_messages.append(message)

    def _on_message(self, body, message):
        # first callback on every delivery; tracks in-flight messages
        _log.debug("received message: %s", message)
        self._pending_messages.add(message)

    def _cancel_consumers_if_requested(self):
        # runs inside the consumer thread (via on_iteration)
        provider_remove_events = self._pending_remove_providers.items()
        self._pending_remove_providers = {}

        for provider, removed_event in provider_remove_events:
            consumer = self._consumers.pop(provider)

            _log.debug('cancelling consumer [%s]: %s', provider, consumer)
            consumer.cancel()
            removed_event.send()

    def _process_pending_message_acks(self):
        # runs inside the consumer thread (via on_iteration); performs
        # stashed acks/requeues, yielding between each to stay cooperative
        messages = self._pending_ack_messages
        if messages:
            _log.debug('ack() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.ack()
                eventlet.sleep()

        messages = self._pending_requeue_messages
        if messages:
            _log.debug('requeue() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.requeue()
                eventlet.sleep()

    @property
    def connection(self):
        """ Kombu requirement """
        if self._connection is None:
            self._connection = Connection(self._amqp_uri)
        return self._connection

    def get_consumers(self, Consumer, channel):
        """ Kombu callback to set up consumers.

        Called after any (re)connection to the broker.
        """
        _log.debug('setting up consumers %s', self)

        for provider in self._providers:
            callbacks = [self._on_message, provider.handle_message]

            consumer = Consumer(queues=[provider.queue],
                                callbacks=callbacks)
            consumer.qos(prefetch_count=self._prefetch_count)

            self._consumers[provider] = consumer
        return self._consumers.values()

    def on_iteration(self):
        """ Kombu callback for each `drain_events` loop iteration."""
        self._cancel_consumers_if_requested()
        self._process_pending_message_acks()

        num_consumers = len(self._consumers)
        num_pending_messages = len(self._pending_messages)
        # stop once all consumers are cancelled and nothing is in flight
        if num_consumers + num_pending_messages == 0:
            _log.debug('requesting stop after iteration')
            self.should_stop = True

    def on_connection_error(self, exc, interval):
        # kombu callback; the mixin will retry the connection itself
        _log.warn('broker connection error: {}. '
                  'Retrying in {} seconds.'.format(exc, interval))

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        """ Kombu callback when consumers are ready to accept messages.

        Called after any (re)connection to the broker.
        """
        if not self._consumers_ready.ready():
            _log.debug('consumer started %s', self)
            self._consumers_ready.send(None)

    def consume(self, limit=None, timeout=None, safety_interval=0.1,
                **kwargs):
        """ Lifted from Kombu.

        We switch the order of the `break` and `self.on_iteration()` to
        avoid waiting on a drain_events timeout before breaking the loop.
        """
        elapsed = 0
        with self.consumer_context(**kwargs) as (conn, channel, consumers):
            for i in limit and range(limit) or count():
                self.on_iteration()
                if self.should_stop:
                    break
                try:
                    conn.drain_events(timeout=safety_interval)
                except socket.timeout:
                    elapsed += safety_interval
                    # Excluding the following clause from coverage,
                    # as timeout never appears to be set - This method
                    # is a lift from kombu so will leave in place for now.
                    if timeout and elapsed >= timeout:  # pragma: no cover
                        raise
                except socket.error:
                    if not self.should_stop:
                        raise
                else:
                    yield
                    elapsed = 0
class QueueConsumer(SharedExtension, ProviderCollector, ConsumerMixin):
    """ Shared extension that consumes AMQP messages for its providers.

    Variant that acks/requeues directly on the message (guarded by a
    live-connection check) and dispatches each delivery to the provider
    in its own managed greenthread via ``handle_message``.
    """

    def __init__(self):
        # provider -> kombu consumer, populated in get_consumers()
        self._consumers = {}
        # provider -> Event, sent once the consumer has been cancelled
        self._pending_remove_providers = {}

        # greenthread running the ConsumerMixin loop
        self._gt = None
        self._starting = False

        # fired (or errored) once consumers are ready to accept messages
        self._consumers_ready = Event()
        super(QueueConsumer, self).__init__()

    @property
    def amqp_uri(self):
        return self.container.config[AMQP_URI_CONFIG_KEY]

    @property
    def prefetch_count(self):
        return self.container.max_workers

    @property
    def accept(self):
        # serialization formats accepted from the broker
        return self.container.accept

    def _handle_thread_exited(self, gt):
        # linked to self._gt; propagates an unexpected consumer-thread death
        # to anyone blocked in start() waiting for _consumers_ready
        exc = None
        try:
            gt.wait()
        except Exception as e:
            exc = e

        if not self._consumers_ready.ready():
            self._consumers_ready.send_exception(exc)

    def start(self):
        """ Spawn the consumer thread and block until it is ready. """
        if not self._starting:
            self._starting = True

            _log.debug('starting %s', self)
            self._gt = self.container.spawn_managed_thread(self.run)
            self._gt.link(self._handle_thread_exited)
        try:
            _log.debug('waiting for consumer ready %s', self)
            self._consumers_ready.wait()
        except QueueConsumerStopped:
            _log.debug('consumer was stopped before it started %s', self)
        except Exception as exc:
            _log.debug('consumer failed to start %s (%s)', self, exc)
        else:
            _log.debug('started %s', self)

    def stop(self):
        """ Stop the queue-consumer gracefully.

        Wait until the last provider has been unregistered and for
        the ConsumerMixin's greenthread to exit (i.e. until all pending
        messages have been acked or requeued and all consumers stopped).
        """
        if not self._consumers_ready.ready():
            _log.debug('stopping while consumer is starting %s', self)

            stop_exc = QueueConsumerStopped()

            # stopping before we have started successfully by brutally
            # killing the consumer thread as we don't have a way to hook
            # into the pre-consumption startup process
            self._gt.kill(stop_exc)

        self.wait_for_providers()

        try:
            _log.debug('waiting for consumer death %s', self)
            self._gt.wait()
        except QueueConsumerStopped:
            # raised by the kill() above; expected during early stop
            pass

        super(QueueConsumer, self).stop()
        _log.debug('stopped %s', self)

    def kill(self):
        """ Kill the queue-consumer.

        Unlike `stop()` any pending message ack or requeue-requests,
        requests to remove providers, etc are lost and the consume thread
        is asked to terminate as soon as possible.
        """
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self._gt is not None and not self._gt.dead:
            # we can't just kill the thread because we have to give
            # ConsumerMixin a chance to close the sockets properly.
            self._providers = set()
            self._pending_remove_providers = {}
            self.should_stop = True
            try:
                self._gt.wait()
            except Exception as exc:
                # discard the exception since we're already being killed
                _log.warn('QueueConsumer %s raised `%s` during kill',
                          self, exc)

            super(QueueConsumer, self).kill()
            _log.debug('killed %s', self)

    def unregister_provider(self, provider):
        """ Cancel `provider`'s consumer and unregister it.

        Blocks until the consumer thread has actually cancelled the
        consumer (cancellation must happen inside the consumer thread).
        """
        if not self._consumers_ready.ready():
            # we cannot handle the situation where we are starting up and
            # want to remove a consumer at the same time
            # TODO: With the upcoming error handling mechanism, this needs
            # TODO: to be thought through again.
            self._last_provider_unregistered.send()
            return

        removed_event = Event()
        # we can only cancel a consumer from within the consumer thread
        self._pending_remove_providers[provider] = removed_event
        # so we will just register the consumer to be canceled
        removed_event.wait()

        super(QueueConsumer, self).unregister_provider(provider)

    def ack_message(self, message):
        # only attempt to ack if the message connection is alive;
        # otherwise the message will already have been reclaimed by the
        # broker
        if message.channel.connection:
            try:
                message.ack()
            except ConnectionError:  # pragma: no cover
                pass  # ignore connection closing inside conditional

    def requeue_message(self, message):
        # only attempt to requeue if the message connection is alive;
        # otherwise the message will already have been reclaimed by the
        # broker
        if message.channel.connection:
            try:
                message.requeue()
            except ConnectionError:  # pragma: no cover
                pass  # ignore connection closing inside conditional

    def _cancel_consumers_if_requested(self):
        # runs inside the consumer thread (via on_iteration)
        provider_remove_events = self._pending_remove_providers.items()
        self._pending_remove_providers = {}

        for provider, removed_event in provider_remove_events:
            consumer = self._consumers.pop(provider)

            _log.debug('cancelling consumer [%s]: %s', provider, consumer)
            consumer.cancel()
            removed_event.send()

    @property
    def connection(self):
        """ Provide the connection parameters for kombu's ConsumerMixin.

        The `Connection` object is a declaration of connection parameters
        that is lazily evaluated. It doesn't represent an established
        connection to the broker at this point.
        """
        heartbeat = self.container.config.get(HEARTBEAT_CONFIG_KEY,
                                              DEFAULT_HEARTBEAT)
        transport_options = self.container.config.get(
            TRANSPORT_OPTIONS_CONFIG_KEY, DEFAULT_TRANSPORT_OPTIONS)
        ssl = self.container.config.get(AMQP_SSL_CONFIG_KEY)
        login_method = self.container.config.get(LOGIN_METHOD_CONFIG_KEY)
        conn = Connection(self.amqp_uri,
                          transport_options=transport_options,
                          heartbeat=heartbeat,
                          ssl=ssl,
                          login_method=login_method)
        return conn

    def handle_message(self, provider, body, message):
        # dispatch the delivery to the provider in its own managed
        # greenthread so the consume loop is not blocked
        ident = u"{}.handle_message[{}]".format(
            type(provider).__name__, message.delivery_info['routing_key'])
        self.container.spawn_managed_thread(partial(provider.handle_message,
                                                    body, message),
                                            identifier=ident)

    def get_consumers(self, consumer_cls, channel):
        """ Kombu callback to set up consumers.

        Called after any (re)connection to the broker.
        """
        _log.debug('setting up consumers %s', self)

        for provider in self._providers:
            callbacks = [partial(self.handle_message, provider)]

            consumer = consumer_cls(queues=[provider.queue],
                                    callbacks=callbacks,
                                    accept=self.accept)
            consumer.qos(prefetch_count=self.prefetch_count)

            self._consumers[provider] = consumer
        return self._consumers.values()

    def on_iteration(self):
        """ Kombu callback for each `drain_events` loop iteration."""
        self._cancel_consumers_if_requested()

        # stop once all consumers have been cancelled
        if len(self._consumers) == 0:
            _log.debug('requesting stop after iteration')
            self.should_stop = True

    def on_connection_error(self, exc, interval):
        # kombu callback; the mixin will retry the connection itself.
        # sanitize_url strips credentials from the logged URI
        _log.warning("Error connecting to broker at {} ({}).\n"
                     "Retrying in {} seconds.".format(
                         sanitize_url(self.amqp_uri), exc, interval))

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        """ Kombu callback when consumers are ready to accept messages.

        Called after any (re)connection to the broker.
        """
        if not self._consumers_ready.ready():
            _log.debug('consumer started %s', self)
            self._consumers_ready.send(None)
class QueueConsumer(SharedExtension, ProviderCollector, ConsumerMixin):
    """ Shared extension that consumes AMQP messages for its providers.

    SSL-aware variant: the broker URI is verified with the configured SSL
    options in ``setup()`` and the kombu ``Connection`` is built with
    heartbeat and SSL parameters from container config.
    """

    def __init__(self):
        # provider -> kombu consumer, populated in get_consumers()
        self._consumers = {}
        # provider -> Event, sent once the consumer has been cancelled
        self._pending_remove_providers = {}

        # greenthread running the ConsumerMixin loop
        self._gt = None
        self._starting = False

        # fired (or errored) once consumers are ready to accept messages
        self._consumers_ready = Event()
        super(QueueConsumer, self).__init__()

    @property
    def amqp_uri(self):
        return self.container.config[AMQP_URI_CONFIG_KEY]

    @property
    def prefetch_count(self):
        return self.container.max_workers

    @property
    def accept(self):
        # serialization formats accepted from the broker
        return self.container.accept

    def _handle_thread_exited(self, gt):
        # linked to self._gt; propagates an unexpected consumer-thread death
        # to anyone blocked in start() waiting for _consumers_ready
        exc = None
        try:
            gt.wait()
        except Exception as e:
            exc = e

        if not self._consumers_ready.ready():
            self._consumers_ready.send_exception(exc)

    def setup(self):
        ssl = self.container.config.get(AMQP_SSL_CONFIG_KEY)
        verify_amqp_uri(self.amqp_uri, ssl=ssl)

    def start(self):
        """ Spawn the consumer thread and block until it is ready. """
        if not self._starting:
            self._starting = True

            _log.debug('starting %s', self)
            self._gt = self.container.spawn_managed_thread(self.run)
            self._gt.link(self._handle_thread_exited)
        try:
            _log.debug('waiting for consumer ready %s', self)
            self._consumers_ready.wait()
        except QueueConsumerStopped:
            _log.debug('consumer was stopped before it started %s', self)
        except Exception as exc:
            _log.debug('consumer failed to start %s (%s)', self, exc)
        else:
            _log.debug('started %s', self)

    def stop(self):
        """ Stop the queue-consumer gracefully.

        Wait until the last provider has been unregistered and for
        the ConsumerMixin's greenthread to exit (i.e. until all pending
        messages have been acked or requeued and all consumers stopped).
        """
        if not self._consumers_ready.ready():
            _log.debug('stopping while consumer is starting %s', self)

            stop_exc = QueueConsumerStopped()

            # stopping before we have started successfully by brutally
            # killing the consumer thread as we don't have a way to hook
            # into the pre-consumption startup process
            self._gt.kill(stop_exc)

        self.wait_for_providers()

        try:
            _log.debug('waiting for consumer death %s', self)
            self._gt.wait()
        except QueueConsumerStopped:
            # raised by the kill() above; expected during early stop
            pass

        super(QueueConsumer, self).stop()
        _log.debug('stopped %s', self)

    def kill(self):
        """ Kill the queue-consumer.

        Unlike `stop()` any pending message ack or requeue-requests,
        requests to remove providers, etc are lost and the consume thread
        is asked to terminate as soon as possible.
        """
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self._gt is not None and not self._gt.dead:
            # we can't just kill the thread because we have to give
            # ConsumerMixin a chance to close the sockets properly.
            self._providers = set()
            self._pending_remove_providers = {}
            self.should_stop = True
            try:
                self._gt.wait()
            except Exception as exc:
                # discard the exception since we're already being killed
                _log.warn(
                    'QueueConsumer %s raised `%s` during kill', self, exc)

            super(QueueConsumer, self).kill()
            _log.debug('killed %s', self)

    def unregister_provider(self, provider):
        """ Cancel `provider`'s consumer and unregister it.

        Blocks until the consumer thread has actually cancelled the
        consumer (cancellation must happen inside the consumer thread).
        """
        if not self._consumers_ready.ready():
            # we cannot handle the situation where we are starting up and
            # want to remove a consumer at the same time
            # TODO: With the upcoming error handling mechanism, this needs
            # TODO: to be thought through again.
            self._last_provider_unregistered.send()
            return

        removed_event = Event()
        # we can only cancel a consumer from within the consumer thread
        self._pending_remove_providers[provider] = removed_event
        # so we will just register the consumer to be canceled
        removed_event.wait()

        super(QueueConsumer, self).unregister_provider(provider)

    def ack_message(self, message):
        # only attempt to ack if the message connection is alive;
        # otherwise the message will already have been reclaimed by the
        # broker
        if message.channel.connection:
            try:
                message.ack()
            except ConnectionError:  # pragma: no cover
                pass  # ignore connection closing inside conditional

    def requeue_message(self, message):
        # only attempt to requeue if the message connection is alive;
        # otherwise the message will already have been reclaimed by the
        # broker
        if message.channel.connection:
            try:
                message.requeue()
            except ConnectionError:  # pragma: no cover
                pass  # ignore connection closing inside conditional

    def _cancel_consumers_if_requested(self):
        # runs inside the consumer thread (via on_iteration)
        provider_remove_events = self._pending_remove_providers.items()
        self._pending_remove_providers = {}

        for provider, removed_event in provider_remove_events:
            consumer = self._consumers.pop(provider)

            _log.debug('cancelling consumer [%s]: %s', provider, consumer)
            consumer.cancel()
            removed_event.send()

    @property
    def connection(self):
        """ Provide the connection parameters for kombu's ConsumerMixin.

        The `Connection` object is a declaration of connection parameters
        that is lazily evaluated. It doesn't represent an established
        connection to the broker at this point.
        """
        heartbeat = self.container.config.get(
            HEARTBEAT_CONFIG_KEY, DEFAULT_HEARTBEAT
        )
        ssl = self.container.config.get(AMQP_SSL_CONFIG_KEY)
        return Connection(self.amqp_uri, heartbeat=heartbeat, ssl=ssl)

    def handle_message(self, provider, body, message):
        # dispatch the delivery to the provider in its own managed
        # greenthread so the consume loop is not blocked
        ident = u"{}.handle_message[{}]".format(
            type(provider).__name__, message.delivery_info['routing_key']
        )
        self.container.spawn_managed_thread(
            partial(provider.handle_message, body, message), identifier=ident
        )

    def get_consumers(self, consumer_cls, channel):
        """ Kombu callback to set up consumers.

        Called after any (re)connection to the broker.
        """
        _log.debug('setting up consumers %s', self)

        for provider in self._providers:
            callbacks = [partial(self.handle_message, provider)]

            consumer = consumer_cls(
                queues=[provider.queue],
                callbacks=callbacks,
                accept=self.accept
            )
            consumer.qos(prefetch_count=self.prefetch_count)

            self._consumers[provider] = consumer
        return self._consumers.values()

    def on_iteration(self):
        """ Kombu callback for each `drain_events` loop iteration."""
        self._cancel_consumers_if_requested()

        # stop once all consumers have been cancelled
        if len(self._consumers) == 0:
            _log.debug('requesting stop after iteration')
            self.should_stop = True

    def on_connection_error(self, exc, interval):
        # kombu callback; the mixin will retry the connection itself
        _log.warning(
            "Error connecting to broker at {} ({}).\n"
            "Retrying in {} seconds.".format(self.amqp_uri, exc, interval))

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        """ Kombu callback when consumers are ready to accept messages.

        Called after any (re)connection to the broker.
        """
        if not self._consumers_ready.ready():
            _log.debug('consumer started %s', self)
            self._consumers_ready.send(None)
class ServiceContainer(object):
    """ Run and manage the life-cycle of a single service class.

    The container owns the service's dependency providers, a green-thread
    pool for entrypoint workers, and any extra threads spawned through
    :meth:`spawn_managed_thread`. It dies exactly once, either gracefully
    via :meth:`stop` or abruptly via :meth:`kill`.
    """

    def __init__(self, service_cls, worker_ctx_cls, config):
        self.service_cls = service_cls
        self.worker_ctx_cls = worker_ctx_cls

        self.service_name = get_service_name(service_cls)
        self.config = config
        # `or` (not a plain default) so a falsy configured value also
        # falls back to DEFAULT_MAX_WORKERS
        self.max_workers = config.get(MAX_WORKERS_KEY) or DEFAULT_MAX_WORKERS

        self.dependencies = DependencySet()
        for dep in prepare_dependencies(self):
            self.dependencies.add(dep)

        self.started = False
        self._worker_pool = GreenPool(size=self.max_workers)

        self._active_threads = set()
        self._protected_threads = set()
        self._being_killed = False
        self._died = Event()

    @property
    def entrypoints(self):
        # providers that fire workers in response to external events
        return filter(is_entrypoint_provider, self.dependencies)

    @property
    def injections(self):
        # providers that supply dependencies to running workers
        return filter(is_injection_provider, self.dependencies)

    def start(self):
        """ Start a container by starting all the dependency providers.
        """
        _log.debug('starting %s', self)
        self.started = True

        with log_time(_log.debug, 'started %s in %0.3f sec', self):
            self.dependencies.all.prepare()
            self.dependencies.all.start()

    def stop(self):
        """ Stop the container gracefully.

        First all entrypoints are asked to ``stop()``.
        This ensures that no new worker threads are started.

        It is the providers' responsibility to gracefully shut down when
        ``stop()`` is called on them and only return when they have stopped.

        After all entrypoints have stopped the container waits for any
        active workers to complete.

        After all active workers have stopped the container stops all
        injections.

        At this point there should be no more managed threads. In case there
        are any managed threads, they are killed by the container.
        """
        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        _log.debug('stopping %s', self)

        with log_time(_log.debug, 'stopped %s in %0.3f sec', self):
            dependencies = self.dependencies

            # entrypoint deps have to be stopped before injection deps
            # to ensure that running workers can successfully complete
            dependencies.entrypoints.all.stop()

            # there might still be some running workers, which we have to
            # wait for to complete before we can stop injection dependencies
            self._worker_pool.waitall()

            # it should be safe now to stop any injection as there is no
            # active worker which could be using it
            dependencies.injections.all.stop()

            # finally, stop nested dependencies
            dependencies.nested.all.stop()

            # just in case there was a provider not taking care of its
            # workers, or a dependency not taking care of its protected
            # threads
            self._kill_active_threads()
            self._kill_protected_threads()

            self.started = False
            self._died.send(None)

    def kill(self, exc):
        """ Kill the container in a semi-graceful way.

        All non-protected managed threads are killed first. This includes
        all active workers generated by :meth:`ServiceContainer.spawn_worker`.
        Next, dependencies are killed. Finally, any remaining protected
        threads are killed.

        The container dies with the given ``exc``.
        """
        if self._being_killed:
            # this happens if a managed thread exits with an exception
            # while the container is being killed or another caller
            # behaves in a similar manner
            _log.debug('already killing %s ... waiting for death', self)
            self._died.wait()
            # BUGFIX: return here; previously control fell through and a
            # second caller would re-run the kill sequence after the
            # container had already died
            return

        self._being_killed = True

        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        _log.info('killing %s due to "%s"', self, exc)

        self.dependencies.entrypoints.all.kill(exc)
        self._kill_active_threads()
        self.dependencies.all.kill(exc)
        self._kill_protected_threads()

        self.started = False
        self._died.send_exception(exc)

    def wait(self):
        """ Block until the container has been stopped.

        If the container was stopped using ``kill(exc)``, ``wait()`` raises
        ``exc``.

        Any unhandled exception raised in a managed thread or in the
        life-cycle management code also causes the container to be
        ``kill()``ed, which causes an exception to be raised from ``wait()``.
        """
        return self._died.wait()

    def spawn_worker(self, provider, args, kwargs,
                     context_data=None, handle_result=None):
        """ Spawn a worker thread for running the service method decorated
        with an entrypoint ``provider``.

        ``args`` and ``kwargs`` are used as arguments for the service method.

        ``context_data`` is used to initialize a ``WorkerContext``.

        ``handle_result`` is an optional callback which may be passed in by
        the calling entrypoint provider. It is called with the result returned
        or error raised by the service method.
        """
        service = self.service_cls()
        worker_ctx = self.worker_ctx_cls(
            self, service, provider.name, args, kwargs, data=context_data)

        _log.debug('spawning %s', worker_ctx,
                   extra=worker_ctx.extra_for_logging)
        gt = self._worker_pool.spawn(
            self._run_worker, worker_ctx, handle_result)
        self._active_threads.add(gt)
        gt.link(self._handle_thread_exited)
        return worker_ctx

    def spawn_managed_thread(self, run_method, protected=False):
        """ Spawn a managed thread to run ``run_method``.

        Threads can be marked as ``protected``, which means the container
        will not forcibly kill them until after all dependencies have been
        killed. Dependencies that require a managed thread to complete their
        kill procedure should ensure to mark them as ``protected``.

        Any uncaught errors inside ``run_method`` cause the container to be
        killed.

        It is the caller's responsibility to terminate their spawned threads.
        Threads are killed automatically if they are still running after
        all dependencies are stopped during :meth:`ServiceContainer.stop`.

        Entrypoints may only create separate threads using this method,
        to ensure they are life-cycle managed.
        """
        gt = eventlet.spawn(run_method)
        if not protected:
            self._active_threads.add(gt)
        else:
            self._protected_threads.add(gt)
        gt.link(self._handle_thread_exited)
        return gt

    def _run_worker(self, worker_ctx, handle_result):
        """ Execute one worker: inject dependencies, run the service method,
        signal the result, then tear everything down again.
        """
        _log.debug('setting up %s', worker_ctx,
                   extra=worker_ctx.extra_for_logging)

        if not worker_ctx.parent_call_stack:
            _log.debug('starting call chain',
                       extra=worker_ctx.extra_for_logging)
        _log.debug('call stack for %s: %s',
                   worker_ctx, '->'.join(worker_ctx.call_id_stack),
                   extra=worker_ctx.extra_for_logging)

        with log_time(_log.debug, 'ran worker %s in %0.3fsec', worker_ctx):

            self.dependencies.injections.all.inject(worker_ctx)
            self.dependencies.all.worker_setup(worker_ctx)

            result = exc = None
            try:
                _log.debug('calling handler for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)

                method = getattr(worker_ctx.service, worker_ctx.method_name)

                with log_time(_log.debug, 'ran handler for %s in %0.3fsec',
                              worker_ctx):
                    result = method(*worker_ctx.args, **worker_ctx.kwargs)
            except Exception as e:
                log_worker_exception(worker_ctx, e)
                exc = e

            with log_time(_log.debug, 'tore down worker %s in %0.3fsec',
                          worker_ctx):

                _log.debug('signalling result for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)
                self.dependencies.injections.all.worker_result(
                    worker_ctx, result, exc)

                _log.debug('tearing down %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)
                self.dependencies.all.worker_teardown(worker_ctx)
                self.dependencies.injections.all.release(worker_ctx)

            if handle_result is not None:
                _log.debug('handling result for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)

                with log_time(_log.debug,
                              'handled result for %s in %0.3fsec',
                              worker_ctx):
                    handle_result(worker_ctx, result, exc)

    def _kill_active_threads(self):
        """ Kill all managed threads that were not marked as "protected" when
        they were spawned.

        This set will include all worker threads generated by
        :meth:`ServiceContainer.spawn_worker`.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_active_threads = len(self._active_threads)

        if num_active_threads:
            _log.warning('killing %s active thread(s)', num_active_threads)
            # iterate over a copy: kill() triggers _handle_thread_exited,
            # which mutates the set
            for gt in list(self._active_threads):
                gt.kill()

    def _kill_protected_threads(self):
        """ Kill any managed threads marked as protected when they were
        spawned.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_protected_threads = len(self._protected_threads)

        if num_protected_threads:
            _log.warning('killing %s protected thread(s)',
                         num_protected_threads)
            for gt in list(self._protected_threads):
                gt.kill()

    def _handle_thread_exited(self, gt):
        """ Link callback for every managed thread: reap it and kill the
        container on any unexpected error.
        """
        self._active_threads.discard(gt)
        self._protected_threads.discard(gt)

        try:
            gt.wait()
        except greenlet.GreenletExit:
            # we don't care much about threads killed by the container
            # this can happen in stop() and kill() if providers
            # don't properly take care of their threads
            _log.warning('%s thread killed by container', self)
        except Exception as exc:
            _log.error('%s thread exited with error', self, exc_info=True)
            # any error raised inside an active thread is unexpected behavior
            # and probably a bug in the providers or container
            # to be safe we kill the container
            self.kill(exc)

    def __str__(self):
        return '<ServiceContainer [{}] at 0x{:x}>'.format(
            self.service_name, id(self))
def send_exception(self, *throw_args):
    """Send ``throw_args`` as the event's exception result.

    A previously-fired event is reset first so that re-sending does not
    fail on an already-ready event.
    """
    already_fired = self.ready()
    if already_fired:
        self.reset()
    return BaseEvent.send_exception(self, *throw_args)
class BaseAMQPConsumer(ConsumerMixin, ControlExtension):
    """ Base extension running a kombu ``ConsumerMixin`` loop in a managed
    thread.

    Subclasses implement :meth:`get_consumers`; deliveries are dispatched
    to handler extensions via :meth:`on_message`.
    """

    def __init__(self, *args, **kwargs):
        self.gt = None
        self.started = False
        self.connection = None
        self.consumers_ready = Event()
        self.consumers_channels = set()
        super(BaseAMQPConsumer, self).__init__(*args, **kwargs)

    # Extension

    def _link_manage_results(self, gt):
        """ Link callback: propagate a crash of the consumer thread to any
        waiter blocked on ``consumers_ready``.
        """
        def exc_func(exc_info):
            exc_type, exc_value, exc_trace = exc_info
            self.consumers_ready.send_exception(exc_value)

        ignore_exception(gt.wait, exc_func=exc_func)()

    def setup(self):
        self.connection = AMQPConnect(self.container.config).instance

    def start(self):
        """ Start the consumer loop once and block until the consumers are
        ready (or failed to start).
        """
        if self.started:
            return
        self.started = True

        self.gt = self.container.spawn_manage_thread(self.run)
        self.gt.link(self._link_manage_results)
        try:
            self.consumers_ready.wait()
        except Exception as e:
            # BUGFIX: was `e.message`, which raises AttributeError on
            # Python 3 exceptions; format the exception itself instead
            msg = 'amqp consumers failed to start, {}'.format(e)
            logger.error(msg)
        else:
            msg = 'amqp consumers ready.'
            logger.debug(msg)
        return

    def stop(self):
        """ Ask the mixin loop to stop and kill the managed thread. """
        self.should_stop = True
        self.started = False
        self.gt.kill()

    def kill(self):
        # killing uses the same shutdown procedure as a graceful stop
        # (previously duplicated verbatim)
        self.stop()

    # ConsumerMixin

    def get_consumers(self, consumer_cls, channel):
        # subclasses must build and return their kombu consumers here
        raise NotImplementedError

    def create_connection(self):
        return super(BaseAMQPConsumer, self).create_connection()

    def on_message(self, extension, body, message):
        # one managed thread per delivery, handled by the owning extension
        self.container.spawn_manage_thread(
            extension.handle_message, args=(body, message))

    def on_connection_revived(self):
        return super(BaseAMQPConsumer, self).on_connection_revived()

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        # fire consumers_ready exactly once; written as an explicit
        # conditional rather than the former `and` side-effect expression
        if not self.consumers_ready.ready():
            self.consumers_ready.send(None)
        return super(BaseAMQPConsumer, self).on_consume_ready(
            connection, channel, consumers, **kwargs)

    def on_consume_end(self, connection, channel):
        return super(BaseAMQPConsumer, self).on_consume_end(
            connection, channel)

    def on_iteration(self):
        return super(BaseAMQPConsumer, self).on_iteration()

    def on_decode_error(self, message, exc):
        return super(BaseAMQPConsumer, self).on_decode_error(message, exc)

    def on_connection_error(self, exc, interval):
        return super(BaseAMQPConsumer, self).on_connection_error(
            exc, interval)