def _get(self, queue, timeout=None):
    """Get the first available message from the queue.

    Before it does so it acquires a lock on the store so only one node
    reads at the same time. This is for read consistency.

    Arguments:
        queue (str): The name of the queue.
        timeout (int): Optional seconds to wait for a response.
    """
    with self._queue_lock(queue):
        key = self._key_prefix(queue)
        logger.debug('Fetching key %s with index %s', key, self.index)

        try:
            result = self.client.read(
                key=key, recursive=True,
                index=self.index, timeout=self.timeout)

            if result is None:
                raise Empty()

            item = result._children[-1]
            logger.debug('Removing key {0}'.format(item['key']))

            msg_content = loads(item['value'])
            self.client.delete(key=item['key'])
            return msg_content
        except (TypeError, IndexError, etcd.EtcdError) as error:
            logger.debug('_get failed: {0}:{1}'.format(type(error), error))

        raise Empty()

def _receive(self):
    c = self.subclient
    response = None
    try:
        response = c.parse_response()
    except self.connection_errors:
        self._in_listen = False
        raise Empty()
    if response is not None:
        payload = self._handle_message(c, response)
        if bytes_to_str(payload["type"]).endswith("message"):
            channel = bytes_to_str(payload["channel"])
            if payload["data"]:
                if channel[0] == "/":
                    _, _, channel = channel.partition(".")
                try:
                    message = loads(bytes_to_str(payload["data"]))
                except (TypeError, ValueError):
                    warn(
                        "Cannot process event on channel %r: %s",
                        channel,
                        repr(payload)[:4096],
                        exc_info=1,
                    )
                    raise Empty()
                exchange = channel.split("/", 1)[0]
                return message, self._fanout_to_queue[exchange]
    raise Empty()

def _get(self, queue, timeout=None):
    """Get the first available message from the queue.

    Before it does so it acquires a lock on the Key/Value store so only
    one node reads at the same time. This is for read consistency.
    """
    with self._queue_lock(queue, raising=Empty):
        key = '{0}/msg/'.format(self._key_prefix(queue))
        logger.debug('Fetching key %s with index %s', key, self.index)
        self.index, data = self.client.kv.get(
            key=key, recurse=True,
            index=self.index, wait=self.timeout,
        )

        try:
            if data is None:
                raise Empty()

            logger.debug('Removing key %s with modifyindex %s',
                         data[0]['Key'], data[0]['ModifyIndex'])

            self.client.kv.delete(key=data[0]['Key'],
                                  cas=data[0]['ModifyIndex'])

            return loads(data[0]['Value'])
        except TypeError:
            pass

        raise Empty()

def _receive(self):
    c = self.subclient
    response = None
    try:
        response = c.parse_response()
    except self.connection_errors:
        self._in_listen = False
        raise
    if response is not None:
        payload = self._handle_message(c, response)
        if bytes_to_str(payload['type']).endswith('message'):
            channel = bytes_to_str(payload['channel'])
            if payload['data']:
                if channel[0] == '/':
                    _, _, channel = channel.partition('.')
                try:
                    message = loads(bytes_to_str(payload['data']))
                except (TypeError, ValueError):
                    warn('Cannot process event on channel %r: %s',
                         channel, repr(payload)[:4096], exc_info=1)
                    raise Empty()
                exchange = channel.split('/', 1)[0]
                return message, self._fanout_to_queue[exchange]
    raise Empty()

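# Illustrative sketch (not part of the transports above): how the raw
# pub/sub channel name handled in _receive() is reduced to an exchange
# name before it is mapped to a local queue. The example channel names
# and the '/<db>.' prefix format are assumptions made up for this sketch.
def channel_to_exchange(channel):
    """Strip an optional '/<db>.' prefix and keep only the exchange part."""
    if channel.startswith('/'):
        _, _, channel = channel.partition('.')   # drop the '/<db>' prefix
    return channel.split('/', 1)[0]               # exchange precedes any routing key


assert channel_to_exchange('/0.celery/fanout-key') == 'celery'
assert channel_to_exchange('celery/fanout-key') == 'celery'
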
def _get(self, queue):
    try:
        if queue in self._fanout_queues:
            msg = next(self._queue_cursors[queue])
            self._queue_readcounts[queue] += 1
            return loads(msg['payload'])
        else:
            msg = self.client.command(
                'findandmodify', 'messages',
                query={'queue': queue},
                sort={'_id': pymongo.ASCENDING},
                remove=True,
            )
    except errors.OperationFailure as exc:
        if 'No matching object found' in exc.args[0]:
            raise Empty()
        raise
    except StopIteration:
        raise Empty()

    # as of mongo 2.0 empty results won't raise an error
    if msg['value'] is None:
        raise Empty()
    return loads(msg['value']['payload'])

def drain_events(self, timeout=None):
    """Return a single payload message from one of our queues.

    Raises:
        Queue.Empty: if no messages available.
    """
    # If we're not allowed to consume or have no consumers, raise Empty
    if not self._consumers or not self.qos.can_consume():
        raise Empty()
    message_cache = self._queue_message_cache

    # Check if there are any items in our buffer. If there are any,
    # pop off that queue first.
    try:
        return message_cache.popleft()
    except IndexError:
        pass

    # At this point, go and get more messages from SQS
    res, queue = self._poll(self.cycle, timeout=timeout)
    message_cache.extend((r, queue) for r in res)

    # Now try to pop off the queue again.
    try:
        return message_cache.popleft()
    except IndexError:
        raise Empty()

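# Illustrative sketch (not the transport code itself): the same
# "drain the local buffer first, then poll the broker, then retry the
# buffer" pattern used by drain_events() above, reduced to the standard
# library. The poll() callable and its (items, source) return shape are
# assumptions made up for this example.
from collections import deque
from queue import Empty


def drain_once(buffer: deque, poll):
    """Return one buffered item, refilling the buffer from poll() if needed."""
    try:
        return buffer.popleft()              # serve from the local cache first
    except IndexError:
        pass
    items, source = poll()                   # hypothetical: fetch a batch from the broker
    buffer.extend((item, source) for item in items)
    try:
        return buffer.popleft()
    except IndexError:
        raise Empty()                        # still nothing: signal an empty queue
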
def _brpop_read(self, **options):
    client = self.client
    try:
        conn = options.pop('conn')
        try:
            resp = client.parse_response(conn, 'BRPOP', **options)
        except self.connection_errors:
            conn.disconnect()
            raise Empty()
        except MovedError as err:
            # copied from rediscluster/client.py
            client.refresh_table_asap = True
            client.connection_pool.nodes.increment_reinitialize_counter()
            node = client.connection_pool.nodes.set_node(
                err.host, err.port, server_type='master')
            client.connection_pool.nodes.slots[err.slot_id][0] = node
            raise Empty()

        if resp:
            dest, item = resp
            dest = bytes_to_str(dest).rsplit(self.sep, 1)[0]
            self._queue_cycle.rotate(dest)
            self.connection._deliver(loads(bytes_to_str(item)), dest)
            return True
    finally:
        self._in_poll = False

def _get(self, queue):
    result = self._query(queue, limit=1)
    if not result:
        raise Empty()

    try:
        item = result[0]['value']
    except LookupError:
        raise Empty()

    self.client.delete(item['_id'])
    return loads(bytes_to_str(item['payload']))

def _get(self, queue):
    """Try to retrieve a single message off ``queue``."""
    messages = self._get_from_sqs(queue, count=1)

    if messages:
        return self._messages_to_python(messages, queue)[0]
    raise Empty()

def _get_bulk(self, queue, max_if_unlimited=SQS_MAX_MESSAGES):
    """Try to retrieve multiple messages off ``queue``.

    Where _get() returns a single Payload object, this method returns a
    list of Payload objects.  The number of objects returned is
    determined by the total number of messages available in the queue
    and the number of messages that the QoS object allows (based on the
    prefetch_count).

    .. note::
        Ignores QoS limits so caller is responsible for checking that we
        are allowed to consume at least one message from the queue.
        get_bulk will then ask QoS for an estimate of the number of
        extra messages that we can consume.

    Arguments:
        queue (str): The queue name to pull from.

    Returns:
        List[Payload]: A list of payload objects returned.
    """
    # drain_events calls `can_consume` first, consuming
    # a token, so we know that we are allowed to consume at least
    # one message.
    maxcount = self.qos.can_consume_max_estimate()
    maxcount = max_if_unlimited if maxcount is None else max(maxcount, 1)
    if maxcount:
        messages = self._get_from_sqs(
            queue, count=min(maxcount, SQS_MAX_MESSAGES),
        )

        if messages:
            return self._messages_to_python(messages, queue)

    raise Empty()

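# Illustrative sketch (assumption, not kombu API): how the batch size in
# _get_bulk() above is bounded. An unlimited prefetch estimate falls back
# to the broker maximum, otherwise at least one message is requested, and
# the final request never exceeds the per-call maximum. The constant value
# here mirrors SQS's 10-message receive limit and is stated as an assumption.
SQS_MAX_MESSAGES = 10


def clamp_batch_size(estimate, max_if_unlimited=SQS_MAX_MESSAGES):
    """Return how many messages to request from the broker."""
    count = max_if_unlimited if estimate is None else max(estimate, 1)
    return min(count, SQS_MAX_MESSAGES)


assert clamp_batch_size(None) == 10   # no prefetch limit: take a full batch
assert clamp_batch_size(0) == 1       # always ask for at least one message
assert clamp_batch_size(25) == 10     # never exceed the per-call maximum
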
def _get(self, queue):
    """Try to retrieve a single message off ``queue``."""
    q = self._new_queue(queue)
    messages = q.get_messages(num_messages=1)

    if messages:
        return self._messages_to_python(messages, queue)[0]
    raise Empty()

def _get(self, queue):
    with self.conn_or_acquire() as client:
        for pri in PRIORITY_STEPS:
            item = client.rpop(self._q_for_pri(queue, pri))
            if item:
                return loads(item)
        raise Empty()

def _get(self, queue):
    with self.conn_or_acquire() as client:
        for pri in self.priority_steps:
            item = client.rpop(self._q_for_pri(queue, pri))
            if item:
                return loads(bytes_to_str(item))
        raise Empty()

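# Illustrative sketch (not the transport code itself): the "check each
# priority sub-queue in order, return the first message found" pattern
# used by the Redis _get() implementations above, reduced to plain
# in-memory lists. The key format and priority steps are assumptions
# made up for this example.
from queue import Empty

PRIORITY_STEPS = [0, 3, 6, 9]          # checked from highest to lowest priority


def pop_by_priority(queues, name):
    """Pop one message from the highest-priority non-empty sub-queue."""
    for pri in PRIORITY_STEPS:
        sub_queue = queues.get('%s:%s' % (name, pri), [])
        if sub_queue:
            return sub_queue.pop(0)    # FIFO within a priority level
        # otherwise fall through to the next (lower) priority
    raise Empty()
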
class Channel(virtual.Channel):
    _client = None
    supports_fanout = True
    _fanout_queues = {}

    def __init__(self, *vargs, **kwargs):
        super_ = super(Channel, self)
        super_.__init__(*vargs, **kwargs)
        self._queue_cursors = {}
        self._queue_readcounts = {}

    def _new_queue(self, queue, **kwargs):
        pass

    def _get(self, queue):
        try:
            if queue in self._fanout_queues:
                msg = next(self._queue_cursors[queue])
                self._queue_readcounts[queue] += 1
                return loads(msg['payload'])
            else:
                msg = self.client.command(
                    'findandmodify', 'messages',
                    query={'queue': queue},
                    sort={'_id': pymongo.ASCENDING},
                    remove=True)
        except errors.OperationFailure as exc:
            if 'No matching object found' in exc.args[0]:
                raise Empty()
            raise
        except StopIteration:
            raise Empty()

def drain_events(self, timeout=None, callback=None):
    callback = callback or self.connection._deliver
    if self._consumers and self.qos.can_consume():
        if hasattr(self, '_get_many'):
            return self._get_many(self._active_queues, timeout=timeout)
        return self._poll(self.cycle, callback, timeout=timeout)
    raise Empty()

def _get_bulk(self, queue,
              max_if_unlimited=SQS_MAX_MESSAGES, callback=None):
    """Try to retrieve multiple messages off ``queue``.

    Where :meth:`_get` returns a single Payload object, this method
    returns a list of Payload objects.  The number of objects returned
    is determined by the total number of messages available in the
    queue and the number of messages the QoS object allows (based on
    the prefetch_count).

    Note:
        Ignores QoS limits so caller is responsible for checking that
        we are allowed to consume at least one message from the queue.
        get_bulk will then ask QoS for an estimate of the number of
        extra messages that we can consume.

    Arguments:
        queue (str): The queue name to pull from.

    Returns:
        List[Message]
    """
    # drain_events calls `can_consume` first, consuming
    # a token, so we know that we are allowed to consume at least
    # one message.
    maxcount = self._get_message_estimate()
    if maxcount:
        q = self._new_queue(queue)
        messages = q.get_messages(num_messages=maxcount)

        if messages:
            return self._messages_to_python(messages, queue)
    raise Empty()

def get(self, callback, timeout=None):
    self._in_protected_read = True
    try:
        for channel in self._channels:
            if channel.active_queues:           # BRPOP mode?
                if channel.qos.can_consume():
                    self._register_BRPOP(channel)
            if channel.active_fanout_queues:    # LISTEN mode?
                self._register_LISTEN(channel)

        events = self.poller.poll(timeout)
        if events:
            for fileno, event in events:
                ret = self.handle_event(fileno, event)
                if ret:
                    return
        # - no new data, so try to restore messages.
        # - reset active redis commands.
        self.maybe_restore_messages()
        raise Empty()
    finally:
        self._in_protected_read = False
        while self.after_read:
            try:
                fun = self.after_read.pop()
            except KeyError:
                break
            else:
                fun()

def _get(self, queue, manager='objects', **kwargs):
    message = getattr(Message, manager).pop(queue)
    if message:
        payload = loads(bytes_to_str(message.payload))
        payload['properties']['delivery_tag'] = message.pk
        return payload
    raise Empty()

def get(self, timeout=None):
    self.on_poll_start()
    events = self.poller.poll(timeout)
    for fileno, event in events or []:
        return self.handle_event(fileno, event)
    raise Empty()

def _get(self, queue):
    queue = self._get_queue(queue)
    msg = queue.get()
    if msg is None:
        raise Empty()
    return loads(bytes_to_str(msg))

def _get(self, queue):
    result = self._query(queue, limit=1)
    if not result:
        raise Empty()
    item = result.rows[0].value
    self.client.delete(item)
    return loads(item['payload'])

def _get(self, queue, timeout=None):
    try:
        return loads(self.client.get(queue, timeout))
    except socket.error as exc:
        if exc.errno == errno.EAGAIN and timeout != 0:
            raise Empty()
        else:
            raise

def get(self, timeout=None):
    self.on_poll_start()
    events = self.poller.poll(timeout)
    for fileno, _ in events or []:
        return self.on_readable(fileno)
    raise Empty()

def parse_response(self, connection, type, **options):
    cmd, queues = self.connection._sock.data.pop()
    assert cmd == type
    self.connection._sock.data = []
    if type == 'BRPOP':
        item = self.brpop(queues, 0.001)
        if item:
            return item
        raise Empty()

def get_nowait(self):
    n = len(self.inqueues)
    for i in range(n):
        i = (i + self.lastq) % n
        m = self.inqueues[i].get(no_ack=self.no_ack)
        if m is not None:
            self.lastq = i + 1
            return m
    raise Empty()

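# Illustrative sketch (assumption, not the class above): the same
# round-robin scan used by get_nowait(), rotated so each call starts at
# the queue after the one that last produced a message, implemented here
# with standard-library queue.Queue objects.
import queue


class RoundRobinGetter:
    def __init__(self, inqueues):
        self.inqueues = list(inqueues)
        self.lastq = 0                       # index to start from next time

    def get_nowait(self):
        n = len(self.inqueues)
        for offset in range(n):
            i = (offset + self.lastq) % n
            try:
                m = self.inqueues[i].get_nowait()
            except queue.Empty:
                continue                     # nothing here, try the next queue
            self.lastq = i + 1               # start after this queue next time
            return m
        raise queue.Empty()                  # every queue was empty
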
def _get(self, queue, timeout=None):
    """Try to retrieve a single message off ``queue``."""
    message = self.queue_service.receive_queue_message(
        self.entity_name(queue), timeout=timeout, peek_lock=False)

    if message.body is None:
        raise Empty()

    return loads(bytes_to_str(message.body))

def _get(self, queue):
    """Try to retrieve a single message off ``queue``."""
    q_url = self._new_queue(queue)
    resp = self.sqs.receive_message(q_url)
    if resp['Messages']:
        body = AsyncMessage(body=resp['Messages'][0]['Body']).decode()
        resp['Messages'][0]['Body'] = body
        return self._messages_to_python(resp['Messages'], queue)[0]
    raise Empty()

def _brpop_read(self, **options):
    try:
        try:
            dest__item = self.client.parse_response(
                self.client.connection, "BRPOP", **options)
        except self.connection_errors:
            # if there's a ConnectionError, disconnect so the next
            # iteration will reconnect automatically.
            self.client.connection.disconnect()
            raise Empty()
        if dest__item:
            dest, item = dest__item
            dest = bytes_to_str(dest).rsplit(self.sep, 1)[0]
            self._rotate_cycle(dest)
            return loads(bytes_to_str(item)), dest
        else:
            raise Empty()
    finally:
        self._in_poll = False

def _get(self, queue):
    """Try to retrieve a single message off ``queue``."""
    q_url = self._new_queue(queue)
    resp = self.sqs.receive_message(
        QueueUrl=q_url, MaxNumberOfMessages=1,
        WaitTimeSeconds=self.wait_time_seconds)
    if resp.get('Messages'):
        body = AsyncMessage(body=resp['Messages'][0]['Body']).decode()
        resp['Messages'][0]['Body'] = body
        return self._messages_to_python(resp['Messages'], queue)[0]
    raise Empty()

def parse_response(self, connection, type, **options):
    cmd, queues = self.connection._sock.data.pop()
    queues = list(queues)
    assert cmd == type
    self.connection._sock.data = []
    if type == 'BRPOP':
        timeout = queues.pop()
        item = self.brpop(queues, timeout)
        if item:
            return item
        raise Empty()