def post(self, queue, messages, client_uuid, project=None):
    """Store a batch of messages and index them in the queue's sorted set.

    Each message is written via a single pipeline; the new IDs are then
    ranked into the queue's message set. Supports per-message delay and
    optional body checksums (driver config).
    """
    msgset_key = utils.msgset_key(queue, project)
    counter_key = utils.scope_queue_index(queue, project,
                                          MESSAGE_RANK_COUNTER_SUFFIX)

    now = timeutils.utcnow_ts()
    posted_ids = []

    with self._client.pipeline() as pipe:
        for message in messages:
            # Checksum is only computed when enabled in driver config.
            checksum = None
            if self.driver.conf.enable_checksum:
                checksum = s_utils.get_checksum(message.get('body', None))

            envelope = Message(
                ttl=message['ttl'],
                created=now,
                client_uuid=client_uuid,
                claim_id=None,
                claim_expires=now,
                claim_count=0,
                delay_expires=now + message.get('delay', 0),
                body=message.get('body', {}),
                checksum=checksum,
            )

            envelope.to_redis(pipe)
            posted_ids.append(envelope.id)

        pipe.execute()

    # NOTE(kgriffs): If this call fails, we will return
    # an error to the client and the messages will be
    # orphaned, but Redis will remove them when they
    # expire, so we will just pretend they don't exist
    # in that case.
    self._index_messages(msgset_key, counter_key, posted_ids)

    return posted_ids
def post(self, queue, messages, client_uuid, project=None):
    """Store each message via one pipeline, then index the new IDs."""
    msgset_key = utils.msgset_key(queue, project)
    counter_key = utils.scope_queue_index(queue, project,
                                          MESSAGE_RANK_COUNTER_SUFFIX)

    now = timeutils.utcnow_ts()
    posted_ids = []

    with self._client.pipeline() as pipe:
        for message in messages:
            envelope = Message(
                ttl=message['ttl'],
                created=now,
                client_uuid=client_uuid,
                claim_id=None,
                claim_expires=now,
                body=message.get('body', {}),
            )

            envelope.to_redis(pipe)
            posted_ids.append(envelope.id)

        pipe.execute()

    # NOTE(kgriffs): If this call fails, we will return
    # an error to the client and the messages will be
    # orphaned, but Redis will remove them when they
    # expire, so we will just pretend they don't exist
    # in that case.
    self._index_messages(msgset_key, counter_key, posted_ids)

    return posted_ids
def bulk_delete(self, queue, message_ids, project=None, claim_ids=None):
    """Delete a batch of messages, optionally enforcing claim ownership.

    When claim_ids is given, every message must be claimed by one of
    those claims; otherwise MessageNotClaimed / ClaimDoesNotMatch is
    raised and the queued deletions are discarded unexecuted.
    A missing queue is a no-op.
    """
    claim_ctrl = self.driver.claim_controller

    if not self._queue_ctrl.exists(queue, project):
        return

    msgset_key = utils.msgset_key(queue, project)

    with self._client.pipeline() as pipe:
        for message_id in message_ids:
            # A vanished message is effectively already deleted.
            if not self._exists(message_id):
                continue

            pipe.delete(message_id)
            pipe.zrem(msgset_key, message_id)

            claim_record = self._get_claim(message_id)

            if claim_ids and claim_record is None:
                raise errors.MessageNotClaimed(message_id)

            if claim_record is not None:
                if claim_ids and (claim_record['id'] not in claim_ids):
                    raise errors.ClaimDoesNotMatch(claim_record['id'],
                                                   queue, project)
                claim_ctrl._del_message(queue, project,
                                        claim_record['id'], message_id,
                                        pipe)

        pipe.execute()
def post(self, queue, messages, client_uuid, project=None):
    """Store messages in *queue*, raising if the queue does not exist."""
    if not self._queue_ctrl.exists(queue, project):
        raise errors.QueueDoesNotExist(queue, project)

    msgset_key = utils.msgset_key(queue, project)
    counter_key = utils.scope_queue_index(queue, project,
                                          MESSAGE_RANK_COUNTER_SUFFIX)

    now = timeutils.utcnow_ts()
    posted_ids = []

    with self._client.pipeline() as pipe:
        for message in messages:
            envelope = Message(
                ttl=message['ttl'],
                created=now,
                client_uuid=client_uuid,
                claim_id=None,
                claim_expires=now,
                body=message.get('body', {}),
            )

            envelope.to_redis(pipe)
            posted_ids.append(envelope.id)

        pipe.execute()

    # NOTE(kgriffs): If this call fails, we will return
    # an error to the client and the messages will be
    # orphaned, but Redis will remove them when they
    # expire, so we will just pretend they don't exist
    # in that case.
    self._index_messages(msgset_key, counter_key, posted_ids)

    return posted_ids
def _find_first_unclaimed(self, queue, project, limit):
    """Find the first unclaimed message in the queue."""
    msgset_key = utils.msgset_key(queue, project)
    now = timeutils.utcnow_ts()

    # TODO(kgriffs): Generalize this paging pattern (DRY)
    offset = 0
    while True:
        page = self._client.zrange(msgset_key, offset,
                                   offset + limit - 1)
        if not page:
            # Exhausted the sorted set without a hit
            return None

        offset += len(page)

        envelopes = [MessageEnvelope.from_redis(key, self._client)
                     for key in page]

        for envelope in envelopes:
            if envelope and not utils.msg_claimed_filter(envelope, now):
                return envelope.id
def create(self, queue, metadata, project=None,
           limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
    """Claim up to *limit* unclaimed messages from *queue*.

    :param metadata: dict with 'ttl' (claim lifetime, seconds) and
        'grace' (extra message lifetime beyond the claim).
    :returns: tuple of (claim_id, claimed messages as basic dicts);
        the message list is empty when nothing could be claimed.
    """
    claim_ttl = metadata['ttl']
    grace = metadata['grace']

    now = timeutils.utcnow_ts()
    msg_ttl = claim_ttl + grace
    claim_expires = now + claim_ttl
    msg_expires = claim_expires + grace

    claim_id = uuidutils.generate_uuid()
    claimed_msgs = []

    # NOTE(kgriffs): Claim some messages
    msgset_key = utils.msgset_key(queue, project)
    claimed_ids = self._claim_messages(msgset_key, now, limit,
                                       claim_id, claim_expires,
                                       msg_ttl, msg_expires)

    if claimed_ids:
        claimed_msgs = messages.Message.from_redis_bulk(claimed_ids,
                                                        self._client)
        claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs]

        # NOTE(kgriffs): Persist claim records
        with self._client.pipeline() as pipe:
            claim_msgs_key = utils.scope_claim_messages(
                claim_id, CLAIM_MESSAGES_SUFFIX)

            for mid in claimed_ids:
                pipe.rpush(claim_msgs_key, mid)

            pipe.expire(claim_msgs_key, claim_ttl)

            claim_info = {
                'id': claim_id,
                't': claim_ttl,
                'e': claim_expires,
                'n': len(claimed_ids),
            }

            pipe.hmset(claim_id, claim_info)
            pipe.expire(claim_id, claim_ttl)

            # NOTE(kgriffs): Add the claim ID to a set so that
            # existence checks can be performed quickly. This
            # is also used as a watch key in order to guard
            # against race conditions.
            #
            # A sorted set is used to facilitate cleaning
            # up the IDs of expired claims.
            claims_set_key = utils.scope_claims_set(queue, project,
                                                    QUEUE_CLAIMS_SUFFIX)

            # NOTE: redis-py >= 3.0 only accepts the mapping form of
            # ZADD; the legacy positional (score, member) call fails.
            pipe.zadd(claims_set_key, {claim_id: claim_expires})

            pipe.execute()

    return claim_id, claimed_msgs
def _count(self, queue, project):
    """Return total number of messages in a queue.

    Note: Some expired messages may be included in the count if
        they haven't been GC'd yet. This is done for performance.
    """
    msgset_key = utils.msgset_key(queue, project)
    return self._client.zcard(msgset_key)
def _list(self, queue, project=None, marker=None,
          limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False,
          client_uuid=None, include_claimed=False,
          include_delayed=False, to_basic=True):
    """Page through the queue's messages, applying visibility filters.

    Yields the filtered page of messages first, then the marker for
    the next page.

    :raises: QueueDoesNotExist if the queue is missing.
    """
    if not self._queue_ctrl.exists(queue, project):
        raise errors.QueueDoesNotExist(queue, project)

    msgset_key = utils.msgset_key(queue, project)
    client = self._client

    if not marker and not include_claimed:
        # NOTE(kgriffs): Skip claimed messages at the head
        # of the queue; otherwise we would just filter them all
        # out and likely end up with an empty list to return.
        marker = self._find_first_unclaimed(queue, project, limit)

        # NOTE: the queue may be empty or fully claimed, in which
        # case there is no unclaimed message; guard against passing
        # None to zrank.
        if marker:
            start = client.zrank(msgset_key, marker) or 0
        else:
            start = 0
    else:
        rank = client.zrank(msgset_key, marker)

        # NOTE: compare against None rather than truthiness; a rank
        # of 0 is valid (marker at the head of the queue) and the
        # page must still start *after* the marker.
        start = rank + 1 if rank is not None else 0

    message_ids = client.zrange(msgset_key, start, start + (limit - 1))

    messages = Message.from_redis_bulk(message_ids, client)

    # NOTE(prashanthr_): Build a list of filters for checking
    # the following:
    #
    # 1. Message is expired
    # 2. Message is claimed
    # 3. Message should not be echoed
    #
    now = timeutils.utcnow_ts()
    filters = [functools.partial(utils.msg_expired_filter, now=now)]

    if not include_claimed:
        filters.append(functools.partial(utils.msg_claimed_filter,
                                         now=now))

    if not include_delayed:
        filters.append(functools.partial(utils.msg_delayed_filter,
                                         now=now))

    if not echo:
        filters.append(functools.partial(utils.msg_echo_filter,
                                         client_uuid=client_uuid))

    marker = {}

    yield _filter_messages(messages, filters, to_basic, marker)
    yield marker['next']
def _list(self, queue, project=None, marker=None,
          limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False,
          client_uuid=None, include_claimed=False,
          include_delayed=False, to_basic=True):
    """Page through the queue's messages, applying visibility filters.

    Yields the filtered page of messages first, then the marker for
    the next page.

    :raises: QueueDoesNotExist if the queue is missing.
    """
    if not self._queue_ctrl.exists(queue, project):
        raise errors.QueueDoesNotExist(queue, project)

    msgset_key = utils.msgset_key(queue, project)
    client = self._client

    if not marker and not include_claimed:
        # NOTE(kgriffs): Skip claimed messages at the head
        # of the queue; otherwise we would just filter them all
        # out and likely end up with an empty list to return.
        marker = self._find_first_unclaimed(queue, project, limit)
        if marker:
            start = client.zrank(msgset_key, marker) or 0
        else:
            start = 0
    else:
        rank = client.zrank(msgset_key, marker)

        # NOTE: compare against None rather than truthiness; a rank
        # of 0 is valid (marker at the head of the queue) and the
        # page must still start *after* the marker, not include it.
        start = rank + 1 if rank is not None else 0

    message_ids = client.zrange(msgset_key, start, start + (limit - 1))

    messages = Message.from_redis_bulk(message_ids, client)

    # NOTE(prashanthr_): Build a list of filters for checking
    # the following:
    #
    # 1. Message is expired
    # 2. Message is claimed
    # 3. Message should not be echoed
    #
    now = timeutils.utcnow_ts()
    filters = [functools.partial(utils.msg_expired_filter, now=now)]

    if not include_claimed:
        filters.append(functools.partial(utils.msg_claimed_filter,
                                         now=now))

    if not include_delayed:
        filters.append(functools.partial(utils.msg_delayed_filter,
                                         now=now))

    if not echo:
        filters.append(functools.partial(utils.msg_echo_filter,
                                         client_uuid=client_uuid))

    marker = {}

    yield _filter_messages(messages, filters, to_basic, marker)
    yield marker['next']
def _get_first_message_id(self, queue, project, sort):
    """Fetch head/tail of the Queue.

    Helper function to get the first message in the queue
    sort > 0 get from the left else from the right.
    """
    msgset_key = utils.msgset_key(queue, project)

    if sort == 1:
        ranged = self._client.zrange(msgset_key, 0, 0)
    else:
        ranged = self._client.zrevrange(msgset_key, 0, 0)

    if ranged:
        return ranged[0]

    return None
def _delete_queue_messages(self, queue, project, pipe):
    """Method to remove all the messages belonging to a queue.

    Will be referenced from the QueueController.
    The pipe to execute deletion will be passed from the
    QueueController executing the operation.
    """
    msgset_key = utils.msgset_key(queue, project)
    message_ids = self._client.zrange(msgset_key, 0, -1)

    # Queue the removal of the index itself, then of every message
    # hash it referenced.
    pipe.delete(msgset_key)
    for message_id in message_ids:
        pipe.delete(message_id)
def delete(self, queue, message_id, project=None, claim=None):
    """Delete one message, authorizing against its claim if any.

    A missing queue or message is treated as already deleted.
    """
    if not self._queue_ctrl.exists(queue, project):
        return

    # NOTE(kgriffs): The message does not exist, so
    # it is essentially "already" deleted.
    if not self._exists(message_id):
        return

    # TODO(kgriffs): Create decorator for validating claim and message
    # IDs, since those are not checked at the transport layer. This
    # decorator should be applied to all relevant methods.
    if claim is not None:
        try:
            uuid.UUID(claim)
        except ValueError:
            raise errors.ClaimDoesNotExist(claim, queue, project)

    msg_claim = self._get_claim(message_id)
    is_claimed = msg_claim is not None

    # Authorize the request based on having the correct claim ID
    if claim is None:
        if is_claimed:
            raise errors.MessageIsClaimed(message_id)
    else:
        if not is_claimed:
            raise errors.MessageNotClaimed(message_id)

        if msg_claim['id'] != claim:
            if not self._claim_ctrl._exists(queue, claim, project):
                raise errors.ClaimDoesNotExist(claim, queue, project)
            raise errors.MessageNotClaimedBy(message_id, claim)

    msgset_key = utils.msgset_key(queue, project)

    with self._client.pipeline() as pipe:
        pipe.delete(message_id)
        pipe.zrem(msgset_key, message_id)

        if is_claimed:
            self._claim_ctrl._del_message(queue, project,
                                          msg_claim['id'], message_id,
                                          pipe)
        pipe.execute()
def delete(self, queue, message_id, project=None, claim=None):
    """Delete a single message from *queue*.

    When *claim* is given it must be a well-formed UUID and must match
    the claim currently holding the message; when it is None the
    message must be unclaimed. A missing queue or message is treated
    as already deleted (no-op).
    """
    claim_ctrl = self.driver.claim_controller
    if not self._queue_ctrl.exists(queue, project):
        return

    # NOTE(kgriffs): The message does not exist, so
    # it is essentially "already" deleted.
    if not self._exists(message_id):
        return

    # TODO(kgriffs): Create decorator for validating claim and message
    # IDs, since those are not checked at the transport layer. This
    # decorator should be applied to all relevant methods.
    if claim is not None:
        try:
            uuid.UUID(claim)
        except ValueError:
            # Malformed claim IDs are reported the same way as
            # nonexistent ones.
            raise errors.ClaimDoesNotExist(claim, queue, project)

    msg_claim = self._get_claim(message_id)
    is_claimed = (msg_claim is not None)

    # Authorize the request based on having the correct claim ID
    if claim is None:
        if is_claimed:
            raise errors.MessageIsClaimed(message_id)
    elif not is_claimed:
        raise errors.MessageNotClaimed(message_id)
    elif msg_claim['id'] != claim:
        # Distinguish "that claim does not exist at all" from
        # "it exists but does not hold this message".
        if not claim_ctrl._exists(queue, claim, project):
            raise errors.ClaimDoesNotExist(claim, queue, project)
        raise errors.MessageNotClaimedBy(message_id, claim)

    msgset_key = utils.msgset_key(queue, project)

    with self._client.pipeline() as pipe:
        # Remove the message hash and its index entry atomically;
        # detach it from its claim record when claimed.
        pipe.delete(message_id)
        pipe.zrem(msgset_key, message_id)

        if is_claimed:
            claim_ctrl._del_message(queue, project, msg_claim['id'],
                                    message_id, pipe)
        pipe.execute()
def bulk_delete(self, queue, message_ids, project=None):
    """Delete several messages at once, raising if the queue is missing."""
    if not self._queue_ctrl.exists(queue, project):
        raise errors.QueueDoesNotExist(queue, project)

    msgset_key = utils.msgset_key(queue, project)

    with self._client.pipeline() as pipe:
        for message_id in message_ids:
            # A vanished message is effectively already deleted.
            if not self._exists(message_id):
                continue

            pipe.delete(message_id)
            pipe.zrem(msgset_key, message_id)

            claim_record = self._get_claim(message_id)
            if claim_record is not None:
                self._claim_ctrl._del_message(queue, project,
                                              claim_record['id'],
                                              message_id, pipe)

        pipe.execute()
def bulk_delete(self, queue, message_ids, project=None):
    """Remove a batch of messages from *queue* in a single pipeline.

    Messages that no longer exist are skipped (already deleted), and a
    missing queue is a no-op. Claimed messages are also detached from
    their claim records on the same pipeline.
    """
    claim_ctrl = self.driver.claim_controller
    if not self._queue_ctrl.exists(queue, project):
        return

    msgset_key = utils.msgset_key(queue, project)

    with self._client.pipeline() as pipe:
        for mid in message_ids:
            # A vanished message is effectively already deleted, so
            # skip it rather than failing the whole batch.
            if not self._exists(mid):
                continue

            pipe.delete(mid)
            pipe.zrem(msgset_key, mid)

            msg_claim = self._get_claim(mid)
            if msg_claim is not None:
                # Keep the claim's message list consistent with the
                # deletion (queued on the same pipeline).
                claim_ctrl._del_message(queue, project,
                                        msg_claim['id'], mid, pipe)
        pipe.execute()
def bulk_delete(self, queue, message_ids, project=None):
    """Delete several messages at once; a missing queue is a no-op."""
    if not self._queue_ctrl.exists(queue, project):
        return

    msgset_key = utils.msgset_key(queue, project)

    with self._client.pipeline() as pipe:
        for message_id in message_ids:
            # A vanished message is effectively already deleted.
            if not self._exists(message_id):
                continue

            pipe.delete(message_id)
            pipe.zrem(msgset_key, message_id)

            claim_record = self._get_claim(message_id)
            if claim_record is not None:
                self._claim_ctrl._del_message(queue, project,
                                              claim_record['id'],
                                              message_id, pipe)

        pipe.execute()
def _find_first_unclaimed(self, queue, project, limit):
    """Find the first unclaimed message in the queue.

    Pages through the queue's sorted set *limit* entries at a time and
    returns the ID of the first message not claimed, or None when the
    queue is empty or every message is claimed.
    """
    msgset_key = utils.msgset_key(queue, project)
    now = timeutils.utcnow_ts()

    # TODO(kgriffs): Generalize this paging pattern (DRY)
    offset = 0
    while True:
        msg_keys = self._client.zrange(msgset_key, offset,
                                       offset + limit - 1)
        if not msg_keys:
            # Exhausted the sorted set without finding one
            return None

        offset += len(msg_keys)

        messages = [MessageEnvelope.from_redis(msg_key, self._client)
                    for msg_key in msg_keys]

        for msg in messages:
            # msg may be None — presumably the key expired between the
            # zrange and the fetch; TODO confirm against from_redis
            if msg and not utils.msg_claimed_filter(msg, now):
                return msg.id
def create(self, queue, metadata, project=None,
           limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
    """Claim up to *limit* messages, applying dead-letter policy.

    Messages whose claim count has reached the queue's
    ``_max_claim_count`` are moved to the configured
    ``_dead_letter_queue`` (optionally with ``_dead_letter_queue_messages_ttl``)
    instead of being returned to the caller.

    :returns: (claim_id, claimed messages); (None, empty iterator)
        when every claimed message was dead-lettered.
    """
    queue_ctrl = self.driver.queue_controller
    msg_ctrl = self.driver.message_controller

    claim_ttl = metadata['ttl']
    grace = metadata['grace']

    now = timeutils.utcnow_ts()
    msg_ttl = claim_ttl + grace
    claim_expires = now + claim_ttl
    msg_expires = claim_expires + grace

    # Get the maxClaimCount and deadLetterQueue from current queue's meta
    queue_meta = queue_ctrl.get(queue, project=project)

    claim_id = uuidutils.generate_uuid()
    claimed_msgs = []

    # NOTE(kgriffs): Claim some messages
    msgset_key = utils.msgset_key(queue, project)
    claimed_ids = self._claim_messages(msgset_key, now, limit,
                                       claim_id, claim_expires,
                                       msg_ttl, msg_expires)

    if claimed_ids:
        claimed_msgs = messages.Message.from_redis_bulk(claimed_ids,
                                                        self._client)
        claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs]

        # NOTE(kgriffs): Persist claim records
        with self._client.pipeline() as pipe:
            claim_msgs_key = utils.scope_claim_messages(
                claim_id, CLAIM_MESSAGES_SUFFIX)

            for mid in claimed_ids:
                pipe.rpush(claim_msgs_key, mid)

            pipe.expire(claim_msgs_key, claim_ttl)

            claim_info = {
                'id': claim_id,
                't': claim_ttl,
                'e': claim_expires,
                'n': len(claimed_ids),
            }

            pipe.hmset(claim_id, claim_info)
            pipe.expire(claim_id, claim_ttl)

            # NOTE(kgriffs): Add the claim ID to a set so that
            # existence checks can be performed quickly. This
            # is also used as a watch key in order to guard
            # against race conditions.
            #
            # A sorted set is used to facilitate cleaning
            # up the IDs of expired claims.
            claims_set_key = utils.scope_claims_set(
                queue, project, QUEUE_CLAIMS_SUFFIX)

            # NOTE: redis-py >= 3.0 only accepts the mapping form of
            # ZADD; the legacy positional (score, member) call fails.
            pipe.zadd(claims_set_key, {claim_id: claim_expires})
            pipe.execute()

            if ('_max_claim_count' in queue_meta and
                    '_dead_letter_queue' in queue_meta):
                claimed_msgs_removed = []
                for msg in claimed_msgs:
                    if msg:
                        claimed_count = msg['claim_count']
                        if claimed_count < queue_meta['_max_claim_count']:
                            # 1. Save the new max claim count for message
                            claim_count = claimed_count + 1
                            dic = {"c.c": claim_count}
                            pipe.hmset(msg['id'], dic)
                            pipe.execute()
                        else:
                            # 2. Check if the message's claim count has
                            # exceeded the max claim count defined in the
                            # queue, if so, move the message to the dead
                            # letter queue and modify it's ttl.
                            # NOTE(gengchc): We're moving message by
                            # moving the message id from queue to dead
                            # letter queue directly.That means, the queue
                            # and dead letter queue must be created on
                            # the same pool.
                            ddl = utils.scope_queue_name(
                                queue_meta['_dead_letter_queue'], project)
                            ddl_ttl = queue_meta.get(
                                "_dead_letter_queue_messages_ttl")
                            dic = {"t": msg['ttl']}
                            if ddl_ttl:
                                dic = {"t": ddl_ttl}
                            pipe.hmset(msg['id'], dic)

                            # Scoped name is "<project>.<queue>";
                            # split back out to build the DLQ keys.
                            queueproject = ddl.split('.')
                            msgs_key_ddl = utils.msgset_key(
                                queueproject[1], queueproject[0])
                            counter_key_ddl = utils.scope_queue_index(
                                queueproject[1], queueproject[0],
                                MESSAGE_RANK_COUNTER_SUFFIX)
                            msgs_key = utils.msgset_key(
                                queue, project=project)
                            pipe.zrem(msgs_key, msg['id'])
                            message_ids = [msg['id']]
                            msg_ctrl._index_messages(
                                msgs_key_ddl, counter_key_ddl,
                                message_ids)
                            pipe.execute()

                            # Add dead letter message to
                            # claimed_msgs_removed, finally remove
                            # them from claimed_msgs.
                            claimed_msgs_removed.append(msg)

                # Remove dead letter messages from claimed_msgs.
                for msg_remove in claimed_msgs_removed:
                    claimed_msgs.remove(msg_remove)

                if len(claimed_msgs) == 0:
                    return None, iter([])

    return claim_id, claimed_msgs
def _create_msgset(self, queue, project, pipe):
    """Register the queue's message set in the global index.

    Uses the redis-py >= 3.0 mapping form of ZADD; the old positional
    (score, member) form is no longer supported by the client.
    """
    pipe.zadd(MSGSET_INDEX_KEY, {utils.msgset_key(queue, project): 1})
def _delete_msgset(self, queue, project, pipe):
    """Unregister the queue's message set from the global index."""
    msgset_key = utils.msgset_key(queue, project)
    pipe.zrem(MSGSET_INDEX_KEY, msgset_key)
def _create_msgset(self, queue, project, pipe):
    """Register the queue's message set in the global index."""
    msgset_key = utils.msgset_key(queue, project)
    pipe.zadd(MSGSET_INDEX_KEY, {msgset_key: 1})
def create(self, queue, metadata, project=None,
           limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
    """Claim up to *limit* messages, applying dead-letter policy.

    Messages whose claim count has reached the queue's
    ``_max_claim_count`` are moved to the configured
    ``_dead_letter_queue`` instead of being returned.

    :returns: (claim_id, claimed messages); (None, empty iterator)
        when every claimed message was dead-lettered.
    """
    queue_ctrl = self.driver.queue_controller
    msg_ctrl = self.driver.message_controller

    claim_ttl = metadata['ttl']
    grace = metadata['grace']

    now = timeutils.utcnow_ts()
    msg_ttl = claim_ttl + grace
    claim_expires = now + claim_ttl
    msg_expires = claim_expires + grace

    # Get the maxClaimCount and deadLetterQueue from current queue's meta
    queue_meta = queue_ctrl.get(queue, project=project)

    claim_id = uuidutils.generate_uuid()
    claimed_msgs = []

    # NOTE(kgriffs): Claim some messages
    msgset_key = utils.msgset_key(queue, project)
    claimed_ids = self._claim_messages(msgset_key, now, limit,
                                       claim_id, claim_expires,
                                       msg_ttl, msg_expires)

    if claimed_ids:
        claimed_msgs = messages.Message.from_redis_bulk(claimed_ids,
                                                        self._client)
        claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs]

        # NOTE(kgriffs): Perist claim records
        with self._client.pipeline() as pipe:
            claim_msgs_key = utils.scope_claim_messages(
                claim_id, CLAIM_MESSAGES_SUFFIX)

            for mid in claimed_ids:
                pipe.rpush(claim_msgs_key, mid)

            pipe.expire(claim_msgs_key, claim_ttl)

            claim_info = {
                'id': claim_id,
                't': claim_ttl,
                'e': claim_expires,
                'n': len(claimed_ids),
            }

            pipe.hmset(claim_id, claim_info)
            pipe.expire(claim_id, claim_ttl)

            # NOTE(kgriffs): Add the claim ID to a set so that
            # existence checks can be performed quickly. This
            # is also used as a watch key in order to guard
            # against race conditions.
            #
            # A sorted set is used to facilitate cleaning
            # up the IDs of expired claims.
            claims_set_key = utils.scope_claims_set(queue, project,
                                                    QUEUE_CLAIMS_SUFFIX)

            pipe.zadd(claims_set_key, {claim_id: claim_expires})
            pipe.execute()

            if ('_max_claim_count' in queue_meta and
                    '_dead_letter_queue' in queue_meta):
                claimed_msgs_removed = []
                for msg in claimed_msgs:
                    if msg:
                        claimed_count = msg['claim_count']
                        if claimed_count < queue_meta['_max_claim_count']:
                            # 1. Save the new max claim count for message
                            claim_count = claimed_count + 1
                            dic = {"c.c": claim_count}
                            pipe.hmset(msg['id'], dic)
                            pipe.execute()
                        else:
                            # 2. Check if the message's claim count has
                            # exceeded the max claim count defined in the
                            # queue, if so, move the message to the dead
                            # letter queue and modify it's ttl.
                            # NOTE(gengchc): We're moving message by
                            # moving the message id from queue to dead
                            # letter queue directly.That means, the queue
                            # and dead letter queue must be created on
                            # the same pool.
                            ddl = utils.scope_queue_name(
                                queue_meta['_dead_letter_queue'], project)
                            ddl_ttl = queue_meta.get(
                                "_dead_letter_queue_messages_ttl")
                            dic = {"t": msg['ttl']}
                            if ddl_ttl:
                                dic = {"t": ddl_ttl}
                            pipe.hmset(msg['id'], dic)

                            # NOTE(review): scoped name appears to be
                            # "<project>.<queue>" — index [1] is the
                            # queue, [0] the project; confirm against
                            # utils.scope_queue_name.
                            queueproject = [s for s in ddl.split('.')]
                            msgs_key_ddl = utils.msgset_key(
                                queueproject[1], queueproject[0])
                            counter_key_ddl = utils.scope_queue_index(
                                queueproject[1], queueproject[0],
                                MESSAGE_RANK_COUNTER_SUFFIX)
                            msgs_key = utils.msgset_key(
                                queue, project=project)
                            pipe.zrem(msgs_key, msg['id'])
                            message_ids = []
                            message_ids.append(msg['id'])
                            msg_ctrl._index_messages(msgs_key_ddl,
                                                     counter_key_ddl,
                                                     message_ids)
                            pipe.execute()

                            # Add dead letter message to
                            # claimed_msgs_removed, finally remove
                            # them from claimed_msgs.
                            claimed_msgs_removed.append(msg)

                # Remove dead letter messages from claimed_msgs.
                for msg_remove in claimed_msgs_removed:
                    claimed_msgs.remove(msg_remove)

                if len(claimed_msgs) == 0:
                    return None, iter([])

    return claim_id, claimed_msgs