def post(self, queue, messages, client_uuid, project=None):
    """Write a batch of messages to Redis and index them in the queue.

    Each message hash is staged on a single pipeline and flushed with
    one round trip; the IDs are then added to the queue's message set.
    """
    msgset_key = utils.msgset_key(queue, project)
    counter_key = utils.scope_queue_index(queue, project,
                                          MESSAGE_RANK_COUNTER_SUFFIX)

    timestamp = timeutils.utcnow_ts()
    # Hoisted out of the loop: the flag cannot change mid-batch.
    checksum_enabled = self.driver.conf.enable_checksum
    posted_ids = []

    with self._client.pipeline() as pipe:
        for doc in messages:
            entry = Message(
                ttl=doc['ttl'],
                created=timestamp,
                client_uuid=client_uuid,
                claim_id=None,
                claim_expires=timestamp,
                claim_count=0,
                delay_expires=timestamp + doc.get('delay', 0),
                body=doc.get('body', {}),
                checksum=(s_utils.get_checksum(doc.get('body', None))
                          if checksum_enabled else None),
            )
            entry.to_redis(pipe)
            posted_ids.append(entry.id)

        pipe.execute()

    # NOTE(kgriffs): If this call fails, we will return
    # an error to the client and the messages will be
    # orphaned, but Redis will remove them when they
    # expire, so we will just pretend they don't exist
    # in that case.
    self._index_messages(msgset_key, counter_key, posted_ids)

    return posted_ids
def post(self, queue, messages, client_uuid, project=None):
    """Persist a batch of messages, then add their IDs to the queue index.

    All message hashes are queued on one pipeline and committed in a
    single round trip before indexing.
    """
    msgset_key = utils.msgset_key(queue, project)
    counter_key = utils.scope_queue_index(queue, project,
                                          MESSAGE_RANK_COUNTER_SUFFIX)

    timestamp = timeutils.utcnow_ts()
    posted_ids = []

    with self._client.pipeline() as pipe:
        for doc in messages:
            entry = Message(
                ttl=doc['ttl'],
                created=timestamp,
                client_uuid=client_uuid,
                claim_id=None,
                claim_expires=timestamp,
                body=doc.get('body', {}),
            )
            entry.to_redis(pipe)
            posted_ids.append(entry.id)

        pipe.execute()

    # NOTE(kgriffs): If this call fails, we will return
    # an error to the client and the messages will be
    # orphaned, but Redis will remove them when they
    # expire, so we will just pretend they don't exist
    # in that case.
    self._index_messages(msgset_key, counter_key, posted_ids)

    return posted_ids
def post(self, queue, messages, client_uuid, project=None):
    """Persist a batch of messages to an existing queue.

    Raises QueueDoesNotExist when the target queue is missing; otherwise
    stages every message on one pipeline, commits, and indexes the IDs.
    """
    # Guard clause: refuse to post into a queue that was never created.
    if not self._queue_ctrl.exists(queue, project):
        raise errors.QueueDoesNotExist(queue, project)

    msgset_key = utils.msgset_key(queue, project)
    counter_key = utils.scope_queue_index(queue, project,
                                          MESSAGE_RANK_COUNTER_SUFFIX)

    timestamp = timeutils.utcnow_ts()
    posted_ids = []

    with self._client.pipeline() as pipe:
        for doc in messages:
            entry = Message(
                ttl=doc['ttl'],
                created=timestamp,
                client_uuid=client_uuid,
                claim_id=None,
                claim_expires=timestamp,
                body=doc.get('body', {}),
            )
            entry.to_redis(pipe)
            posted_ids.append(entry.id)

        pipe.execute()

    # NOTE(kgriffs): If this call fails, we will return
    # an error to the client and the messages will be
    # orphaned, but Redis will remove them when they
    # expire, so we will just pretend they don't exist
    # in that case.
    self._index_messages(msgset_key, counter_key, posted_ids)

    return posted_ids
def create(self, queue, metadata, project=None,
           limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
    """Claim up to *limit* messages from *queue*.

    :param queue: name of the queue to claim from
    :param metadata: dict carrying the claim 'ttl' and 'grace' seconds
    :param project: project scope for the queue
    :param limit: maximum number of messages to claim
    :returns: tuple of (claim_id, claimed message list); when nothing
        could be claimed, returns ``(None, iter([]))``.

    If the queue's metadata defines both ``_max_claim_count`` and
    ``_dead_letter_queue``, messages whose claim count has reached the
    maximum are moved to the dead letter queue instead of being
    returned to the caller.
    """
    queue_ctrl = self.driver.queue_controller
    msg_ctrl = self.driver.message_controller

    claim_ttl = metadata['ttl']
    grace = metadata['grace']

    now = timeutils.utcnow_ts()
    msg_ttl = claim_ttl + grace
    claim_expires = now + claim_ttl
    msg_expires = claim_expires + grace

    # Get the maxClaimCount and deadLetterQueue from current queue's meta
    queue_meta = queue_ctrl.get(queue, project=project)

    claim_id = uuidutils.generate_uuid()
    claimed_msgs = []

    # NOTE(kgriffs): Claim some messages
    msgset_key = utils.msgset_key(queue, project)
    claimed_ids = self._claim_messages(msgset_key, now, limit,
                                       claim_id, claim_expires,
                                       msg_ttl, msg_expires)

    if claimed_ids:
        claimed_msgs = messages.Message.from_redis_bulk(claimed_ids,
                                                        self._client)
        claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs]

        # NOTE(kgriffs): Persist claim records
        with self._client.pipeline() as pipe:
            claim_msgs_key = utils.scope_claim_messages(
                claim_id, CLAIM_MESSAGES_SUFFIX)

            for mid in claimed_ids:
                pipe.rpush(claim_msgs_key, mid)

            pipe.expire(claim_msgs_key, claim_ttl)

            claim_info = {
                'id': claim_id,
                't': claim_ttl,
                'e': claim_expires,
                'n': len(claimed_ids),
            }

            pipe.hmset(claim_id, claim_info)
            pipe.expire(claim_id, claim_ttl)

            # NOTE(kgriffs): Add the claim ID to a set so that
            # existence checks can be performed quickly. This
            # is also used as a watch key in order to guard
            # against race conditions.
            #
            # A sorted set is used to facilitate cleaning
            # up the IDs of expired claims.
            claims_set_key = utils.scope_claims_set(queue, project,
                                                    QUEUE_CLAIMS_SUFFIX)
            pipe.zadd(claims_set_key, {claim_id: claim_expires})
            pipe.execute()

            if ('_max_claim_count' in queue_meta and
                    '_dead_letter_queue' in queue_meta):
                claimed_msgs_removed = []
                for msg in claimed_msgs:
                    if msg:
                        claimed_count = msg['claim_count']
                        if claimed_count < queue_meta['_max_claim_count']:
                            # 1. Save the incremented claim count for
                            # the message.
                            claim_count = claimed_count + 1
                            dic = {"c.c": claim_count}
                            pipe.hmset(msg['id'], dic)
                            pipe.execute()
                        else:
                            # 2. Check if the message's claim count has
                            # exceeded the max claim count defined in the
                            # queue, if so, move the message to the dead
                            # letter queue and modify it's ttl.
                            # NOTE(gengchc): We're moving message by
                            # moving the message id from queue to dead
                            # letter queue directly.That means, the queue
                            # and dead letter queue must be created on
                            # the same pool.
                            ddl = utils.scope_queue_name(
                                queue_meta['_dead_letter_queue'], project)
                            ddl_ttl = queue_meta.get(
                                "_dead_letter_queue_messages_ttl")
                            dic = {"t": msg['ttl']}
                            if ddl_ttl:
                                dic = {"t": ddl_ttl}
                            pipe.hmset(msg['id'], dic)

                            # Scoped name is '<project>.<queue>'; split it
                            # back apart (split() already returns a list --
                            # no comprehension needed).
                            queueproject = ddl.split('.')
                            msgs_key_ddl = utils.msgset_key(
                                queueproject[1], queueproject[0])
                            counter_key_ddl = utils.scope_queue_index(
                                queueproject[1], queueproject[0],
                                MESSAGE_RANK_COUNTER_SUFFIX)
                            msgs_key = utils.msgset_key(queue,
                                                        project=project)
                            pipe.zrem(msgs_key, msg['id'])
                            message_ids = [msg['id']]
                            msg_ctrl._index_messages(msgs_key_ddl,
                                                     counter_key_ddl,
                                                     message_ids)
                            pipe.execute()
                            # Add dead letter message to
                            # claimed_msgs_removed, finally remove
                            # them from claimed_msgs.
                            claimed_msgs_removed.append(msg)

                # Remove dead letter messages from claimed_msgs.
                for msg_remove in claimed_msgs_removed:
                    claimed_msgs.remove(msg_remove)

    if not claimed_msgs:
        return None, iter([])

    return claim_id, claimed_msgs
def create(self, queue, metadata, project=None,
           limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
    """Claim up to *limit* messages from *queue*.

    :param queue: name of the queue to claim from
    :param metadata: dict carrying the claim 'ttl' and 'grace' seconds
    :param project: project scope for the queue
    :param limit: maximum number of messages to claim
    :returns: tuple of (claim_id, claimed message list); when nothing
        could be claimed, returns ``(None, iter([]))``.

    If the queue's metadata defines both ``_max_claim_count`` and
    ``_dead_letter_queue``, messages whose claim count has reached the
    maximum are moved to the dead letter queue instead of being
    returned to the caller.
    """
    queue_ctrl = self.driver.queue_controller
    msg_ctrl = self.driver.message_controller

    claim_ttl = metadata['ttl']
    grace = metadata['grace']

    now = timeutils.utcnow_ts()
    msg_ttl = claim_ttl + grace
    claim_expires = now + claim_ttl
    msg_expires = claim_expires + grace

    # Get the maxClaimCount and deadLetterQueue from current queue's meta
    queue_meta = queue_ctrl.get(queue, project=project)

    claim_id = uuidutils.generate_uuid()
    claimed_msgs = []

    # NOTE(kgriffs): Claim some messages
    msgset_key = utils.msgset_key(queue, project)
    claimed_ids = self._claim_messages(msgset_key, now, limit,
                                       claim_id, claim_expires,
                                       msg_ttl, msg_expires)

    if claimed_ids:
        claimed_msgs = messages.Message.from_redis_bulk(
            claimed_ids, self._client)
        claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs]

        # NOTE(kgriffs): Persist claim records
        with self._client.pipeline() as pipe:
            claim_msgs_key = utils.scope_claim_messages(
                claim_id, CLAIM_MESSAGES_SUFFIX)

            for mid in claimed_ids:
                pipe.rpush(claim_msgs_key, mid)

            pipe.expire(claim_msgs_key, claim_ttl)

            claim_info = {
                'id': claim_id,
                't': claim_ttl,
                'e': claim_expires,
                'n': len(claimed_ids),
            }

            pipe.hmset(claim_id, claim_info)
            pipe.expire(claim_id, claim_ttl)

            # NOTE(kgriffs): Add the claim ID to a set so that
            # existence checks can be performed quickly. This
            # is also used as a watch key in order to guard
            # against race conditions.
            #
            # A sorted set is used to facilitate cleaning
            # up the IDs of expired claims.
            claims_set_key = utils.scope_claims_set(
                queue, project, QUEUE_CLAIMS_SUFFIX)
            # NOTE(review): redis-py >= 3.0 removed the positional
            # zadd(name, score, member) form; a mapping of
            # {member: score} is required.
            pipe.zadd(claims_set_key, {claim_id: claim_expires})
            pipe.execute()

            if ('_max_claim_count' in queue_meta and
                    '_dead_letter_queue' in queue_meta):
                claimed_msgs_removed = []
                for msg in claimed_msgs:
                    if msg:
                        claimed_count = msg['claim_count']
                        if claimed_count < queue_meta['_max_claim_count']:
                            # 1. Save the incremented claim count for
                            # the message.
                            claim_count = claimed_count + 1
                            dic = {"c.c": claim_count}
                            pipe.hmset(msg['id'], dic)
                            pipe.execute()
                        else:
                            # 2. Check if the message's claim count has
                            # exceeded the max claim count defined in the
                            # queue, if so, move the message to the dead
                            # letter queue and modify it's ttl.
                            # NOTE(gengchc): We're moving message by
                            # moving the message id from queue to dead
                            # letter queue directly.That means, the queue
                            # and dead letter queue must be created on
                            # the same pool.
                            ddl = utils.scope_queue_name(
                                queue_meta['_dead_letter_queue'], project)
                            ddl_ttl = queue_meta.get(
                                "_dead_letter_queue_messages_ttl")
                            dic = {"t": msg['ttl']}
                            if ddl_ttl:
                                dic = {"t": ddl_ttl}
                            pipe.hmset(msg['id'], dic)

                            # Scoped name is '<project>.<queue>'; split it
                            # back apart (split() already returns a list --
                            # no comprehension needed).
                            queueproject = ddl.split('.')
                            msgs_key_ddl = utils.msgset_key(
                                queueproject[1], queueproject[0])
                            counter_key_ddl = utils.scope_queue_index(
                                queueproject[1], queueproject[0],
                                MESSAGE_RANK_COUNTER_SUFFIX)
                            msgs_key = utils.msgset_key(queue,
                                                        project=project)
                            pipe.zrem(msgs_key, msg['id'])
                            message_ids = [msg['id']]
                            msg_ctrl._index_messages(
                                msgs_key_ddl, counter_key_ddl, message_ids)
                            pipe.execute()
                            # Add dead letter message to
                            # claimed_msgs_removed, finally remove
                            # them from claimed_msgs.
                            claimed_msgs_removed.append(msg)

                # Remove dead letter messages from claimed_msgs.
                for msg_remove in claimed_msgs_removed:
                    claimed_msgs.remove(msg_remove)

    if not claimed_msgs:
        return None, iter([])

    return claim_id, claimed_msgs