def _create(self, name, metadata=None, project=None):
    # TODO(prashanthr_): Implement as a lua script.
    queue_key = utils.scope_queue_name(name, project)
    qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)

    # Check if the queue already exists.
    if self._exists(name, project):
        return False

    queue = {
        'c': 0,
        'cl': 0,
        'm': self._packer(metadata or {}),
        't': timeutils.utcnow_ts()
    }

    # Pipeline ensures atomic inserts.
    with self._client.pipeline() as pipe:
        pipe.zadd(qset_key, {queue_key: 1}).hmset(queue_key, queue)

        try:
            pipe.execute()
        except redis.exceptions.ResponseError:
            return False

    return True
def _list(self, project=None, kfilter={}, marker=None,
          limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False,
          name=None):
    client = self._client
    qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)
    marker = utils.scope_queue_name(marker, project)
    if marker:
        rank = client.zrank(qset_key, marker)
    else:
        rank = None
    # NOTE: "is not None" keeps rank 0 (marker is the first member) from
    # being treated as "no marker".
    start = rank + 1 if rank is not None else 0

    cursor = (q for q in client.zrange(qset_key, start,
                                       start + limit - 1))
    marker_next = {}

    def denormalizer(info, name):
        queue = {'name': utils.descope_queue_name(name)}
        marker_next['next'] = queue['name']
        if detailed:
            queue['metadata'] = info[1]

        return queue

    yield utils.QueueListCursor(self._client, cursor, denormalizer)
    yield marker_next and marker_next['next']
def _list(self, project=None, kfilter={}, marker=None,
          limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False,
          name=None):
    client = self._client
    qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)
    marker = utils.scope_queue_name(marker, project)
    if marker:
        rank = client.zrank(qset_key, marker)
    else:
        rank = None
    # NOTE: "is not None" keeps rank 0 (marker is the first member) from
    # being treated as "no marker".
    start = rank + 1 if rank is not None else 0

    cursor = (q for q in client.zrange(qset_key, start,
                                       start + limit - 1))
    marker_next = {}

    def denormalizer(info, name):
        queue = {'name': utils.descope_queue_name(name)}
        marker_next['next'] = queue['name']
        if detailed:
            queue['metadata'] = self._unpacker(info[1])

        return queue

    yield utils.QueueListCursor(self._client, cursor, denormalizer)
    yield marker_next and marker_next['next']
def _create(self, name, metadata=None, project=None):
    # TODO(prashanthr_): Implement as a lua script.
    queue_key = utils.scope_queue_name(name, project)
    qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)

    # Check if the queue already exists.
    if self.exists(name, project):
        return False

    queue = {
        'c': 0,
        'cl': 0,
        'm': self._packer(metadata or {}),
        't': timeutils.utcnow_ts()
    }

    # Pipeline ensures atomic inserts.
    with self._client.pipeline() as pipe:
        pipe.zadd(qset_key, 1, queue_key).hmset(queue_key, queue)
        self._message_ctrl._create_msgset(name, project, pipe)

        try:
            pipe.execute()
        except redis.exceptions.ResponseError:
            return False

    return True
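# NOTE: the two _create variants above differ mainly in the zadd call.
# The positional form zadd(key, 1, queue_key) is the pre-3.0 redis-py
# signature; redis-py >= 3.0 only accepts a mapping of members to
# scores. A minimal sketch of the 3.x form, with a hypothetical helper
# name and standalone arguments that are not part of the driver:
def _example_register_queue(client, qset_key, queue_key):
    # The sorted set is an index of queue names, so the score value is
    # not significant; a constant 1 mirrors the driver code above.
    client.zadd(qset_key, {queue_key: 1})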
def _delete(self, name, project=None):
    queue_key = utils.scope_queue_name(name, project)
    qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)

    # NOTE(prashanthr_): Pipelining is used to mitigate race conditions
    with self._client.pipeline() as pipe:
        pipe.zrem(qset_key, queue_key)
        pipe.delete(queue_key)
        pipe.execute()
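# NOTE: redis-py pipelines are transactional by default, so the queued
# zrem/delete above are wrapped in MULTI/EXEC and applied together when
# execute() runs. A minimal sketch of the same pattern outside the
# driver (helper name and arguments are illustrative only):
def _example_delete_queue(client, qset_key, queue_key):
    with client.pipeline() as pipe:
        pipe.zrem(qset_key, queue_key)   # drop the name from the index
        pipe.delete(queue_key)           # drop the queue's hash
        pipe.execute()                   # both commands commit together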
def list(self, queue, project=None, marker=None, limit=10):
    client = self._client
    subset_key = utils.scope_subscription_ids_set(queue, project,
                                                  SUBSCRIPTION_IDS_SUFFIX)
    marker = utils.scope_queue_name(marker, project)
    rank = client.zrank(subset_key, marker)
    # NOTE: "is not None" keeps rank 0 (marker is the first member) from
    # being treated as "no marker".
    start = rank + 1 if rank is not None else 0

    cursor = (q for q in client.zrange(subset_key, start,
                                       start + limit - 1))
    marker_next = {}

    def denormalizer(record, sid):
        ret = {
            'id': sid,
            'source': record[0],
            'subscriber': record[1],
            'ttl': record[2],
            'options': json.loads(record[3]),
        }
        marker_next['next'] = sid

        return ret

    yield utils.SubscriptionListCursor(self._client, cursor, denormalizer)
    yield marker_next and marker_next['next']
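# NOTE: like the queue listings above, this generator yields the list
# cursor first and the next-page marker second, so callers drain it in
# two steps. A hedged sketch of that consumption pattern (controller
# and arguments are illustrative, not taken from the driver):
def _example_consume_listing(controller, queue):
    interaction = controller.list(queue, project='example')
    subscriptions = list(next(interaction))   # first yield: the cursor
    marker = next(interaction)                # second yield: next marker
    return subscriptions, marker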
def _insert(self, project, queue, pool):
    queue_key = utils.scope_queue_name(queue, project)
    catalogue_project_key = utils.scope_pool_catalogue(project,
                                                       CATALOGUE_SUFFIX)
    catalogue_queue_key = utils.scope_pool_catalogue(queue_key,
                                                     CATALOGUE_SUFFIX)

    # Check if the queue already exists (_exists takes project first).
    if self._exists(project, queue):
        return False

    catalogue = {
        'p': project,
        'p_q': queue,
        'p_p': pool
    }

    # Pipeline ensures atomic inserts.
    with self._client.pipeline() as pipe:
        pipe.zadd(catalogue_project_key, {queue_key: 1})
        pipe.hmset(catalogue_queue_key, catalogue)

        try:
            pipe.execute()
        except redis.exceptions.ResponseError:
            msgtmpl = _(u'CatalogueController:insert %(prj)s:'
                        '%(queue)s %(pool)s failed')
            LOG.exception(msgtmpl,
                          {'prj': project, 'queue': queue, 'pool': pool})
            return False

    msgtmpl = _(u'CatalogueController:insert %(prj)s:%(queue)s'
                ':%(pool)s, success')
    LOG.info(msgtmpl,
             {'prj': project, 'queue': queue, 'pool': pool})
    return True
def delete(self, project, queue):
    # (gengchc): Check if the queue already exists.
    if not self._exists(project, queue):
        return True

    queue_key = utils.scope_queue_name(queue, project)
    catalogue_project_key = utils.scope_pool_catalogue(project,
                                                       CATALOGUE_SUFFIX)
    catalogue_queue_key = utils.scope_pool_catalogue(queue_key,
                                                     CATALOGUE_SUFFIX)

    # (gengchc) Pipeline ensures atomic inserts.
    with self._client.pipeline() as pipe:
        pipe.zrem(catalogue_project_key, queue_key)
        pipe.delete(catalogue_queue_key)

        try:
            pipe.execute()
        except redis.exceptions.ResponseError:
            msgtmpl = _(u'CatalogueController:delete %(prj)s'
                        ':%(queue)s failed')
            LOG.info(msgtmpl,
                     {'prj': project, 'queue': queue})
            return False

    msgtmpl = _(u'CatalogueController:delete %(prj)s:%(queue)s success')
    LOG.info(msgtmpl,
             {'prj': project, 'queue': queue})
def _insert(self, project, queue, pool):
    queue_key = utils.scope_queue_name(queue, project)
    catalogue_project_key = utils.scope_pool_catalogue(
        project, CATALOGUE_SUFFIX)
    catalogue_queue_key = utils.scope_pool_catalogue(
        queue_key, CATALOGUE_SUFFIX)

    # Check if the queue already exists (_exists takes project first).
    if self._exists(project, queue):
        return False

    catalogue = {'p': project, 'p_q': queue, 'p_p': pool}

    # Pipeline ensures atomic inserts.
    with self._client.pipeline() as pipe:
        pipe.zadd(catalogue_project_key, {queue_key: 1})
        pipe.hmset(catalogue_queue_key, catalogue)

        try:
            pipe.execute()
        except redis.exceptions.ResponseError:
            msgtmpl = _(u'CatalogueController:insert %(prj)s:'
                        '%(queue)s %(pool)s failed')
            LOG.exception(msgtmpl, {
                'prj': project,
                'queue': queue,
                'pool': pool
            })
            return False

    msgtmpl = _(u'CatalogueController:insert %(prj)s:%(queue)s'
                ':%(pool)s, success')
    LOG.info(msgtmpl, {'prj': project, 'queue': queue, 'pool': pool})
    return True
def set_metadata(self, name, metadata, project=None):
    if not self.exists(name, project):
        raise errors.QueueDoesNotExist(name, project)

    key = utils.scope_queue_name(name, project)
    fields = {'m': self._packer(metadata)}

    self._client.hmset(key, fields)
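# NOTE: hmset is deprecated in recent redis-py releases in favour of
# hset with a mapping keyword. The driver above still calls hmset; a
# minimal sketch of the equivalent call on redis-py >= 3.5 (helper name
# and arguments are illustrative):
def _example_set_metadata(client, queue_key, packed_metadata):
    client.hset(queue_key, mapping={'m': packed_metadata})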
def get_metadata(self, name, project=None):
    if not self.exists(name, project):
        raise errors.QueueDoesNotExist(name, project)

    queue_key = utils.scope_queue_name(name, project)
    metadata = self._get_queue_info(queue_key, b'm', None)[0]

    return self._unpacker(metadata)
def get(self, project, queue):
    queue_key = utils.scope_queue_name(queue, project)
    catalogue_queue_key = \
        utils.scope_pool_catalogue(queue_key, CATALOGUE_SUFFIX)
    ctlg = self._client.hgetall(catalogue_queue_key)

    if ctlg is None or len(ctlg) == 0:
        raise errors.QueueNotMapped(queue, project)

    return _normalize(ctlg)
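# NOTE: redis-py's hgetall returns an empty dict for a missing key, so
# the "is None or len(ctlg) == 0" guard above is effectively an
# emptiness check. A minimal sketch of the same lookup pattern with a
# plain client (helper name and exception are stand-ins, not the
# driver's errors.QueueNotMapped):
def _example_lookup_catalogue(client, catalogue_queue_key):
    ctlg = client.hgetall(catalogue_queue_key)
    if not ctlg:
        raise KeyError(catalogue_queue_key)
    return ctlg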
def test_scope_queue_name(self):
    self.assertEqual('.my-q', utils.scope_queue_name('my-q'))
    self.assertEqual('.my-q', utils.scope_queue_name('my-q', None))
    self.assertEqual('123.my-q', utils.scope_queue_name('my-q', '123'))
    self.assertEqual('123.my-q_1', utils.scope_queue_name('my-q_1', '123'))

    self.assertEqual('.', utils.scope_queue_name())
    self.assertEqual('123.', utils.scope_queue_name(None, '123'))
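# NOTE: a minimal sketch of the behaviour the assertions above imply
# for utils.scope_queue_name -- "<project>.<queue>" with empty strings
# for missing parts. This is inferred from the test only and is not the
# actual implementation:
def _example_scope_queue_name(name=None, project=None):
    return '{0}.{1}'.format(project or '', name or '')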
def _update(self, project, queue, pool):
    # Check if the queue already exists.
    if not self._exists(project, queue):
        raise errors.QueueNotMapped(queue, project)

    queue_key = utils.scope_queue_name(queue, project)
    catalogue_queue_key = utils.scope_pool_catalogue(queue_key,
                                                     CATALOGUE_SUFFIX)

    with self._client.pipeline() as pipe:
        pipe.hset(catalogue_queue_key, "pl", pool)

        try:
            pipe.execute()
        except redis.exceptions.ResponseError:
            msgtmpl = _(u'CatalogueController:_update %(prj)s'
                        ':%(queue)s:%(pool)s failed')
            LOG.exception(msgtmpl,
                          {'prj': project, 'queue': queue, 'pool': pool})
            return False

    msgtmpl = _(u'CatalogueController:_update %(prj)s:%(queue)s'
                ':%(pool)s')
    LOG.info(msgtmpl,
             {'prj': project, 'queue': queue, 'pool': pool})
def delete(self, project, queue):
    # (gengchc): Check if the queue already exists.
    if not self._exists(project, queue):
        return True

    queue_key = utils.scope_queue_name(queue, project)
    catalogue_project_key = utils.scope_pool_catalogue(
        project, CATALOGUE_SUFFIX)
    catalogue_queue_key = utils.scope_pool_catalogue(
        queue_key, CATALOGUE_SUFFIX)

    # (gengchc) Pipeline ensures atomic inserts.
    with self._client.pipeline() as pipe:
        pipe.zrem(catalogue_project_key, queue_key)
        pipe.delete(catalogue_queue_key)

        try:
            pipe.execute()
        except redis.exceptions.ResponseError:
            msgtmpl = _(u'CatalogueController:delete %(prj)s'
                        ':%(queue)s failed')
            LOG.info(msgtmpl,
                     {'prj': project, 'queue': queue})
            return False

    msgtmpl = _(u'CatalogueController:delete %(prj)s:%(queue)s success')
    LOG.info(msgtmpl,
             {'prj': project, 'queue': queue})
def _update(self, project, queue, pool):
    # Check if the queue already exists.
    if not self._exists(project, queue):
        raise errors.QueueNotMapped(queue, project)

    queue_key = utils.scope_queue_name(queue, project)
    catalogue_queue_key = utils.scope_pool_catalogue(
        queue_key, CATALOGUE_SUFFIX)

    with self._client.pipeline() as pipe:
        pipe.hset(catalogue_queue_key, "pl", pool)

        try:
            pipe.execute()
        except redis.exceptions.ResponseError:
            msgtmpl = _(u'CatalogueController:_update %(prj)s'
                        ':%(queue)s:%(pool)s failed')
            LOG.exception(msgtmpl, {
                'prj': project,
                'queue': queue,
                'pool': pool
            })
            return False

    msgtmpl = _(u'CatalogueController:_update %(prj)s:%(queue)s'
                ':%(pool)s')
    LOG.info(msgtmpl, {'prj': project, 'queue': queue, 'pool': pool})
def _exists(self, project, queue):
    queue_key = utils.scope_queue_name(queue, project)
    catalogue_queue_key = \
        utils.scope_pool_catalogue(queue_key, CATALOGUE_SUFFIX)

    return self._client.exists(catalogue_queue_key)
def _calculate_resource_count(self, project=None):
    client = self._client
    qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)

    return client.zlexcount(qset_key, '-', '+')
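# NOTE: zlexcount with the open lexicographic bounds '-' and '+' counts
# every member of the sorted set, so the call above returns the number
# of queues registered for the scope (zcard would give the same total).
# A minimal illustrative sketch (helper name and arguments assumed):
def _example_count_queues(client, qset_key):
    return client.zlexcount(qset_key, '-', '+')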
def _exists(self, name, project=None):
    # TODO(prashanthr_): Cache this lookup
    queue_key = utils.scope_queue_name(name, project)
    qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)

    return self._client.zrank(qset_key, queue_key) is not None
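# NOTE: existence is answered from the queue index rather than the
# queue hash itself: zrank returns None when the member is absent. A
# minimal sketch of the same check with a plain client (helper name and
# arguments are illustrative):
def _example_queue_exists(client, qset_key, queue_key):
    return client.zrank(qset_key, queue_key) is not None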
def create(self, queue, metadata, project=None,
           limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
    queue_ctrl = self.driver.queue_controller
    msg_ctrl = self.driver.message_controller

    claim_ttl = metadata['ttl']
    grace = metadata['grace']

    now = timeutils.utcnow_ts()
    msg_ttl = claim_ttl + grace
    claim_expires = now + claim_ttl
    msg_expires = claim_expires + grace

    # Get the maxClaimCount and deadLetterQueue from current queue's meta
    queue_meta = queue_ctrl.get(queue, project=project)

    claim_id = uuidutils.generate_uuid()
    claimed_msgs = []

    # NOTE(kgriffs): Claim some messages
    msgset_key = utils.msgset_key(queue, project)
    claimed_ids = self._claim_messages(msgset_key, now, limit,
                                       claim_id, claim_expires,
                                       msg_ttl, msg_expires)

    if claimed_ids:
        claimed_msgs = messages.Message.from_redis_bulk(claimed_ids,
                                                        self._client)
        claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs]

        # NOTE(kgriffs): Persist claim records
        with self._client.pipeline() as pipe:
            claim_msgs_key = utils.scope_claim_messages(
                claim_id, CLAIM_MESSAGES_SUFFIX)

            for mid in claimed_ids:
                pipe.rpush(claim_msgs_key, mid)

            pipe.expire(claim_msgs_key, claim_ttl)

            claim_info = {
                'id': claim_id,
                't': claim_ttl,
                'e': claim_expires,
                'n': len(claimed_ids),
            }

            pipe.hmset(claim_id, claim_info)
            pipe.expire(claim_id, claim_ttl)

            # NOTE(kgriffs): Add the claim ID to a set so that
            # existence checks can be performed quickly. This
            # is also used as a watch key in order to guard
            # against race conditions.
            #
            # A sorted set is used to facilitate cleaning
            # up the IDs of expired claims.
            claims_set_key = utils.scope_claims_set(queue, project,
                                                    QUEUE_CLAIMS_SUFFIX)

            pipe.zadd(claims_set_key, {claim_id: claim_expires})
            pipe.execute()

            if ('_max_claim_count' in queue_meta and
                    '_dead_letter_queue' in queue_meta):
                claimed_msgs_removed = []
                for msg in claimed_msgs:
                    if msg:
                        claimed_count = msg['claim_count']
                        if claimed_count < queue_meta['_max_claim_count']:
                            # 1. Save the new claim count for the message.
                            claim_count = claimed_count + 1
                            dic = {"c.c": claim_count}
                            pipe.hmset(msg['id'], dic)
                            pipe.execute()
                        else:
                            # 2. The message's claim count has reached the
                            # max claim count defined for the queue, so
                            # move the message to the dead letter queue
                            # and modify its ttl.
                            # NOTE(gengchc): We move the message by moving
                            # its message id from the queue to the dead
                            # letter queue directly. That means the queue
                            # and the dead letter queue must be created on
                            # the same pool.
                            ddl = utils.scope_queue_name(
                                queue_meta['_dead_letter_queue'], project)
                            ddl_ttl = queue_meta.get(
                                "_dead_letter_queue_messages_ttl")
                            dic = {"t": msg['ttl']}
                            if ddl_ttl:
                                dic = {"t": ddl_ttl}
                            pipe.hmset(msg['id'], dic)
                            queueproject = ddl.split('.')
                            msgs_key_ddl = utils.msgset_key(
                                queueproject[1], queueproject[0])
                            counter_key_ddl = utils.scope_queue_index(
                                queueproject[1], queueproject[0],
                                MESSAGE_RANK_COUNTER_SUFFIX)
                            msgs_key = utils.msgset_key(
                                queue, project=project)
                            pipe.zrem(msgs_key, msg['id'])
                            message_ids = [msg['id']]
                            msg_ctrl._index_messages(msgs_key_ddl,
                                                     counter_key_ddl,
                                                     message_ids)
                            pipe.execute()
                            # Record the dead letter message so it can be
                            # removed from claimed_msgs afterwards.
                            claimed_msgs_removed.append(msg)

                # Remove dead letter messages from claimed_msgs.
                for msg_remove in claimed_msgs_removed:
                    claimed_msgs.remove(msg_remove)

                if len(claimed_msgs) == 0:
                    return None, iter([])

    return claim_id, claimed_msgs
def create(self, queue, metadata, project=None,
           limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
    queue_ctrl = self.driver.queue_controller
    msg_ctrl = self.driver.message_controller

    claim_ttl = metadata['ttl']
    grace = metadata['grace']

    now = timeutils.utcnow_ts()
    msg_ttl = claim_ttl + grace
    claim_expires = now + claim_ttl
    msg_expires = claim_expires + grace

    # Get the maxClaimCount and deadLetterQueue from current queue's meta
    queue_meta = queue_ctrl.get(queue, project=project)

    claim_id = uuidutils.generate_uuid()
    claimed_msgs = []

    # NOTE(kgriffs): Claim some messages
    msgset_key = utils.msgset_key(queue, project)
    claimed_ids = self._claim_messages(msgset_key, now, limit,
                                       claim_id, claim_expires,
                                       msg_ttl, msg_expires)

    if claimed_ids:
        claimed_msgs = messages.Message.from_redis_bulk(
            claimed_ids, self._client)
        claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs]

        # NOTE(kgriffs): Persist claim records
        with self._client.pipeline() as pipe:
            claim_msgs_key = utils.scope_claim_messages(
                claim_id, CLAIM_MESSAGES_SUFFIX)

            for mid in claimed_ids:
                pipe.rpush(claim_msgs_key, mid)

            pipe.expire(claim_msgs_key, claim_ttl)

            claim_info = {
                'id': claim_id,
                't': claim_ttl,
                'e': claim_expires,
                'n': len(claimed_ids),
            }

            pipe.hmset(claim_id, claim_info)
            pipe.expire(claim_id, claim_ttl)

            # NOTE(kgriffs): Add the claim ID to a set so that
            # existence checks can be performed quickly. This
            # is also used as a watch key in order to guard
            # against race conditions.
            #
            # A sorted set is used to facilitate cleaning
            # up the IDs of expired claims.
            claims_set_key = utils.scope_claims_set(
                queue, project, QUEUE_CLAIMS_SUFFIX)

            pipe.zadd(claims_set_key, claim_expires, claim_id)
            pipe.execute()

            if ('_max_claim_count' in queue_meta and
                    '_dead_letter_queue' in queue_meta):
                claimed_msgs_removed = []
                for msg in claimed_msgs:
                    if msg:
                        claimed_count = msg['claim_count']
                        if claimed_count < queue_meta['_max_claim_count']:
                            # 1. Save the new claim count for the message.
                            claim_count = claimed_count + 1
                            dic = {"c.c": claim_count}
                            pipe.hmset(msg['id'], dic)
                            pipe.execute()
                        else:
                            # 2. The message's claim count has reached the
                            # max claim count defined for the queue, so
                            # move the message to the dead letter queue
                            # and modify its ttl.
                            # NOTE(gengchc): We move the message by moving
                            # its message id from the queue to the dead
                            # letter queue directly. That means the queue
                            # and the dead letter queue must be created on
                            # the same pool.
                            ddl = utils.scope_queue_name(
                                queue_meta['_dead_letter_queue'], project)
                            ddl_ttl = queue_meta.get(
                                "_dead_letter_queue_messages_ttl")
                            dic = {"t": msg['ttl']}
                            if ddl_ttl:
                                dic = {"t": ddl_ttl}
                            pipe.hmset(msg['id'], dic)
                            queueproject = ddl.split('.')
                            msgs_key_ddl = utils.msgset_key(
                                queueproject[1], queueproject[0])
                            counter_key_ddl = utils.scope_queue_index(
                                queueproject[1], queueproject[0],
                                MESSAGE_RANK_COUNTER_SUFFIX)
                            msgs_key = utils.msgset_key(queue, project=project)
                            pipe.zrem(msgs_key, msg['id'])
                            message_ids = [msg['id']]
                            msg_ctrl._index_messages(
                                msgs_key_ddl, counter_key_ddl, message_ids)
                            pipe.execute()
                            # Record the dead letter message so it can be
                            # removed from claimed_msgs afterwards.
                            claimed_msgs_removed.append(msg)

                # Remove dead letter messages from claimed_msgs.
                for msg_remove in claimed_msgs_removed:
                    claimed_msgs.remove(msg_remove)

                if len(claimed_msgs) == 0:
                    return None, iter([])

    return claim_id, claimed_msgs