def claimed(self, queue_name, claim_id, expires=None, limit=None, project=None):
    """Return the messages currently held by the given claim.

    :param expires: only match claims expiring after this time
        (defaults to now)
    :param limit: optional cap on the number of messages returned
    :returns: utils.HookedCursor of message docs with a 'claim' key
    """
    if expires is None:
        expires = timeutils.utcnow()

    query = {
        'c.id': claim_id,
        'c.e': {'$gt': expires},
        'q': queue_name,
        'p': project,
    }

    # NOTE(kgriffs): Claimed messages must be queried from
    # the primary to avoid a race condition caused by the
    # multi-phased "create claim" algorithm.
    primary = pymongo.read_preferences.ReadPreference.PRIMARY
    cursor = self._col.find(query, sort=[('k', 1)], read_preference=primary)

    if limit:
        cursor = cursor.limit(limit)

    now = timeutils.utcnow()

    def denormalizer(msg):
        doc = _basic_message(msg, now)
        doc['claim'] = msg['c']
        return doc

    return utils.HookedCursor(cursor, denormalizer)
def claimed(self, queue_name, claim_id=None, expires=None, limit=None, project=None):
    """List unexpired claimed messages; any claim when claim_id is None.

    :returns: utils.HookedCursor of message docs with a 'claim' key
    """
    query = {
        'q': queue_name,
        'p': project,
        # When no claim id is given, match any non-null claim
        # (lookup over c.id to use the index).
        'c.id': claim_id or {'$ne': None},
        'c.e': {'$gt': expires or timeutils.utcnow()},
    }

    cursor = self._col.find(query, sort=[('k', 1)])
    if limit:
        cursor = cursor.limit(limit)

    now = timeutils.utcnow()

    def denormalizer(msg):
        doc = _basic_message(msg, now)
        doc['claim'] = msg['c']
        return doc

    return utils.HookedCursor(cursor, denormalizer)
def claimed(self, queue_name, claim_id, expires=None, limit=None, project=None):
    """Return messages attached to an unexpired claim."""
    cutoff = expires or timeutils.utcnow()
    query = {'c.id': claim_id,
             'c.e': {'$gt': cutoff},
             'q': queue_name,
             'p': project}

    # NOTE(review): reads from the primary - presumably to avoid
    # stale secondary reads right after claim creation; confirm.
    preference = pymongo.read_preferences.ReadPreference.PRIMARY
    found = self._col.find(query, sort=[('k', 1)], read_preference=preference)

    if limit:
        found = found.limit(limit)

    now = timeutils.utcnow()

    def attach_claim(msg):
        doc = _basic_message(msg, now)
        doc['claim'] = msg['c']
        return doc

    return utils.HookedCursor(found, attach_claim)
def claimed(self, queue_id, claim_id=None, expires=None, limit=None):
    """List unexpired claimed messages in `queue_id`.

    When claim_id is None, match any claim (c.id lookup uses the index).
    """
    query = {
        'q': utils.to_oid(queue_id),
        'c.id': claim_id or {'$ne': None},
        'c.e': {'$gt': expires or timeutils.utcnow()},
    }

    cursor = self._col.find(query, sort=[('_id', 1)])
    if limit:
        cursor = cursor.limit(limit)

    now = timeutils.utcnow()

    def denormalizer(msg):
        oid = msg['_id']
        age = now - utils.oid_utc(oid)
        return {
            'id': str(oid),
            'age': age.seconds,
            'ttl': msg['t'],
            'body': msg['b'],
            'claim': msg['c'],
        }

    return utils.HookedCursor(cursor, denormalizer)
def claimed(self, queue_name, claim_id, expires=None, limit=None, project=None):
    """Yield messages currently held by the given claim."""
    if expires is None:
        expires = timeutils.utcnow()

    # Query the primary directly (see claim-creation flow).
    msgs = self._col.find(
        {'c.id': claim_id,
         'c.e': {'$gt': expires},
         'q': queue_name,
         'p': project},
        sort=[('k', 1)],
        read_preference=pymongo.read_preferences.ReadPreference.PRIMARY)

    if limit:
        msgs = msgs.limit(limit)

    now = timeutils.utcnow()

    def to_doc(msg):
        doc = _basic_message(msg, now)
        doc['claim'] = msg['c']
        return doc

    return utils.HookedCursor(msgs, to_doc)
def delete(self, queue_name, message_id, project=None, claim=None):
    """Remove a single message, honoring claim ownership.

    :param queue_name: name of the queue the message lives in
    :param message_id: stringified ObjectId of the message
    :param project: project scope for the queue
    :param claim: claim ID the caller believes it holds, or None
    :raises: exceptions.MessageIsClaimed when no claim was supplied
        but the message is claimed
    :raises: exceptions.MessageIsClaimedBy when the supplied claim
        does not match the message's current claim
    """
    # NOTE(cpp-cabrera): return early - this is an invalid message
    # id so we won't be able to find it any way
    mid = utils.to_oid(message_id)
    if mid is None:
        return

    query = {"q": queue_name, "p": project, "_id": mid}

    # NOTE(cpp-cabrera): return early - the user gaves us an
    # invalid claim id and that renders the rest of this
    # request moot
    # NOTE(review): when claim is None this relies on utils.to_oid(None)
    # returning a usable (non-None) value, otherwise unclaimed deletes
    # would be silently skipped here - confirm against utils.to_oid.
    cid = utils.to_oid(claim)
    if cid is None:
        return

    now = timeutils.utcnow()
    # Expired messages are treated as already gone.
    query["e"] = {"$gt": now}
    message = self._col.find_one(query)

    if message is None:
        return

    # Claimed means: has a claim id AND that claim has not expired.
    is_claimed = message["c"]["id"] is not None and message["c"]["e"] > now

    if claim is None:
        if is_claimed:
            raise exceptions.MessageIsClaimed(message_id)
    else:
        if message["c"]["id"] != cid:
            raise exceptions.MessageIsClaimedBy(message_id, claim)

    # w=0: unacknowledged (fire-and-forget) remove.
    self._col.remove(query["_id"], w=0)
def list(self, queue, project=None, marker=None, limit=10, echo=False, client_uuid=None):
    """Generator: first yields a cursor of messages, then the next marker.

    :raises: exceptions.MalformedMarker if marker is not an integer
    """
    if marker is not None:
        try:
            marker = int(marker)
        except ValueError:
            raise exceptions.MalformedMarker()

    qid = self._get_queue_id(queue, project)
    messages = self.active(qid, marker, echo, client_uuid)
    messages = messages.limit(limit).sort('_id')

    marker_id = {}
    now = timeutils.utcnow()

    def denormalizer(msg):
        oid = msg['_id']
        # Record the marker of the last message consumed.
        marker_id['next'] = msg['k']
        return {
            'id': str(oid),
            'age': (now - utils.oid_utc(oid)).seconds,
            'ttl': msg['t'],
            'body': msg['b'],
        }

    yield utils.HookedCursor(messages, denormalizer)
    yield str(marker_id['next'])
def get(self, queue, message_ids, project=None):
    """Get one or more unexpired messages by ID.

    :param queue: name of the queue to read from
    :param message_ids: a single message ID or a list of them
    :param project: project scope for the queue
    :returns: utils.HookedCursor of basic message documents
    """
    if not isinstance(message_ids, list):
        message_ids = [message_ids]

    # Drop IDs that do not parse as ObjectIds rather than passing
    # `None` placeholders into the $in list; invalid IDs can never
    # match a stored message anyway.
    message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid]

    now = timeutils.utcnow()

    # Base query, always check expire time
    query = {
        'q': self._get_queue_id(queue, project),
        'e': {'$gt': now},
        '_id': {'$in': message_ids},
    }

    messages = self._col.find(query)

    def denormalizer(msg):
        oid = msg['_id']
        age = now - utils.oid_utc(oid)

        return {
            'id': str(oid),
            'age': age.seconds,
            'ttl': msg['t'],
            'body': msg['b'],
        }

    return utils.HookedCursor(messages, denormalizer)
def list(self, queue_name, project=None, marker=None, limit=None, echo=False, client_uuid=None, include_claimed=False):
    """List messages in the queue, paged by marker.

    Yields exactly two items: an iterator over matching message
    documents, then the next marker (as a string).
    """
    if limit is None:
        limit = CFG.default_message_paging

    if marker is not None:
        try:
            marker = int(marker)
        except ValueError:
            # An unparseable marker can never match anything; yield an
            # empty page and stop. Without this `return`, execution fell
            # through and queried with the raw string marker, and the
            # second `yield` handed callers a cursor where they expect
            # the next-marker string.
            yield iter([])
            return

    messages = self._list(queue_name, marker, echo, client_uuid, include_claimed=include_claimed, project=project)
    messages = messages.limit(limit)

    marker_id = {}
    now = timeutils.utcnow()

    def denormalizer(msg):
        marker_id["next"] = msg["k"]
        return _basic_message(msg, now)

    yield utils.HookedCursor(messages, denormalizer)
    yield str(marker_id["next"])
def delete(self, queue, message_id, project=None, claim=None):
    """Delete a message; when `claim` is given, verify ownership first.

    :raises: exceptions.ClaimNotPermitted when the message exists but
        is not held (unexpired) by the given claim
    """
    try:
        mid = utils.to_oid(message_id)
        query = {
            'q': self._get_queue_id(queue, project),
            '_id': mid
        }

        if claim:
            # Deleting a claimed message: it must exist, be
            # unexpired, and be held by this exact claim.
            now = timeutils.utcnow()
            query['e'] = {'$gt': now}
            message = self._col.find_one(query)

            if message is None:
                return

            cid = utils.to_oid(claim)
            if not ('c' in message
                    and message['c']['id'] == cid
                    and message['c']['e'] > now):
                raise exceptions.ClaimNotPermitted(message_id, claim)

            # w=0: unacknowledged remove.
            self._col.remove(query['_id'], w=0)
        else:
            self._col.remove(query, w=0)
    except exceptions.QueueDoesNotExist:
        # Deleting from a nonexistent queue is a no-op.
        pass
def stats(self, name, project=None):
    """Compute claimed/free/total message counts plus oldest/newest stats.

    :raises: exceptions.QueueDoesNotExist if the queue is missing
    """
    if not self.exists(name, project=project):
        raise exceptions.QueueDoesNotExist(name, project)

    msg_ctrl = self.driver.message_controller
    free = msg_ctrl.active(name, project=project).count()
    total = msg_ctrl.count(name, project=project)

    stats = {'total': total, 'free': free, 'claimed': total - free}

    try:
        first = msg_ctrl.first(name, project=project, sort=1)
        last = msg_ctrl.first(name, project=project, sort=-1)
    except exceptions.QueueIsEmpty:
        # No messages: omit the oldest/newest entries entirely.
        pass
    else:
        now = timeutils.utcnow()
        stats['oldest'] = utils.stat_message(first, now)
        stats['newest'] = utils.stat_message(last, now)

    return {'messages': stats}
def get(self, queue_name, message_id, project=None):
    """Gets a single message by ID.

    :raises: exceptions.MessageDoesNotExist
    """
    mid = utils.to_oid(message_id)
    if mid is None:
        raise exceptions.MessageDoesNotExist(message_id, queue_name, project)

    now = timeutils.utcnow()

    # Hint the _id index; the message must also be unexpired.
    matches = list(self._col.find(
        {'_id': mid, 'q': queue_name, 'p': project, 'e': {'$gt': now}}
    ).limit(1).hint([('_id', 1)]))

    if not matches:
        raise exceptions.MessageDoesNotExist(message_id, queue_name, project)

    return _basic_message(matches[0], now)
def get(self, queue, claim_id, project=None):
    """Return (claim metadata, claimed messages) for an unexpired claim.

    :raises: errors.ClaimDoesNotExist if the id fails to decode or no
        matching, unexpired claim exists
    """
    if project is None:
        project = ''

    cid = utils.cid_decode(claim_id)
    if cid is None:
        raise errors.ClaimDoesNotExist(claim_id, queue, project)

    with self.driver.trans() as trans:
        # Join claims to queues so the lookup is scoped to this
        # project/queue; ttl > age filters out expired claims.
        join = tables.Queues.join(tables.Claims)
        conditions = sa.and_(
            tables.Claims.c.ttl > utils.get_age(tables.Claims.c.created),
            tables.Claims.c.id == cid,
            tables.Queues.c.project == project,
            tables.Queues.c.name == queue)

        sel = sa.sql.select(
            [tables.Claims.c.id,
             tables.Claims.c.ttl,
             tables.Claims.c.created],
            conditions,
            from_obj=[join])

        res = trans.execute(sel).fetchone()

        if res is None:
            raise errors.ClaimDoesNotExist(claim_id, queue, project)

        cid, ttl, created = res
        age = (timeutils.utcnow() - created).seconds
        return {'id': claim_id, 'ttl': ttl, 'age': age}, self.__get(cid)
def test_message_counter(self):
    """Exercise the _get_counter/_inc_counter marker bookkeeping."""
    queue_name = self.queue_name
    iterations = 10

    seed_marker1 = self.queue_controller._get_counter(queue_name, self.project)
    self.assertEqual(seed_marker1, 1, 'First marker is 1')

    for i in range(iterations):
        self.controller.post(queue_name, [{'ttl': 60}], 'uuid', project=self.project)
        # Reads must be stable between posts.
        marker1 = self.queue_controller._get_counter(queue_name, self.project)
        marker2 = self.queue_controller._get_counter(queue_name, self.project)
        marker3 = self.queue_controller._get_counter(queue_name, self.project)
        self.assertEqual(marker1, marker2)
        self.assertEqual(marker2, marker3)
        # Each post bumps the counter by one; the seed was 1.
        self.assertEqual(marker1, i + 2)

    new_value = self.queue_controller._inc_counter(queue_name, self.project)
    self.assertIsNotNone(new_value)

    # Default increment adds exactly 1.
    value_before = self.queue_controller._get_counter(queue_name, project=self.project)
    new_value = self.queue_controller._inc_counter(queue_name, project=self.project)
    self.assertIsNotNone(new_value)
    value_after = self.queue_controller._get_counter(queue_name, project=self.project)
    self.assertEqual(value_after, value_before + 1)

    # An explicit amount is applied verbatim and returned.
    value_before = value_after
    new_value = self.queue_controller._inc_counter(queue_name, project=self.project, amount=7)
    value_after = self.queue_controller._get_counter(queue_name, project=self.project)
    self.assertEqual(value_after, value_before + 7)
    self.assertEqual(value_after, new_value)

    reference_value = value_after

    # Within the window, a repeated increment is a no-op returning None.
    unchanged = self.queue_controller._inc_counter(queue_name, project=self.project, window=10)
    self.assertIsNone(unchanged)

    # Advance the clock past the window so the increment takes effect.
    now = timeutils.utcnow() + datetime.timedelta(seconds=10)
    timeutils_utcnow = 'marconi.openstack.common.timeutils.utcnow'
    with mock.patch(timeutils_utcnow) as mock_utcnow:
        mock_utcnow.return_value = now
        changed = self.queue_controller._inc_counter(queue_name, project=self.project, window=5)
        self.assertEqual(changed, reference_value + 1)
def _remove_expired(self, queue_id):
    """Removes all expired messages except for the most recent in each queue.

    This method is used in lieu of mongo's TTL index since we must
    always leave at least one message in the queue for calculating
    the next marker.

    Note that expired messages are only removed if their count
    exceeds options.CFG.gc_threshold.

    :param queue_id: id for the queue from which to remove
        expired messages
    """
    if self._count_expired(queue_id) < options.CFG.gc_threshold:
        # Not enough garbage to make a sweep worthwhile.
        return

    # The highest-marker message stays behind so the next
    # marker can still be computed.
    head = self._col.find_one({'q': queue_id},
                              sort=[('k', -1)],
                              fields={'_id': 1})

    if head is None:
        # Assume queue was just deleted via a parallel request
        LOG.warning(_('Queue %s is empty or missing.') % queue_id)
        return

    self._col.remove({'q': queue_id,
                      'e': {'$lte': timeutils.utcnow()},
                      '_id': {'$ne': head['_id']}})
def delete(self, queue_name, message_id, project=None, claim=None):
    """Remove a single message, honoring claim ownership.

    :raises: exceptions.MessageIsClaimed when no claim was supplied
        but the message is claimed
    :raises: exceptions.MessageIsClaimedBy when the supplied claim
        does not match the message's current claim
    """
    # NOTE(cpp-cabrera): return early - this is an invalid message
    # id so we won't be able to find it any way
    mid = utils.to_oid(message_id)
    if mid is None:
        return

    query = {'q': queue_name, 'p': project, '_id': mid}

    # NOTE(cpp-cabrera): return early - the user gaves us an
    # invalid claim id and that renders the rest of this
    # request moot
    # NOTE(review): when claim is None this relies on utils.to_oid(None)
    # returning a usable (non-None) value, otherwise unclaimed deletes
    # would be silently skipped here - confirm against utils.to_oid.
    cid = utils.to_oid(claim)
    if cid is None:
        return

    now = timeutils.utcnow()
    # Expired messages are treated as already gone.
    query['e'] = {'$gt': now}
    message = self._col.find_one(query)

    if message is None:
        return

    # Claimed means: has a claim id AND that claim has not expired.
    is_claimed = (message['c']['id'] is not None and
                  message['c']['e'] > now)

    if claim is None:
        if is_claimed:
            raise exceptions.MessageIsClaimed(message_id)
    else:
        if message['c']['id'] != cid:
            raise exceptions.MessageIsClaimedBy(message_id, claim)

    # w=0: unacknowledged (fire-and-forget) remove.
    self._col.remove(query['_id'], w=0)
def list(self, queue_name, project=None, marker=None, limit=10, echo=False, client_uuid=None, include_claimed=False):
    """List messages in the queue, paged by marker.

    Yields exactly two items: an iterator over matching message
    documents, then the next marker (as a string).
    """
    if marker is not None:
        try:
            marker = int(marker)
        except ValueError:
            # An unparseable marker can never match anything; yield an
            # empty page and stop. Without this `return`, execution fell
            # through and queried with the raw string marker, and the
            # second `yield` handed callers a cursor where they expect
            # the next-marker string.
            yield iter([])
            return

    messages = self._list(queue_name, marker, echo, client_uuid, include_claimed=include_claimed, project=project)
    messages = messages.limit(limit)

    marker_id = {}
    now = timeutils.utcnow()

    def denormalizer(msg):
        marker_id['next'] = msg['k']
        return _basic_message(msg, now)

    yield utils.HookedCursor(messages, denormalizer)
    yield str(marker_id['next'])
def get(self, queue, claim_id, project=None):
    """Return (claim metadata, claimed messages) for an unexpired claim.

    :raises: errors.ClaimDoesNotExist if the id fails to decode or no
        matching, unexpired claim exists
    """
    if project is None:
        project = ''

    cid = utils.cid_decode(claim_id)
    if cid is None:
        raise errors.ClaimDoesNotExist(claim_id, queue, project)

    with self.driver.trans() as trans:
        # Join claims to queues so the lookup is scoped to this
        # project/queue; ttl > age filters out expired claims.
        sel = sa.sql.select([tables.Claims.c.id,
                             tables.Claims.c.ttl,
                             tables.Claims.c.created],
                            sa.and_(tables.Claims.c.ttl >
                                    utils.get_age(tables.Claims.c.created),
                                    tables.Claims.c.id == cid,
                                    tables.Queues.c.project == project,
                                    tables.Queues.c.name == queue),
                            from_obj=[tables.Queues.join(tables.Claims)])

        res = trans.execute(sel).fetchone()

        if res is None:
            raise errors.ClaimDoesNotExist(claim_id, queue, project)

        cid, ttl, created = res

        return (
            {'id': claim_id,
             'ttl': ttl,
             'age': (timeutils.utcnow() - created).seconds},
            self.__get(cid)
        )
def _test_post(self, sample_messages):
    """Post `sample_messages`, then verify single and bulk GETs.

    Checks the 201 response shape, per-message GET (including wrong
    project id -> 404), message aging, and the bulk-GET ids query.
    """
    sample_doc = jsonutils.dumps(sample_messages)

    result = self.simulate_post(self.messages_path, self.project_id, body=sample_doc, headers=self.headers)
    self.assertEqual(self.srmock.status, falcon.HTTP_201)

    result_doc = jsonutils.loads(result[0])

    msg_ids = self._get_msg_ids(self.srmock.headers_dict)
    self.assertEqual(len(msg_ids), len(sample_messages))

    expected_resources = [six.text_type(self.messages_path + '/' + id) for id in msg_ids]
    self.assertEqual(expected_resources, result_doc['resources'])
    self.assertFalse(result_doc['partial'])

    self.assertEqual(len(msg_ids), len(sample_messages))

    # NOTE(review): assumes ttl values are unique across
    # sample_messages, otherwise this lookup loses entries - confirm.
    lookup = dict([(m['ttl'], m['body']) for m in sample_messages])

    # Test GET on the message resource directly
    # NOTE(cpp-cabrera): force the passing of time to age a message
    timeutils_utcnow = 'marconi.openstack.common.timeutils.utcnow'
    now = timeutils.utcnow() + datetime.timedelta(seconds=10)
    with mock.patch(timeutils_utcnow) as mock_utcnow:
        mock_utcnow.return_value = now
        for msg_id in msg_ids:
            message_uri = self.messages_path + '/' + msg_id

            # Wrong project ID
            self.simulate_get(message_uri, '777777')
            self.assertEqual(self.srmock.status, falcon.HTTP_404)

            # Correct project ID
            result = self.simulate_get(message_uri, self.project_id)
            self.assertEqual(self.srmock.status, falcon.HTTP_200)
            self.assertEqual(self.srmock.headers_dict['Content-Location'], message_uri)

            # Check message properties
            message = jsonutils.loads(result[0])
            self.assertEqual(message['href'], message_uri)
            self.assertEqual(message['body'], lookup[message['ttl']])

            # no negative age
            # NOTE(cpp-cabrera): testtools lacks GreaterThanEqual on py26
            self.assertThat(message['age'], matchers.GreaterThan(-1))

    # Test bulk GET
    query_string = 'ids=' + ','.join(msg_ids)
    result = self.simulate_get(self.messages_path, self.project_id, query_string=query_string)

    self.assertEqual(self.srmock.status, falcon.HTTP_200)
    result_doc = jsonutils.loads(result[0])
    # Every posted ttl must appear in the bulk result.
    expected_ttls = set(m['ttl'] for m in sample_messages)
    actual_ttls = set(m['ttl'] for m in result_doc)
    self.assertFalse(expected_ttls - actual_ttls)
def bulk_get(self, queue_name, message_ids, project=None):
    """Look up several messages at once, skipping unparseable IDs.

    :returns: an iterable of basic message documents (empty when no
        valid IDs were supplied)
    """
    oids = [mid for mid in map(utils.to_oid, message_ids) if mid]
    if not oids:
        return iter([])

    now = timeutils.utcnow()

    # Base query, always check expire time
    query = {'q': queue_name,
             'p': project,
             '_id': {'$in': oids},
             'e': {'$gt': now}}

    # NOTE(flaper87): Should this query
    # be sorted?
    cursor = self._col.find(query).hint([('_id', 1)])

    return utils.HookedCursor(cursor, lambda msg: _basic_message(msg, now))
def _list(self, queue_name, marker=None, echo=False, client_uuid=None, fields=None, include_claimed=False, project=None, sort=1):
    """Message document listing helper.

    :param queue_name: Name of the queue to list
    :param project: Project `queue_name` belongs to.
    :param marker: Message marker from which to start iterating
    :param echo: Whether to return messages that match client_uuid
    :param client_uuid: UUID for the client that originated this request
    :param fields: Fields to include in emitted documents as a dict
    :param include_claimed: Whether to include claimed messages,
        not just active ones
    :param sort: (Default 1) Sort order for the listing. Pass 1 for
        ascending (oldest message first), or -1 for descending
        (newest message first).
    :returns: MongoDB cursor
    """
    if sort not in (1, -1):
        raise ValueError(u'sort must be either 1 (ascending) '
                         u'or -1 (descending)')

    now = timeutils.utcnow()

    # Scope to this queue/project and exclude expired messages.
    query = {'p': project, 'q': queue_name, 'e': {'$gt': now}}

    if not echo:
        # Hide the requesting client's own messages.
        query['u'] = {'$ne': client_uuid}

    if marker:
        query['k'] = {'$gt': marker}

    if not include_claimed:
        # Only include messages that are not part of
        # any claim, or are part of an expired claim.
        query['c.e'] = {'$lte': now}

    # NOTE(flaper87): Suggest the index to use for this query
    cursor = self._col.find(query, fields=fields, sort=[('k', sort)])
    return cursor.hint(self.active_fields)
def update(self, queue, claim_id, metadata, project=None):
    """Update a claim's TTL and propagate it to the claimed messages.

    :raises: exceptions.ClaimDoesNotExist if the claim id is invalid
        or no unexpired claimed message exists for it
    """
    cid = utils.to_oid(claim_id)
    if cid is None:
        raise exceptions.ClaimDoesNotExist(claim_id, queue, project)

    now = timeutils.utcnow()
    ttl = int(metadata.get('ttl', 60))
    ttl_delta = datetime.timedelta(seconds=ttl)
    expires = now + ttl_delta

    msg_ctrl = self.driver.message_controller
    # A claim exists iff at least one message is still claimed by it.
    claimed = msg_ctrl.claimed(queue, cid, expires=now, limit=1, project=project)
    try:
        next(claimed)
    except StopIteration:
        raise exceptions.ClaimDoesNotExist(claim_id, queue, project)

    meta = {
        'id': cid,
        't': ttl,
        'e': expires,
    }

    # Stamp the refreshed claim metadata onto every claimed message.
    msg_ctrl._col.update({'q': queue, 'p': project, 'c.id': cid},
                         {'$set': {'c': meta}},
                         upsert=False, multi=True)

    # NOTE(flaper87): Dirty hack!
    # This sets the expiration time to
    # `expires` on messages that would
    # expire before claim.
    msg_ctrl._col.update({'q': queue,
                          'p': project,
                          'e': {'$lt': expires},
                          'c.id': cid},
                         {'$set': {'e': expires, 't': ttl}},
                         upsert=False, multi=True)
def _count_expired(self, queue_name, project=None):
    """Counts the number of expired messages in a queue.

    :param queue_name: Name of the queue to stat
    :param project: Project the queue belongs to
    """
    now = timeutils.utcnow()
    query = {"p": project, "q": queue_name, "e": {"$lte": now}}
    return self._col.find(query).count()
def _list(self, queue_name, marker=None, echo=False, client_uuid=None, fields=None, include_claimed=False, project=None, sort=1):
    """Message document listing helper.

    :param queue_name: Name of the queue to list
    :param project: Project `queue_name` belongs to.
    :param marker: Message marker from which to start iterating
    :param echo: Whether to return messages that match client_uuid
    :param client_uuid: UUID for the client that originated this request
    :param fields: Fields to include in emitted documents as a dict
    :param include_claimed: Whether to include claimed messages,
        not just active ones
    :param sort: (Default 1) Sort order for the listing. Pass 1 for
        ascending (oldest message first), or -1 for descending
        (newest message first).
    :returns: MongoDB cursor
    """
    if sort not in (1, -1):
        raise ValueError(u"sort must be either 1 (ascending) "
                         u"or -1 (descending)")

    now = timeutils.utcnow()

    # Scope to this queue/project and exclude expired messages.
    query = {"p": project, "q": queue_name, "e": {"$gt": now}}

    if not echo:
        # Skip the requesting client's own messages.
        query["u"] = {"$ne": client_uuid}

    if marker:
        query["k"] = {"$gt": marker}

    if not include_claimed:
        # Keep only messages with no claim or an expired claim.
        query["c.e"] = {"$lte": now}

    # NOTE(flaper87): Suggest the index to use for this query
    cursor = self._col.find(query, fields=fields, sort=[("k", sort)])
    return cursor.hint(self.active_fields)
def _test_post(self, sample_messages):
    """Post `sample_messages`, then verify single and bulk GETs.

    Py2-era variant: uses `unicode` and timeutils time overrides
    instead of mock-patching utcnow.
    """
    sample_doc = json.dumps(sample_messages)

    result = self.simulate_post(self.messages_path, self.project_id, body=sample_doc, headers=self.headers)
    self.assertEqual(self.srmock.status, falcon.HTTP_201)

    result_doc = json.loads(result[0])

    msg_ids = self._get_msg_ids(self.srmock.headers_dict)
    self.assertEqual(len(msg_ids), len(sample_messages))

    expected_resources = [unicode(self.messages_path + "/" + id) for id in msg_ids]
    self.assertEqual(expected_resources, result_doc["resources"])
    self.assertFalse(result_doc["partial"])

    self.assertEqual(len(msg_ids), len(sample_messages))

    # NOTE(review): assumes ttl values are unique across
    # sample_messages, otherwise this lookup loses entries - confirm.
    lookup = dict([(m["ttl"], m["body"]) for m in sample_messages])

    # Test GET on the message resource directly
    # NOTE(cpp-cabrera): force the passing of time to age a message
    timeutils.set_time_override(timeutils.utcnow())
    timeutils.advance_time_seconds(10)
    for msg_id in msg_ids:
        message_uri = self.messages_path + "/" + msg_id

        # Wrong project ID
        self.simulate_get(message_uri, "777777")
        self.assertEqual(self.srmock.status, falcon.HTTP_404)

        # Correct project ID
        result = self.simulate_get(message_uri, self.project_id)
        self.assertEqual(self.srmock.status, falcon.HTTP_200)
        self.assertEqual(self.srmock.headers_dict["Content-Location"], message_uri)

        # Check message properties
        message = json.loads(result[0])
        self.assertEqual(message["href"], message_uri)
        self.assertEqual(message["body"], lookup[message["ttl"]])

        # no negative age
        # NOTE(cpp-cabrera): testtools lacks GreaterThanEqual on py26
        self.assertThat(message["age"], matchers.GreaterThan(-1))
    timeutils.clear_time_override()

    # Test bulk GET
    query_string = "ids=" + ",".join(msg_ids)
    result = self.simulate_get(self.messages_path, self.project_id, query_string=query_string)

    self.assertEqual(self.srmock.status, falcon.HTTP_200)
    result_doc = json.loads(result[0])
    # Every posted ttl must appear in the bulk result.
    expected_ttls = set(m["ttl"] for m in sample_messages)
    actual_ttls = set(m["ttl"] for m in result_doc)
    self.assertFalse(expected_ttls - actual_ttls)
def _count_expired(self, queue_id):
    """Counts the number of expired messages in a queue.

    :param queue_id: id for the queue to stat
    """
    expired = {'q': queue_id, 'e': {'$lte': timeutils.utcnow()}}
    return self._col.find(expired).count()
def claimed(self, queue_name, claim_id, expires=None, limit=None, project=None):
    """Return a cursor over messages held by the given claim."""
    cutoff = expires or timeutils.utcnow()
    query = {"c.id": claim_id,
             "c.e": {"$gt": cutoff},
             "q": queue_name,
             "p": project}

    # NOTE(kgriffs): Claimed messages must be queried from
    # the primary to avoid a race condition caused by the
    # multi-phased "create claim" algorithm.
    primary = pymongo.read_preferences.ReadPreference.PRIMARY
    cursor = self._col.find(query, sort=[("k", 1)], read_preference=primary)

    if limit:
        cursor = cursor.limit(limit)

    now = timeutils.utcnow()

    def add_claim(msg):
        doc = _basic_message(msg, now)
        doc["claim"] = msg["c"]
        return doc

    return utils.HookedCursor(cursor, add_claim)
def _list(self, queue_name, marker=None, echo=False, client_uuid=None, fields=None, include_claimed=False, project=None, sort=1):
    """Message document listing helper.

    :param queue_name: Name of the queue to list
    :param project: Project `queue_name` belongs to.
    :param marker: Message marker from which to start iterating
    :param echo: Whether to return messages that match client_uuid
    :param client_uuid: UUID for the client that originated this request
    :param fields: Fields to include in emitted documents
    :param include_claimed: Whether to include claimed messages,
        not just active ones
    :param sort: (Default 1) Sort order for the listing. Pass 1 for
        ascending (oldest message first), or -1 for descending
        (newest message first).
    :returns: MongoDB cursor
    :raises: ValueError on a bad sort value; TypeError on bad fields
    """
    if sort not in (1, -1):
        raise ValueError('sort must be either 1 (ascending) '
                         'or -1 (descending)')

    now = timeutils.utcnow()

    # Scope to this queue/project; exclude expired messages.
    query = {'p': project, 'q': queue_name, 'e': {'$gt': now}}

    if fields and not isinstance(fields, (dict, list)):
        raise TypeError('Fields must be an instance of list / dict')

    if not echo and client_uuid is not None:
        # Skip the requesting client's own messages.
        query['u'] = {'$ne': client_uuid}

    if marker:
        query['k'] = {'$gt': marker}

    if not include_claimed:
        # Keep only unclaimed messages and those whose claim expired.
        query['c.e'] = {'$lte': now}

    # NOTE(flaper87): Suggest the index to use for this query
    cursor = self._col.find(query, fields=fields, sort=[('k', sort)])
    return cursor.hint(self.active_fields)
def _count_expired(self, queue_name, project=None):
    """Counts the number of expired messages in a queue.

    :param queue_name: Name of the queue to stat
    :param project: Project the queue belongs to
    """
    now = timeutils.utcnow()
    return self._col.find({'p': project,
                           'q': queue_name,
                           'e': {'$lte': now}}).count()
def unclaim(self, queue_name, claim_id, project=None):
    """Release the given claim from all of the messages it holds."""
    cid = utils.to_oid(claim_id)

    # NOTE(cpp-cabrera): early abort - avoid a DB query if we're
    # handling an invalid ID
    if cid is None:
        return

    # Clearing the claim id and expiring the claim "now" returns
    # the messages to the unclaimed set.
    released = {'$set': {'c': {'id': None, 'e': timeutils.utcnow()}}}
    self._col.update({'q': queue_name, 'p': project, 'c.id': cid},
                     released, upsert=False, multi=True)
def _remove_expired(self, queue_name, project):
    """Removes all expired messages except for the most recent in each queue.

    This method is used in lieu of mongo's TTL index since we must
    always leave at least one message in the queue for calculating
    the next marker.

    Note that expired messages are only removed if their count
    exceeds options.CFG.gc_threshold.

    :param queue_name: name for the queue from which to remove
        expired messages
    :param project: Project queue_name belong's too
    """
    if self._count_expired(queue_name, project) < options.CFG.gc_threshold:
        # Not enough garbage to make a sweep worthwhile.
        return

    # Get the message with the highest marker, and leave it in the queue.
    # NOTE(flaper87): Keep the counter in a separate record and
    # lets remove all messages.
    head = self._col.find_one({'q': queue_name, 'p': project},
                              sort=[('k', -1)],
                              fields={'_id': 1})

    if head is None:
        # Assume queue was just deleted via a parallel request
        LOG.warning(_(u'Queue %s is empty or missing.') % queue_name)
        return

    # NOTE(flaper87): Can we use k instead of
    # _id here? The active index will cover
    # the previous query and the remove one.
    self._col.remove({'p': project,
                      'q': queue_name,
                      'e': {'$lte': timeutils.utcnow()},
                      '_id': {'$ne': head['_id']}},
                     w=0)
def test_message_counter(self):
    """Exercise the _get_counter/_inc_counter marker bookkeeping."""
    queue_name = 'marker_test'
    iterations = 10

    self.queue_controller.create(queue_name)

    seed_marker1 = self.queue_controller._get_counter(queue_name)
    self.assertEqual(seed_marker1, 1, 'First marker is 1')

    for i in range(iterations):
        self.controller.post(queue_name, [{'ttl': 60}], 'uuid')
        # Reads must be stable between posts.
        marker1 = self.queue_controller._get_counter(queue_name)
        marker2 = self.queue_controller._get_counter(queue_name)
        marker3 = self.queue_controller._get_counter(queue_name)
        self.assertEqual(marker1, marker2)
        self.assertEqual(marker2, marker3)
        # Each post bumps the counter by one; the seed was 1.
        self.assertEqual(marker1, i + 2)

    new_value = self.queue_controller._inc_counter(queue_name)
    self.assertIsNotNone(new_value)

    # Default increment adds exactly 1.
    value_before = self.queue_controller._get_counter(queue_name)
    new_value = self.queue_controller._inc_counter(queue_name)
    self.assertIsNotNone(new_value)
    value_after = self.queue_controller._get_counter(queue_name)
    self.assertEqual(value_after, value_before + 1)

    # An explicit amount is applied verbatim and returned.
    value_before = value_after
    new_value = self.queue_controller._inc_counter(queue_name, amount=7)
    value_after = self.queue_controller._get_counter(queue_name)
    self.assertEqual(value_after, value_before + 7)
    self.assertEqual(value_after, new_value)

    reference_value = value_after

    # Within the window, a repeated increment is a no-op returning None.
    unchanged = self.queue_controller._inc_counter(queue_name, window=10)
    self.assertIsNone(unchanged)

    # TODO(kgriffs): Pass utcnow to work around bug
    # in set_time_override until we merge the fix in
    # from upstream.
    timeutils.set_time_override(timeutils.utcnow())
    # Advance the clock past the window so the increment takes effect.
    timeutils.advance_time_seconds(10)

    changed = self.queue_controller._inc_counter(queue_name, window=5)
    self.assertEqual(changed, reference_value + 1)

    timeutils.clear_time_override()
def test_message_counter(self):
    """Exercise the _get_counter/_inc_counter marker bookkeeping."""
    queue_name = 'marker_test'
    iterations = 10

    self.queue_controller.create(queue_name)

    seed_marker1 = self.queue_controller._get_counter(queue_name)
    self.assertEqual(seed_marker1, 1, 'First marker is 1')

    for i in range(iterations):
        self.controller.post(queue_name, [{'ttl': 60}], 'uuid')
        # Reads must be stable between posts.
        marker1 = self.queue_controller._get_counter(queue_name)
        marker2 = self.queue_controller._get_counter(queue_name)
        marker3 = self.queue_controller._get_counter(queue_name)
        self.assertEqual(marker1, marker2)
        self.assertEqual(marker2, marker3)
        # Each post bumps the counter by one; the seed was 1.
        self.assertEqual(marker1, i + 2)

    new_value = self.queue_controller._inc_counter(queue_name)
    self.assertIsNotNone(new_value)

    # Default increment adds exactly 1.
    value_before = self.queue_controller._get_counter(queue_name)
    new_value = self.queue_controller._inc_counter(queue_name)
    self.assertIsNotNone(new_value)
    value_after = self.queue_controller._get_counter(queue_name)
    self.assertEqual(value_after, value_before + 1)

    # An explicit amount is applied verbatim and returned.
    value_before = value_after
    new_value = self.queue_controller._inc_counter(queue_name, amount=7)
    value_after = self.queue_controller._get_counter(queue_name)
    self.assertEqual(value_after, value_before + 7)
    self.assertEqual(value_after, new_value)

    reference_value = value_after

    # Within the window, a repeated increment is a no-op returning None.
    unchanged = self.queue_controller._inc_counter(queue_name, window=10)
    self.assertIsNone(unchanged)

    # Advance the clock past the window so the increment takes effect.
    now = timeutils.utcnow() + datetime.timedelta(seconds=10)
    timeutils_utcnow = 'marconi.openstack.common.timeutils.utcnow'
    with mock.patch(timeutils_utcnow) as mock_utcnow:
        mock_utcnow.return_value = now
        changed = self.queue_controller._inc_counter(queue_name, window=5)
        self.assertEqual(changed, reference_value + 1)
def count(self, queue_name, project=None):
    """Return total number of (non-expired) messages in a queue.

    This method is designed to very quickly count the number of
    messages in a given queue. Expired messages are not counted, of
    course. If the queue does not exist, the count will always be 0.
    """
    now = timeutils.utcnow()
    return self._col.find({'q': queue_name,
                           'p': project,
                           'e': {'$gt': now}}).count()
def unclaim(self, queue_name, claim_id, project=None):
    """Detach a claim from every message it currently holds."""
    cid = utils.to_oid(claim_id)
    if cid is None:
        # NOTE(cpp-cabrera): early abort - avoid a DB query if we're
        # handling an invalid ID
        return

    # NOTE(cpp-cabrera): unclaim by setting the claim ID to None
    # and the claim expiration time to now
    self._col.update(
        {"q": queue_name, "p": project, "c.id": cid},
        {"$set": {"c": {"id": None, "e": timeutils.utcnow()}}},
        upsert=False,
        multi=True,
    )
def bulk_get(self, queue_name, message_ids, project=None):
    """Look up several messages at once, skipping unparseable IDs.

    :returns: an iterable of basic message documents (empty when no
        valid IDs were supplied)
    """
    oids = [mid for mid in map(utils.to_oid, message_ids) if mid]
    if not oids:
        return iter([])

    now = timeutils.utcnow()

    # Base query, always check expire time
    query = {"q": queue_name,
             "p": project,
             "_id": {"$in": oids},
             "e": {"$gt": now}}

    # NOTE(flaper87): Should this query
    # be sorted?
    cursor = self._col.find(query).hint([("_id", 1)])

    def denormalizer(msg):
        return _basic_message(msg, now)

    return utils.HookedCursor(cursor, denormalizer)
def update(self, queue, claim_id, metadata, project=None):
    """Update a claim's TTL and propagate it to the claimed messages.

    :raises: exceptions.ClaimDoesNotExist if the claim id is invalid
        or no unexpired claimed message exists for it
    :raises: ValueError when the new ttl would leave the claim expired
    """
    try:
        cid = utils.to_oid(claim_id)
    except ValueError:
        raise exceptions.ClaimDoesNotExist(claim_id, queue, project)

    now = timeutils.utcnow()
    ttl = int(metadata.get('ttl', 60))
    ttl_delta = datetime.timedelta(seconds=ttl)
    expires = now + ttl_delta

    # NOTE(review): with a non-negative ttl this can never trigger;
    # presumably it guards against negative ttl values - confirm.
    if now > expires:
        raise ValueError('New ttl will make the claim expires')

    msg_ctrl = self.driver.message_controller
    # A claim exists iff at least one message is still claimed by it.
    claimed = msg_ctrl.claimed(queue, cid, expires=now, limit=1, project=project)
    try:
        next(claimed)
    except StopIteration:
        raise exceptions.ClaimDoesNotExist(claim_id, queue, project)

    meta = {
        'id': cid,
        't': ttl,
        'e': expires,
    }

    # Stamp the refreshed claim metadata onto every claimed message.
    msg_ctrl._col.update({'q': queue, 'p': project, 'c.id': cid},
                         {'$set': {'c': meta}}, upsert=False, multi=True)

    # NOTE(flaper87): Dirty hack!
    # This sets the expiration time to
    # `expires` on messages that would
    # expire before claim.
    msg_ctrl._col.update({'q': queue,
                          'p': project,
                          'e': {'$lt': expires},
                          'c.id': cid},
                         {'$set': {'e': expires, 't': ttl}},
                         upsert=False, multi=True)
def get(self, queue_name, message_id, project=None):
    """Gets a single message by ID.

    :raises: exceptions.MessageDoesNotExist
    """
    mid = utils.to_oid(message_id)
    if mid is None:
        raise exceptions.MessageDoesNotExist(message_id, queue_name, project)

    now = timeutils.utcnow()

    # Hint the _id index; the message must also be unexpired.
    results = list(self._col.find(
        {"_id": mid, "q": queue_name, "p": project, "e": {"$gt": now}}
    ).limit(1).hint([("_id", 1)]))

    if not results:
        raise exceptions.MessageDoesNotExist(message_id, queue_name, project)

    return _basic_message(results[0], now)
def _remove_expired(self, queue_name, project):
    """Removes all expired messages except for the most recent in each queue.

    This method is used in lieu of mongo's TTL index since we must
    always leave at least one message in the queue for calculating
    the next marker.

    Note that expired messages are only removed if their count
    exceeds options.CFG.gc_threshold.

    :param queue_name: name for the queue from which to remove
        expired messages
    :param project: Project queue_name belong's too
    """
    expired_msgs = self._count_expired(queue_name, project)
    if options.CFG.gc_threshold <= expired_msgs:
        # Get the message with the highest marker, and leave
        # it in the queue
        # NOTE(flaper87): Keep the counter in a separate record and
        # lets remove all messages.
        head = self._col.find_one({'q': queue_name, 'p': project},
                                  sort=[('k', -1)], fields={'_id': 1})
        if head is None:
            # Assume queue was just deleted via a parallel request
            LOG.warning(_(u'Queue %s is empty or missing.') % queue_name)
            return

        # NOTE(flaper87): Can we use k instead of
        # _id here? The active index will cover
        # the previous query and the remove one.
        query = {
            'p': project,
            'q': queue_name,
            'e': {'$lte': timeutils.utcnow()},
            '_id': {'$ne': head['_id']}
        }
        # w=0: fire-and-forget write; GC is best-effort, so we do not
        # wait for acknowledgement from the server.
        self._col.remove(query, w=0)
def bulk_get(self, queue_name, message_ids, project=None):
    """Return a lazy cursor over the unexpired messages with the given IDs.

    :param queue_name: name of the queue the messages belong to
    :param message_ids: iterable of opaque message IDs
    :param project: project scope of the queue
    :returns: iterator of denormalized message documents
    """
    # NOTE: drop IDs that utils.to_oid could not parse (it returns a
    # falsy value for malformed input); keeping them in the $in list
    # can never match a document. This also matches the behavior of
    # the other bulk_get implementation in this file.
    message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid]
    if not message_ids:
        # Nothing valid to look up; avoid a pointless round trip.
        return iter([])

    now = timeutils.utcnow()

    # Base query, always check expire time
    query = {
        'q': queue_name,
        'p': project,
        '_id': {'$in': message_ids},
        'e': {'$gt': now},
    }

    # NOTE(flaper87): Should this query
    # be sorted?
    messages = self._col.find(query).hint([('_id', 1)])

    def denormalizer(msg):
        return _basic_message(msg, now)

    return utils.HookedCursor(messages, denormalizer)
def verify_message_stats(self, message):
    """Verifies the oldest & newest message stats

    :param message: oldest (or) newest message returned by
        queue_name/stats.
    """
    expected_keys = ['age', 'created', 'href']

    # NOTE: use sorted() rather than list.sort() on dict.keys() --
    # under Python 3, keys() returns a view with no .sort() method.
    response_keys = sorted(message.keys())
    self.assertEqual(response_keys, expected_keys)

    # Verify that age has valid values
    age = message['age']
    self.assertTrue(0 <= age <= self.limits.max_message_ttl,
                    msg='Invalid Age {0}'.format(age))

    # Verify that GET on href returns 200
    path = message['href']
    result = self.client.get(path)
    self.assertEqual(result.status_code, 200)

    # Verify that created time falls within the last 10 minutes
    # NOTE(malini): The messages are created during the test.
    created_time = message['created']
    created_time = timeutils.normalize_time(
        timeutils.parse_isotime(created_time))
    now = timeutils.utcnow()

    delta = timeutils.delta_seconds(before=created_time, after=now)
    # NOTE(malini): The 'int()' below is a work around for the small time
    # difference between julianday & UTC.
    # (needed to pass this test on sqlite driver)
    delta = int(delta)

    msg = 'Invalid Time Delta {0}, Created time {1}, Now {2}' \
        .format(delta, created_time, now)
    self.assertTrue(0 <= delta <= 6000, msg)
def get(self, queue, claim_id, project=None): msg_ctrl = self.driver.message_controller # Base query, always check expire time now = timeutils.utcnow() cid = utils.to_oid(claim_id) if cid is None: raise exceptions.ClaimDoesNotExist(queue, project, claim_id) def messages(msg_iter): msg = next(msg_iter) yield msg.pop('claim') yield msg # Smoke it! for msg in msg_iter: del msg['claim'] yield msg try: # Lets get claim's data # from the first message # in the iterator msgs = messages(msg_ctrl.claimed(queue, cid, now, project=project)) claim = next(msgs) update_time = claim['e'] - datetime.timedelta(seconds=claim['t']) age = timeutils.delta_seconds(update_time, now) claim = { 'age': int(age), 'ttl': claim.pop('t'), 'id': str(claim['id']), } except StopIteration: raise exceptions.ClaimDoesNotExist(cid, queue, project) return (claim, msgs)
def __get(self, cid):
    """Yield the unexpired messages attached to claim ``cid``.

    Each yielded dict carries the encoded message id, its ttl,
    age in seconds, and body.
    """
    # NOTE(flaper87): This probably needs to
    # join on `Claim` to check the claim ttl.
    sel = sa.sql.select([tables.Messages.c.id,
                         tables.Messages.c.body,
                         tables.Messages.c.ttl,
                         tables.Messages.c.created],
                        sa.and_(tables.Messages.c.ttl >
                                utils.get_age(tables.Messages.c.created),
                                # TODO: also bound by the claim's TTL, i.e.
                                # Messages.c.ttl > get_age(Claims.c.created),
                                # once the join mentioned above is added.
                                tables.Messages.c.cid == cid))
    records = self.driver.run(sel)

    for id, body, ttl, created in records:
        yield {
            'id': utils.msgid_encode(id),
            'ttl': ttl,
            # NOTE(review): timedelta.seconds drops the days component;
            # presumably messages never live >24h -- confirm before
            # relying on `age` for long-lived messages.
            'age': (timeutils.utcnow() - created).seconds,
            'body': body,
        }
def first(self, queue, project=None, sort=1):
    """Return the first unexpired message in the queue.

    :param sort: 1 for the oldest message, -1 for the newest
    :raises: ValueError on any other sort value
    :raises: errors.QueueIsEmpty when no unexpired message exists
    """
    if project is None:
        project = ''

    # Resolve the queue first so a missing queue surfaces before
    # any sort-argument validation (preserves exception ordering).
    qid = utils.get_qid(self.driver, queue, project)

    columns = [tables.Messages.c.id,
               tables.Messages.c.body,
               tables.Messages.c.ttl,
               tables.Messages.c.created]
    unexpired = (tables.Messages.c.ttl >
                 sfunc.now() - tables.Messages.c.created)
    stmt = sa.sql.select(columns,
                         sa.and_(unexpired,
                                 tables.Messages.c.qid == qid))

    if sort not in (1, -1):
        raise ValueError(u'sort must be either 1 (ascending) '
                         u'or -1 (descending)')

    direction = sa.desc if sort == -1 else sa.asc
    stmt = stmt.order_by(direction(tables.Messages.c.id))

    try:
        mid, body, ttl, created = self.driver.get(stmt)
    except utils.NoResult:
        raise errors.QueueIsEmpty(queue, project)

    return {
        'id': utils.msgid_encode(mid),
        'ttl': ttl,
        'created': timeutils.isotime(created),
        'age': int((timeutils.utcnow() - created).seconds),
        'body': body,
    }
def test_queue_lifecycle(self):
    """Exercise queue create/read/update/stats/delete end to end."""
    # Test Queue Creation
    created = self.controller.create('test', project=self.project)
    self.assertTrue(created)

    # Test Queue Existence
    self.assertTrue(self.controller.exists('test', project=self.project))

    # Test Queue retrieval
    metadata = self.controller.get_metadata('test', project=self.project)
    self.assertEqual(metadata, {})

    # Test Queue Update
    created = self.controller.set_metadata('test', project=self.project,
                                           metadata=dict(meta='test_meta'))
    metadata = self.controller.get_metadata('test', project=self.project)
    self.assertEqual(metadata['meta'], 'test_meta')

    # Touching an existing queue does not affect metadata
    created = self.controller.create('test', project=self.project)
    self.assertFalse(created)

    metadata = self.controller.get_metadata('test', project=self.project)
    self.assertEqual(metadata['meta'], 'test_meta')

    # Test Queue Statistic
    _insert_fixtures(self.message_controller, 'test',
                     project=self.project, client_uuid='my_uuid', num=6)

    # NOTE(kgriffs): We can't get around doing this, because
    # we don't know how the storage drive may be calculating
    # message timestamps (and may not be monkey-patchable).
    time.sleep(1)

    _insert_fixtures(self.message_controller, 'test',
                     project=self.project, client_uuid='my_uuid', num=6)

    stats = self.controller.stats('test', project=self.project)
    message_stats = stats['messages']

    self.assertEqual(message_stats['free'], 12)
    self.assertEqual(message_stats['claimed'], 0)
    self.assertEqual(message_stats['total'], 12)

    oldest = message_stats['oldest']
    newest = message_stats['newest']

    self.assertNotEqual(oldest, newest)

    # NOTE(kgriffs): Ensure "now" is different enough
    # for the next comparison to work.
    timeutils.set_time_override()

    # NOTE: without this cleanup the frozen clock leaks into every
    # subsequent test in the process, even if an assertion below fails.
    self.addCleanup(timeutils.clear_time_override)

    timeutils.advance_time_seconds(10)

    for message_stat in (oldest, newest):
        created_iso = message_stat['created']
        created = timeutils.parse_isotime(created_iso)
        self.assertThat(timeutils.normalize_time(created),
                        matchers.LessThan(timeutils.utcnow()))

        self.assertIn('id', message_stat)

    self.assertThat(oldest['created'],
                    matchers.LessThan(newest['created']))

    # Test Queue Deletion
    self.controller.delete('test', project=self.project)

    # Test Queue Existence
    self.assertFalse(self.controller.exists('test', project=self.project))

    # Test DoesNotExist Exception
    with testing.expect(storage.exceptions.DoesNotExist):
        self.controller.get_metadata('test', project=self.project)

    with testing.expect(storage.exceptions.DoesNotExist):
        self.controller.set_metadata('test', '{}', project=self.project)
def test_lifecycle(self):
    """Walk a claim through its whole lifecycle via the WSGI API.

    Each step depends on state created by the previous one:
    claim -> list -> inspect -> delete message -> patch claim ->
    delete claim -> verify 403/404 behavior on stale references.
    """
    doc = '{"ttl": 100, "grace": 60}'

    # First, claim some messages
    body = self.simulate_post(self.claims_path, self.project_id, body=doc)
    self.assertEqual(self.srmock.status, falcon.HTTP_201)

    claimed = json.loads(body[0])
    claim_href = self.srmock.headers_dict['Location']
    message_href, params = claimed[0]['href'].split('?')

    # No more messages to claim
    self.simulate_post(self.claims_path, self.project_id, body=doc,
                       query_string='limit=3')
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    headers = {
        'Client-ID': str(uuid.uuid4()),
    }

    # Listing messages, by default, won't include claimed
    body = self.simulate_get(self.messages_path, self.project_id,
                             headers=headers)
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Include claimed messages this time
    body = self.simulate_get(self.messages_path, self.project_id,
                             query_string='include_claimed=true',
                             headers=headers)
    listed = json.loads(body[0])
    self.assertEqual(self.srmock.status, falcon.HTTP_200)
    self.assertEqual(len(listed['messages']), len(claimed))

    # Advance the clock by 10s (mocked) so the claim has a
    # measurable, strictly positive age when fetched below.
    now = timeutils.utcnow() + datetime.timedelta(seconds=10)
    timeutils_utcnow = 'marconi.openstack.common.timeutils.utcnow'
    with mock.patch(timeutils_utcnow) as mock_utcnow:
        mock_utcnow.return_value = now
        body = self.simulate_get(claim_href, self.project_id)

    claim = json.loads(body[0])

    self.assertEqual(self.srmock.status, falcon.HTTP_200)
    self.assertEqual(self.srmock.headers_dict['Content-Location'],
                     claim_href)
    self.assertEqual(claim['ttl'], 100)
    ## NOTE(cpp-cabrera): verify that claim age is non-negative
    self.assertThat(claim['age'], matchers.GreaterThan(-1))

    # Try to delete the message without submitting a claim_id
    self.simulate_delete(message_href, self.project_id)
    self.assertEqual(self.srmock.status, falcon.HTTP_403)

    # Delete the message and its associated claim
    self.simulate_delete(message_href, self.project_id,
                         query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Try to get it from the wrong project
    self.simulate_get(message_href, 'bogus_project', query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_404)

    # Get the message
    self.simulate_get(message_href, self.project_id, query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_404)

    # Update the claim
    new_claim_ttl = '{"ttl": 60}'
    creation = timeutils.utcnow()
    self.simulate_patch(claim_href, self.project_id, body=new_claim_ttl)
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Get the claimed messages (again)
    body = self.simulate_get(claim_href, self.project_id)
    query = timeutils.utcnow()
    claim = json.loads(body[0])
    message_href, params = claim['messages'][0]['href'].split('?')

    self.assertEqual(claim['ttl'], 60)
    # The patch reset the claim's age, so the wall-clock elapsed
    # time must exceed the reported age.
    estimated_age = timeutils.delta_seconds(creation, query)
    self.assertTrue(estimated_age > claim['age'])

    # Delete the claim
    self.simulate_delete(claim['href'], 'bad_id')
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    self.simulate_delete(claim['href'], self.project_id)
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Try to delete a message with an invalid claim ID
    self.simulate_delete(message_href, self.project_id,
                         query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_403)

    # Make sure it wasn't deleted!
    self.simulate_get(message_href, self.project_id, query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_200)

    # Try to get a claim that doesn't exist
    self.simulate_get(claim['href'])
    self.assertEqual(self.srmock.status, falcon.HTTP_404)

    # Try to update a claim that doesn't exist
    self.simulate_patch(claim['href'], body=doc)
    self.assertEqual(self.srmock.status, falcon.HTTP_404)
def create(self, queue, metadata, project=None, limit=10):
    """Creates a claim.

    This implementation was done in a best-effort fashion.
    In order to create a claim we need to get a list
    of messages that can be claimed. Once we have that
    list we execute a query filtering by the ids returned
    by the previous query.

    Since there's a lot of space for race conditions here,
    we'll check if the number of updated records is equal to
    the max number of messages to claim. If the number of updated
    messages is lower than limit we'll try to claim the remaining
    number of messages.

    This 2 queries are required because there's no way, as for the
    time being, to execute an update on a limited number of records.
    """
    msg_ctrl = self.driver.message_controller

    if not self.driver.queue_controller.exists(queue, project):
        raise exceptions.QueueDoesNotExist(queue, project)

    ttl = metadata['ttl']
    grace = metadata['grace']
    oid = objectid.ObjectId()

    now = timeutils.utcnow()
    ttl_delta = datetime.timedelta(seconds=ttl)
    claim_expires = now + ttl_delta

    # Claimed messages must outlive the claim by the grace period.
    grace_delta = datetime.timedelta(seconds=grace)
    message_expires = claim_expires + grace_delta
    message_ttl = ttl + grace

    meta = {
        'id': oid,
        't': ttl,
        'e': claim_expires,
    }

    # Get a list of active, not claimed nor expired
    # messages that could be claimed.
    msgs = msg_ctrl.active(queue, fields={'_id': 1}, project=project)
    msgs = msgs.limit(limit)

    messages = iter([])
    ids = [msg['_id'] for msg in msgs]

    if len(ids) == 0:
        # Nothing to claim; still return a (valid) empty claim ID.
        return (str(oid), messages)

    now = timeutils.utcnow()

    # Set claim field for messages in ids: only messages that are
    # unclaimed, or whose existing claim has already expired, are
    # eligible (guards against racing claimers).
    updated = msg_ctrl._col.update({'_id': {'$in': ids},
                                    '$or': [
                                        {'c.id': None},
                                        {
                                            'c.id': {'$ne': None},
                                            'c.e': {'$lte': now}
                                        }
                                    ]},
                                   {'$set': {'c': meta}},
                                   upsert=False,
                                   multi=True)['n']

    # NOTE(flaper87): Dirty hack!
    # This sets the expiration time to
    # `expires` on messages that would
    # expire before claim.
    new_values = {'e': message_expires, 't': message_ttl}
    msg_ctrl._col.update({'q': queue,
                          'p': project,
                          'e': {'$lt': message_expires},
                          'c.id': oid},
                         {'$set': new_values},
                         upsert=False, multi=True)

    if updated != 0:
        # Re-read through get() so the returned messages reflect
        # exactly what this claim ended up owning.
        claim, messages = self.get(queue, oid, project=project)

    return (str(oid), messages)
def post(self, queue_name, messages, client_uuid, project=None):
    """Post messages, retrying on marker collisions.

    Markers ('k') are monotonically increasing per queue; concurrent
    posters can race on them, which surfaces as a DuplicateKeyError.
    On collision, the already-inserted prefix is kept and the rest is
    re-posted under fresh markers, up to the configured max attempts.

    :param queue_name: target queue
    :param messages: iterable of dicts with 'ttl' and optional 'body'
    :param client_uuid: opaque ID of the posting client
    :param project: project scope of the queue
    :returns: list (iterator) of stringified message IDs, in order
    :raises: exceptions.QueueDoesNotExist, exceptions.MessageConflict
    """
    now = timeutils.utcnow()

    if not self._queue_controller.exists(queue_name, project):
        raise exceptions.QueueDoesNotExist(queue_name, project)

    # Set the next basis marker for the first attempt.
    next_marker = self._next_marker(queue_name, project)

    prepared_messages = [
        {
            't': message['ttl'],
            'q': queue_name,
            'p': project,
            'e': now + datetime.timedelta(seconds=message['ttl']),
            'u': client_uuid,
            'c': {'id': None, 'e': now},
            'b': message['body'] if 'body' in message else {},
            'k': next_marker + index,
        }

        for index, message in enumerate(messages)
    ]

    # Results are aggregated across all attempts
    # NOTE(kgriffs): Using lazy instantiation...
    aggregated_results = None

    # Use a retry range for sanity, although we expect
    # to rarely, if ever, reach the maximum number of
    # retries.
    for attempt in self._retry_range:
        try:
            ids = self._col.insert(prepared_messages)

            # NOTE(kgriffs): Only use aggregated results if we must,
            # which saves some cycles on the happy path.
            if aggregated_results:
                aggregated_results.extend(ids)
                ids = aggregated_results

            # Log a message if we retried, for debugging perf issues
            if attempt != 0:
                message = _(u'%(attempts)d attempt(s) required to post '
                            u'%(num_messages)d messages to queue '
                            u'%(queue_name)s and project %(project)s')
                message %= dict(queue_name=queue_name,
                                attempts=attempt + 1,
                                num_messages=len(ids), project=project)

                LOG.debug(message)

            return map(str, ids)

        except pymongo.errors.DuplicateKeyError as ex:
            # Try again with the remaining messages

            # NOTE(kgriffs): This can be used in conjunction with the
            # log line, above, that is emitted after all messages have
            # been posted, to guage how long it is taking for messages
            # to be posted to a given queue, or overall.
            #
            # TODO(kgriffs): Add transaction ID to help match up loglines
            if attempt == 0:
                message = _(u'First attempt failed while '
                            u'adding messages to queue %s '
                            u'for current request') % queue_name

                LOG.debug(message)

            # TODO(kgriffs): Record stats of how often retries happen,
            # and how many attempts, on average, are required to insert
            # messages.

            # NOTE(kgriffs): Slice prepared_messages. We have to interpret
            # the error message to get the duplicate key, which gives
            # us the marker that had a dupe, allowing us to extrapolate
            # how many messages were consumed, since markers are monotonic
            # counters.
            duplicate_marker = utils.dup_marker_from_error(str(ex))
            failed_index = duplicate_marker - next_marker

            # Put the successful one's IDs into aggregated_results.
            succeeded_messages = prepared_messages[:failed_index]
            succeeded_ids = [msg['_id'] for msg in succeeded_messages]

            # Results are aggregated across all attempts
            if aggregated_results is None:
                aggregated_results = succeeded_ids
            else:
                aggregated_results.extend(succeeded_ids)

            # Retry the remaining messages with a new sequence
            # of markers.
            prepared_messages = prepared_messages[failed_index:]
            next_marker = self._next_marker(queue_name, project)
            for index, message in enumerate(prepared_messages):
                message['k'] = next_marker + index

            # Chill out for a moment to mitigate thrashing/thundering
            self._backoff_sleep(attempt)

        except Exception as ex:
            # TODO(kgriffs): Query the DB to get the last marker that
            # made it, and extrapolate from there to figure out what
            # needs to be retried.

            LOG.exception(ex)
            raise

    message = _(u'Hit maximum number of attempts (%(max)s) for queue '
                u'%(id)s in project %(project)s')
    message %= dict(max=options.CFG.max_attempts, id=queue_name,
                    project=project)

    LOG.warning(message)

    succeeded_ids = map(str, aggregated_results)
    raise exceptions.MessageConflict(queue_name, project, succeeded_ids)
def test_lifecycle(self):
    """Walk a claim through its whole lifecycle via the WSGI API."""
    doc = '{"ttl": 100, "grace": 60}'

    # First, claim some messages
    # NOTE: assertEquals is a deprecated alias of assertEqual; this
    # test now uses assertEqual for consistency with the rest of the
    # suite and forward compatibility.
    body = self.simulate_post(self.claims_path, self.project_id, body=doc)
    self.assertEqual(self.srmock.status, falcon.HTTP_201)

    claimed = json.loads(body[0])
    claim_href = self.srmock.headers_dict['Location']
    message_href, params = claimed[0]['href'].split('?')

    # No more messages to claim
    self.simulate_post(self.claims_path, self.project_id, body=doc,
                       query_string='limit=3')
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Listing messages, by default, won't include claimed
    body = self.simulate_get(self.messages_path, self.project_id,
                             headers={'Client-ID': 'foo'})
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Include claimed messages this time
    body = self.simulate_get(self.messages_path, self.project_id,
                             query_string='include_claimed=true',
                             headers={'Client-ID': 'foo'})
    listed = json.loads(body[0])
    self.assertEqual(self.srmock.status, falcon.HTTP_200)
    self.assertEqual(len(listed['messages']), len(claimed))

    # Check the claim's metadata
    body = self.simulate_get(claim_href, self.project_id)
    claim = json.loads(body[0])

    self.assertEqual(self.srmock.status, falcon.HTTP_200)
    self.assertEqual(self.srmock.headers_dict['Content-Location'],
                     claim_href)
    self.assertEqual(claim['ttl'], 100)

    # Try to delete the message without submitting a claim_id
    self.simulate_delete(message_href, self.project_id)
    self.assertEqual(self.srmock.status, falcon.HTTP_403)

    # Delete the message and its associated claim
    self.simulate_delete(message_href, self.project_id,
                         query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Try to get it from the wrong project
    self.simulate_get(message_href, 'bogus_project', query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_404)

    # Get the message
    self.simulate_get(message_href, self.project_id, query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_404)

    # Update the claim
    new_claim_ttl = '{"ttl": 60}'
    creation = timeutils.utcnow()
    self.simulate_patch(claim_href, self.project_id, body=new_claim_ttl)
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Get the claimed messages (again)
    body = self.simulate_get(claim_href, self.project_id)
    query = timeutils.utcnow()
    claim = json.loads(body[0])
    message_href, params = claim['messages'][0]['href'].split('?')

    self.assertEqual(claim['ttl'], 60)
    estimated_age = timeutils.delta_seconds(creation, query)
    self.assertTrue(estimated_age > claim['age'])

    # Delete the claim
    self.simulate_delete(claim['href'], 'bad_id')
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    self.simulate_delete(claim['href'], self.project_id)
    self.assertEqual(self.srmock.status, falcon.HTTP_204)

    # Try to delete a message with an invalid claim ID
    self.simulate_delete(message_href, self.project_id,
                         query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_403)

    # Make sure it wasn't deleted!
    self.simulate_get(message_href, self.project_id, query_string=params)
    self.assertEqual(self.srmock.status, falcon.HTTP_200)

    # Try to get a claim that doesn't exist
    self.simulate_get(claim['href'])
    self.assertEqual(self.srmock.status, falcon.HTTP_404)

    # Try to update a claim that doesn't exist
    self.simulate_patch(claim['href'], body=doc)
    self.assertEqual(self.srmock.status, falcon.HTTP_404)
def test_queue_lifecycle(self):
    """Exercise queue create/list/metadata/stats/delete end to end."""
    # Test queue creation
    created = self.controller.create('test', project=self.project)
    self.assertTrue(created)

    # Test queue existence
    self.assertTrue(self.controller.exists('test', project=self.project))

    # Test queue retrieval
    interaction = self.controller.list(project=self.project)
    queue = list(next(interaction))[0]
    self.assertEqual(queue['name'], 'test')

    # Test queue metadata retrieval
    metadata = self.controller.get_metadata('test', project=self.project)
    self.assertEqual(metadata, {})

    # Test queue update
    created = self.controller.set_metadata('test', project=self.project,
                                           metadata=dict(meta='test_meta'))
    metadata = self.controller.get_metadata('test', project=self.project)
    self.assertEqual(metadata['meta'], 'test_meta')

    # Touching an existing queue does not affect metadata
    created = self.controller.create('test', project=self.project)
    self.assertFalse(created)

    metadata = self.controller.get_metadata('test', project=self.project)
    self.assertEqual(metadata['meta'], 'test_meta')

    client_uuid = uuid.uuid4()

    # Test queue statistic
    _insert_fixtures(self.message_controller, 'test',
                     project=self.project, client_uuid=client_uuid,
                     num=6)

    # NOTE(kgriffs): We can't get around doing this, because
    # we don't know how the storage drive may be calculating
    # message timestamps (and may not be monkey-patchable).
    time.sleep(1.2)

    _insert_fixtures(self.message_controller, 'test',
                     project=self.project, client_uuid=client_uuid,
                     num=6)

    stats = self.controller.stats('test', project=self.project)
    message_stats = stats['messages']

    self.assertEqual(message_stats['free'], 12)
    self.assertEqual(message_stats['claimed'], 0)
    self.assertEqual(message_stats['total'], 12)

    oldest = message_stats['oldest']
    newest = message_stats['newest']

    self.assertNotEqual(oldest, newest)

    age = oldest['age']
    self.assertThat(age, matchers.GreaterThan(0))

    # NOTE(kgriffs): Ensure is different enough
    # for the next comparison to work.
    soon = timeutils.utcnow() + datetime.timedelta(seconds=60)

    for message_stat in (oldest, newest):
        created_iso = message_stat['created']
        created = timeutils.parse_isotime(created_iso)
        self.assertThat(timeutils.normalize_time(created),
                        matchers.LessThan(soon))

        self.assertIn('id', message_stat)

    self.assertThat(oldest['created'],
                    matchers.LessThan(newest['created']))

    # Test queue deletion
    self.controller.delete('test', project=self.project)

    # Test queue existence
    self.assertFalse(self.controller.exists('test', project=self.project))

    # Test DoesNotExist exception
    with testing.expect(storage.errors.DoesNotExist):
        self.controller.get_metadata('test', project=self.project)

    with testing.expect(storage.errors.DoesNotExist):
        self.controller.set_metadata('test', '{}', project=self.project)