def stats(self, name, project=None):
    """Return message statistics for the given queue.

    :param name: queue name
    :param project: project scope (None for the default project)
    :returns: dict of the form ``{'messages': {...}}`` with ``total``,
        ``free`` (active/unclaimed) and ``claimed`` counts, plus
        ``oldest``/``newest`` message stats when the queue is not empty
    :raises: QueueDoesNotExist if the queue cannot be found
    """
    if not self.exists(name, project=project):
        raise exceptions.QueueDoesNotExist(name, project)

    msg_ctrl = self.driver.message_controller
    free = msg_ctrl.active(name, project=project).count()
    total = msg_ctrl.count(name, project=project)

    stats_doc = {
        'total': total,
        'free': free,
        'claimed': total - free,
    }

    try:
        oldest = msg_ctrl.first(name, project=project, sort=1)
        newest = msg_ctrl.first(name, project=project, sort=-1)
    except exceptions.QueueIsEmpty:
        # No messages at all: omit the oldest/newest entries.
        pass
    else:
        now = timeutils.utcnow()
        stats_doc['oldest'] = utils.stat_message(oldest, now)
        stats_doc['newest'] = utils.stat_message(newest, now)

    return {'messages': stats_doc}
def set_metadata(self, name, metadata, project=None):
    """Replace a queue's metadata document.

    :param name: queue name
    :param metadata: new metadata document to store under the ``m`` field
    :param project: project scope (None for the default project)
    :raises: QueueDoesNotExist if no matching queue record was updated
    """
    query = {'p': project, 'n': name}
    update = {'$set': {'m': metadata}}
    result = self._col.update(query, update,
                              multi=False, manipulate=False)

    # updatedExisting is False when no document matched the query.
    if not result['updatedExisting']:
        raise exceptions.QueueDoesNotExist(name, project)
def get_qid(driver, queue, project):
    """Look up the internal row id for a queue.

    :param driver: sqlite storage driver exposing ``get``
    :param queue: queue name
    :param project: project scope
    :returns: the queue's ``id`` column value
    :raises: QueueDoesNotExist when no matching row is found
    """
    sql = '''
        select id from Queues
         where project = ? and name = ?'''
    try:
        return driver.get(sql, project, queue)[0]
    except NoResult:
        raise exceptions.QueueDoesNotExist(queue, project)
def get_metadata(self, name, project):
    """Fetch a queue's stored metadata.

    :param name: queue name
    :param project: project scope; None is normalized to the empty string
    :returns: the packed metadata column value
    :raises: QueueDoesNotExist when the queue row is missing
    """
    # sqlite rows store the default project as '' rather than NULL.
    project = '' if project is None else project

    sql = '''
        select metadata from Queues
         where project = ? and name = ?'''
    try:
        row = self.driver.get(sql, project, name)
    except utils.NoResult:
        raise exceptions.QueueDoesNotExist(name, project)
    return row[0]
def set_metadata(self, name, metadata, project):
    """Overwrite a queue's metadata column.

    :param name: queue name
    :param metadata: metadata object; packed by the driver before storage
    :param project: project scope; None is normalized to the empty string
    :raises: QueueDoesNotExist when no row was affected by the update
    """
    # sqlite rows store the default project as '' rather than NULL.
    project = '' if project is None else project

    driver = self.driver
    driver.run('''
        update Queues
        set metadata = ?
        where project = ? and name = ?
    ''', driver.pack(metadata), project, name)

    # affected tells us whether the update matched an existing row.
    if not driver.affected:
        raise exceptions.QueueDoesNotExist(name, project)
def _get(self, name, project=None, fields=None):
    """Return the raw queue document for *name* in *project*.

    :param name: queue name
    :param project: project scope (None for the default project)
    :param fields: MongoDB projection dict; defaults to returning only
        the metadata field ``m`` and excluding ``_id``
    :returns: the matching document (projected per *fields*)
    :raises: QueueDoesNotExist if no document matches
    """
    # NOTE: the previous default was a mutable dict literal shared across
    # all calls, so a caller mutating it would silently change the default
    # for every subsequent call. Use the None-sentinel idiom instead; the
    # effective default is unchanged, so this is backward compatible.
    if fields is None:
        fields = {'m': 1, '_id': 0}

    queue = self._col.find_one({'p': project, 'n': name}, fields=fields)
    if queue is None:
        raise exceptions.QueueDoesNotExist(name, project)
    return queue
def create(self, queue, metadata, project=None, limit=10):
    """Creates a claim.

    This implementation was done in a best-effort fashion.
    In order to create a claim we need to get a list
    of messages that can be claimed. Once we have that
    list we execute a query filtering by the ids returned
    by the previous query.

    Since there's a lot of space for race conditions here,
    we'll check if the number of updated records is equal to
    the max number of messages to claim. If the number of updated
    messages is lower than limit we'll try to claim the remaining
    number of messages.

    This 2 queries are required because there's no way, as for the
    time being, to execute an update on a limited number of records.

    :param queue: name of the queue to claim messages from
    :param metadata: dict carrying the claim's 'ttl' and 'grace' seconds
    :param project: project scope (None for the default project)
    :param limit: maximum number of messages to claim
    :returns: tuple of (claim id as str, iterator of claimed messages);
        the iterator is empty when nothing could be claimed
    :raises: QueueDoesNotExist if the queue cannot be found
    """
    msg_ctrl = self.driver.message_controller

    if not self.driver.queue_controller.exists(queue, project):
        raise exceptions.QueueDoesNotExist(queue, project)

    ttl = metadata['ttl']
    grace = metadata['grace']

    oid = objectid.ObjectId()

    now = timeutils.utcnow()
    ttl_delta = datetime.timedelta(seconds=ttl)
    claim_expires = now + ttl_delta

    # Grace extends message lifetime past the claim's expiration so a
    # re-claim is possible after this claim lapses.
    grace_delta = datetime.timedelta(seconds=grace)
    message_expires = claim_expires + grace_delta
    message_ttl = ttl + grace

    # Claim subdocument embedded in each claimed message.
    meta = {
        'id': oid,
        't': ttl,
        'e': claim_expires,
    }

    # Get a list of active, not claimed nor expired
    # messages that could be claimed.
    msgs = msg_ctrl.active(queue, fields={'_id': 1}, project=project)
    msgs = msgs.limit(limit)

    messages = iter([])
    ids = [msg['_id'] for msg in msgs]

    if len(ids) == 0:
        # Nothing claimable right now; return the new claim id with an
        # empty message iterator.
        return (str(oid), messages)

    # Refresh 'now' so the expired-claim comparison below is as current
    # as possible (narrows the race window).
    now = timeutils.utcnow()

    # Set claim field for messages in ids.
    # Only messages that are still unclaimed, or whose existing claim
    # has already expired, are actually updated — another claimer may
    # have won some of them between the two queries.
    updated = msg_ctrl._col.update({'_id': {'$in': ids},
                                    '$or': [
                                        {'c.id': None},
                                        {
                                            'c.id': {'$ne': None},
                                            'c.e': {'$lte': now}
                                        }
                                    ]},
                                   {'$set': {'c': meta}}, upsert=False,
                                   multi=True)['n']

    # NOTE(flaper87): Dirty hack!
    # This sets the expiration time to
    # `expires` on messages that would
    # expire before claim.
    new_values = {'e': message_expires, 't': message_ttl}
    msg_ctrl._col.update({'q': queue,
                          'p': project,
                          'e': {'$lt': message_expires},
                          'c.id': oid},
                         {'$set': new_values},
                         upsert=False, multi=True)

    if updated != 0:
        # At least one message was claimed; fetch the claim's messages.
        claim, messages = self.get(queue, oid, project=project)
    return (str(oid), messages)
def post(self, queue_name, messages, client_uuid, project=None):
    """Post one or more messages to a queue.

    Markers ('k') are monotonic per-queue counters; on a duplicate-key
    collision the error text is parsed to learn which marker collided,
    the already-inserted prefix is recorded, and the remaining messages
    are retried with a fresh marker sequence.

    :param queue_name: name of the target queue
    :param messages: iterable of dicts each carrying 'ttl' and
        (optionally) 'body'
    :param client_uuid: identifier of the posting client (stored as 'u')
    :param project: project scope (None for the default project)
    :returns: list (map) of message id strings, in post order
    :raises: QueueDoesNotExist if the queue cannot be found;
        MessageConflict if the maximum number of attempts is exhausted
    """
    now = timeutils.utcnow()

    if not self._queue_controller.exists(queue_name, project):
        raise exceptions.QueueDoesNotExist(queue_name, project)

    # Set the next basis marker for the first attempt.
    next_marker = self._next_marker(queue_name, project)

    # Build the raw documents: 't' ttl, 'e' absolute expiration,
    # 'c' empty claim subdoc, 'k' marker = basis + position.
    prepared_messages = [
        {
            't': message['ttl'],
            'q': queue_name,
            'p': project,
            'e': now + datetime.timedelta(seconds=message['ttl']),
            'u': client_uuid,
            'c': {'id': None, 'e': now},
            'b': message['body'] if 'body' in message else {},
            'k': next_marker + index,
        }

        for index, message in enumerate(messages)
    ]

    # Results are aggregated across all attempts
    # NOTE(kgriffs): Using lazy instantiation...
    aggregated_results = None

    # Use a retry range for sanity, although we expect
    # to rarely, if ever, reach the maximum number of
    # retries.
    for attempt in self._retry_range:
        try:
            ids = self._col.insert(prepared_messages)

            # NOTE(kgriffs): Only use aggregated results if we must,
            # which saves some cycles on the happy path.
            if aggregated_results:
                aggregated_results.extend(ids)
                ids = aggregated_results

            # Log a message if we retried, for debugging perf issues
            if attempt != 0:
                message = _(u'%(attempts)d attempt(s) required to post '
                            u'%(num_messages)d messages to queue '
                            u'%(queue_name)s and project %(project)s')
                message %= dict(queue_name=queue_name,
                                attempts=attempt + 1,
                                num_messages=len(ids),
                                project=project)

                LOG.debug(message)

            return map(str, ids)

        except pymongo.errors.DuplicateKeyError as ex:
            # Try again with the remaining messages

            # NOTE(kgriffs): This can be used in conjunction with the
            # log line, above, that is emitted after all messages have
            # been posted, to guage how long it is taking for messages
            # to be posted to a given queue, or overall.
            #
            # TODO(kgriffs): Add transaction ID to help match up loglines
            if attempt == 0:
                message = _(u'First attempt failed while '
                            u'adding messages to queue %s '
                            u'for current request') % queue_name

                LOG.debug(message)

            # TODO(kgriffs): Record stats of how often retries happen,
            # and how many attempts, on average, are required to insert
            # messages.

            # NOTE(kgriffs): Slice prepared_messages. We have to interpret
            # the error message to get the duplicate key, which gives
            # us the marker that had a dupe, allowing us to extrapolate
            # how many messages were consumed, since markers are monotonic
            # counters.
            duplicate_marker = utils.dup_marker_from_error(str(ex))
            failed_index = duplicate_marker - next_marker

            # Put the successful one's IDs into aggregated_results.
            succeeded_messages = prepared_messages[:failed_index]
            succeeded_ids = [msg['_id'] for msg in succeeded_messages]

            # Results are aggregated across all attempts
            if aggregated_results is None:
                aggregated_results = succeeded_ids
            else:
                aggregated_results.extend(succeeded_ids)

            # Retry the remaining messages with a new sequence
            # of markers.
            prepared_messages = prepared_messages[failed_index:]
            next_marker = self._next_marker(queue_name, project)
            for index, message in enumerate(prepared_messages):
                message['k'] = next_marker + index

            # Chill out for a moment to mitigate thrashing/thundering
            self._backoff_sleep(attempt)

        except Exception as ex:
            # TODO(kgriffs): Query the DB to get the last marker that
            # made it, and extrapolate from there to figure out what
            # needs to be retried.

            LOG.exception(ex)
            raise

    # Retry budget exhausted: report the ids that did make it so the
    # caller can reconcile.
    message = _(u'Hit maximum number of attempts (%(max)s) for queue '
                u'%(id)s in project %(project)s')
    message %= dict(max=options.CFG.max_attempts, id=queue_name,
                    project=project)

    LOG.warning(message)

    succeeded_ids = map(str, aggregated_results)
    raise exceptions.MessageConflict(queue_name, project, succeeded_ids)