Example #1
    def test_dup_marker_from_error(self):
        error_message = ('E11000 duplicate key error index: '
                         'marconi.messages.$queue_marker  dup key: '
                         '{ : ObjectId("51adff46b100eb85d8a93a2d"), : 3 }')

        marker = utils.dup_marker_from_error(error_message)
        self.assertEqual(marker, 3)

        error_message = ('E11000 duplicate key error index: '
                         'marconi.messages.$x_y  dup key: '
                         '{ : ObjectId("51adff46b100eb85d8a93a2d"), : 3 }')

        self.assertRaises(exceptions.PatternNotFound,
                          utils.dup_marker_from_error, error_message)

        error_message = ('E11000 duplicate key error index: '
                         'marconi.messages.$queue_marker  dup key: '
                         '{ : ObjectId("51adff46b100eb85d8a93a2d") }')

        self.assertRaises(exceptions.PatternNotFound,
                          utils.dup_marker_from_error, error_message)
Example #2
    def test_dup_marker_from_error(self):
        error_message = ('E11000 duplicate key error index: '
                         'marconi.messages.$queue_marker  dup key: '
                         '{ : "queue", : "project", : 3 }')

        marker = utils.dup_marker_from_error(error_message)
        self.assertEqual(marker, 3)

        error_message = ('E11000 duplicate key error index: '
                         'marconi.messages.$x_y  dup key: '
                         '{ : "queue", : "project", : 3 }')

        self.assertRaises(exceptions.PatternNotFound,
                          utils.dup_marker_from_error, error_message)

        error_message = ('E11000 duplicate key error index: '
                         'marconi.messages.$queue_marker  dup key: '
                         '{ : ObjectId("51adff46b100eb85d8a93a2d") }')

        self.assertRaises(exceptions.PatternNotFound,
                          utils.dup_marker_from_error, error_message)
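
A minimal sketch of the helper these tests exercise, assuming a regex-based parser; the real implementation lives in marconi's storage utils and may differ, and PatternNotFound here is a stand-in for marconi's own exceptions.PatternNotFound:

import re


# Hypothetical reconstruction: the marker is the trailing integer in a
# dup-key error raised against the queue_marker index.
_MARKER_RE = re.compile(r'\$queue_marker\s+dup key: { .*: (\d+) }$')


class PatternNotFound(Exception):
    """The marker could not be extracted from the error message."""


def dup_marker_from_error(error_message):
    """Extract the duplicate marker from a MongoDB E11000 error string."""
    match = _MARKER_RE.search(error_message)

    if match is None:
        raise PatternNotFound(error_message)

    return int(match.group(1))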
Example #3
    def post(self, queue, messages, client_uuid, project=None):
        now = timeutils.utcnow()
        queue_id = self._get_queue_id(queue, project)

        # Set the next basis marker for the first attempt.
        next_marker = self._next_marker(queue_id)

        # Results are aggregated across all attempts
        # NOTE(kgriffs): lazy instantiation
        aggregated_results = None

        # NOTE(kgriffs): This avoids iterating over messages twice,
        # since pymongo internally will iterate over them all to
        # encode as bson before submitting to mongod. By using a
        # generator, we can produce each message only once,
        # as needed by pymongo. At the same time, each message is
        # cached in case we need to retry any of them.
        message_gen = (
            {
                't': message['ttl'],
                'q': queue_id,
                'e': now + datetime.timedelta(seconds=message['ttl']),
                'u': client_uuid,
                'c': {'id': None, 'e': now},
                'b': message['body'] if 'body' in message else {},
                'k': next_marker + index,
            }

            for index, message in enumerate(messages)
        )

        prepared_messages, cached_messages = utils.cached_gen(message_gen)

        # Use a retry range for sanity, although we expect
        # to rarely, if ever, reach the maximum number of
        # retries.
        for attempt in self._retry_range:
            try:
                ids = self._col.insert(prepared_messages)

                # NOTE(kgriffs): Only use aggregated results if we must,
                # which saves some cycles on the happy path.
                if aggregated_results:
                    aggregated_results.extend(ids)
                    ids = aggregated_results

                # Log a message if we retried, for debugging perf issues
                if attempt != 0:
                    message = _('%(attempts)d attempt(s) required to post '
                                '%(num_messages)d messages to queue '
                                '%(queue_id)s')
                    message %= dict(queue_id=queue_id, attempts=attempt + 1,
                                    num_messages=len(ids))

                    LOG.debug(message)

                return map(str, ids)

            except pymongo.errors.DuplicateKeyError as ex:
                # Try again with the remaining messages

                # NOTE(kgriffs): This can be used in conjunction with the
                # log line, above, that is emitted after all messages have
                # been posted, to gauge how long it is taking for messages
                # to be posted to a given queue, or overall.
                #
                # TODO(kgriffs): Add transaction ID to help match up loglines
                if attempt == 0:
                    message = _('First attempt failed while adding messages '
                                'to queue %s for current request') % queue_id

                    LOG.debug(message)

                # TODO(kgriffs): Record stats of how often retries happen,
                # and how many attempts, on average, are required to insert
                # messages.

                # NOTE(kgriffs): Slice prepared_messages. We have to interpret
                # the error message to get the duplicate key, which gives
                # us the marker that had a dupe, allowing us to extrapolate
                # how many messages were consumed, since markers are monotonic
                # counters.
                duplicate_marker = utils.dup_marker_from_error(str(ex))
                failed_index = duplicate_marker - next_marker

                # First time here, convert the deque to a list
                # to support slicing.
                if isinstance(cached_messages, collections.deque):
                    cached_messages = list(cached_messages)

                # Put the successful ones' IDs into aggregated_results.
                succeeded_messages = cached_messages[:failed_index]
                succeeded_ids = [msg['_id'] for msg in succeeded_messages]

                # Results are aggregated across all attempts
                if aggregated_results is None:
                    aggregated_results = succeeded_ids
                else:
                    aggregated_results.extend(succeeded_ids)

                # Retry the remaining messages with a new sequence
                # of markers.
                prepared_messages = cached_messages[failed_index:]
                next_marker = self._next_marker(queue_id)
                for index, message in enumerate(prepared_messages):
                    message['k'] = next_marker + index

                # Chill out to avoid thrashing/thundering
                self._backoff_sleep(attempt)

            except Exception as ex:
                # TODO(kgriffs): Query the DB to get the last marker that
                # made it, and extrapolate from there to figure out what
                # needs to be retried. Definitely retry on AutoReconnect;
                # other types of errors TBD.

                LOG.exception(ex)
                raise

        message = _('Hit maximum number of attempts (%(max)s) for queue '
                    '%(id)s in project %(project)s')
        message %= dict(max=options.CFG.max_attempts, id=queue_id,
                        project=project)

        LOG.warning(message)

        succeeded_ids = map(str, aggregated_results)
        raise exceptions.MessageConflict(queue, project, succeeded_ids)
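
utils.cached_gen is not shown on this page; a minimal sketch, assuming it simply tees the generator into a deque so that already-produced messages remain available for the retry slicing above (the real helper may differ):

import collections


def cached_gen(iterable):
    """Yield each item exactly once while also caching it.

    Returns (generator, cache); the deque fills as the generator is
    consumed, so a failed batch can later be sliced for retries. This
    matches the isinstance(cached_messages, collections.deque) check
    in the retry path above.
    """
    cache = collections.deque()

    def generator():
        for item in iterable:
            cache.append(item)
            yield item

    return generator(), cache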
Example #4
    def post(self, queue_name, messages, client_uuid, project=None):
        now = timeutils.utcnow()

        if not self._queue_controller.exists(queue_name, project):
            raise exceptions.QueueDoesNotExist(queue_name, project)

        # Set the next basis marker for the first attempt.
        next_marker = self._next_marker(queue_name, project)

        prepared_messages = [
            {
                't': message['ttl'],
                'q': queue_name,
                'p': project,
                'e': now + datetime.timedelta(seconds=message['ttl']),
                'u': client_uuid,
                'c': {'id': None, 'e': now},
                'b': message['body'] if 'body' in message else {},
                'k': next_marker + index,
            }

            for index, message in enumerate(messages)
        ]

        # Results are aggregated across all attempts
        # NOTE(kgriffs): Using lazy instantiation...
        aggregated_results = None

        # Use a retry range for sanity, although we expect
        # to rarely, if ever, reach the maximum number of
        # retries.
        for attempt in self._retry_range:
            try:
                ids = self._col.insert(prepared_messages)

                # NOTE(kgriffs): Only use aggregated results if we must,
                # which saves some cycles on the happy path.
                if aggregated_results:
                    aggregated_results.extend(ids)
                    ids = aggregated_results

                # Log a message if we retried, for debugging perf issues
                if attempt != 0:
                    message = _(u'%(attempts)d attempt(s) required to post '
                                u'%(num_messages)d messages to queue '
                                u'%(queue_name)s and project %(project)s')
                    message %= dict(queue_name=queue_name,
                                    attempts=attempt + 1,
                                    num_messages=len(ids),
                                    project=project)

                    LOG.debug(message)

                return map(str, ids)

            except pymongo.errors.DuplicateKeyError as ex:
                # Try again with the remaining messages

                # NOTE(kgriffs): This can be used in conjunction with the
                # log line, above, that is emitted after all messages have
                # been posted, to gauge how long it is taking for messages
                # to be posted to a given queue, or overall.
                #
                # TODO(kgriffs): Add transaction ID to help match up loglines
                if attempt == 0:
                    message = _(u'First attempt failed while '
                                u'adding messages to queue %s '
                                u'for current request') % queue_name

                    LOG.debug(message)

                # TODO(kgriffs): Record stats of how often retries happen,
                # and how many attempts, on average, are required to insert
                # messages.

                # NOTE(kgriffs): Slice prepared_messages. We have to interpret
                # the error message to get the duplicate key, which gives
                # us the marker that had a dupe, allowing us to extrapolate
                # how many messages were consumed, since markers are monotonic
                # counters.
                duplicate_marker = utils.dup_marker_from_error(str(ex))
                failed_index = duplicate_marker - next_marker

                # Put the successful ones' IDs into aggregated_results.
                succeeded_messages = prepared_messages[:failed_index]
                succeeded_ids = [msg['_id'] for msg in succeeded_messages]

                # Results are aggregated across all attempts
                if aggregated_results is None:
                    aggregated_results = succeeded_ids
                else:
                    aggregated_results.extend(succeeded_ids)

                # Retry the remaining messages with a new sequence
                # of markers.
                prepared_messages = prepared_messages[failed_index:]
                next_marker = self._next_marker(queue_name, project)
                for index, message in enumerate(prepared_messages):
                    message['k'] = next_marker + index

                # Chill out for a moment to mitigate thrashing/thundering
                self._backoff_sleep(attempt)

            except Exception as ex:
                # TODO(kgriffs): Query the DB to get the last marker that
                # made it, and extrapolate from there to figure out what
                # needs to be retried.

                LOG.exception(ex)
                raise

        message = _(u'Hit maximum number of attempts (%(max)s) for queue '
                    u'%(id)s in project %(project)s')
        message %= dict(max=options.CFG.max_attempts, id=queue_name,
                        project=project)

        LOG.warning(message)

        succeeded_ids = map(str, aggregated_results)
        raise exceptions.MessageConflict(queue_name, project, succeeded_ids)
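
_next_marker is referenced but never shown; one plausible sketch, assuming a per-queue counter document incremented atomically with pymongo's find_and_modify (the self._db.counters collection and its field names are illustrative assumptions, not Marconi's actual schema):

    def _next_marker(self, queue_name, project):
        # Hypothetical: atomically increment a per-queue counter so that
        # markers remain monotonic even with concurrent producers.
        doc = self._db.counters.find_and_modify(
            query={'q': queue_name, 'p': project},
            update={'$inc': {'c': 1}},
            new=True,
            upsert=True)

        return doc['c']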
Example #5
    def post(self, queue_name, messages, client_uuid, project=None):
        now = timeutils.utcnow()

        if not self._queue_controller.exists(queue_name, project):
            raise exceptions.QueueDoesNotExist(queue_name, project)

        # Set the next basis marker for the first attempt.
        next_marker = self._next_marker(queue_name, project)

        prepared_messages = [
            {
                't': message['ttl'],
                'q': queue_name,
                'p': project,
                'e': now + datetime.timedelta(seconds=message['ttl']),
                'u': client_uuid,
                'c': {'id': None, 'e': now},
                'b': message['body'] if 'body' in message else {},
                'k': next_marker + index,
            }

            for index, message in enumerate(messages)
        ]

        # Results are aggregated across all attempts
        # NOTE(kgriffs): Using lazy instantiation...
        aggregated_results = None

        # Use a retry range for sanity, although we expect
        # to rarely, if ever, reach the maximum number of
        # retries.
        for attempt in self._retry_range:
            try:
                ids = self._col.insert(prepared_messages)

                # NOTE(kgriffs): Only use aggregated results if we must,
                # which saves some cycles on the happy path.
                if aggregated_results:
                    aggregated_results.extend(ids)
                    ids = aggregated_results

                # Log a message if we retried, for debugging perf issues
                if attempt != 0:
                    message = _(u'%(attempts)d attempt(s) required to post '
                                u'%(num_messages)d messages to queue '
                                u'%(queue_name)s and project %(project)s')
                    message %= dict(queue_name=queue_name,
                                    attempts=attempt + 1,
                                    num_messages=len(ids),
                                    project=project)

                    LOG.debug(message)

                return map(str, ids)

            except pymongo.errors.DuplicateKeyError as ex:
                # Try again with the remaining messages

                # NOTE(kgriffs): This can be used in conjunction with the
                # log line, above, that is emitted after all messages have
                # been posted, to gauge how long it is taking for messages
                # to be posted to a given queue, or overall.
                #
                # TODO(kgriffs): Add transaction ID to help match up loglines
                if attempt == 0:
                    message = _(u'First attempt failed while '
                                u'adding messages to queue %s '
                                u'for current request') % queue_name

                    LOG.debug(message)

                # TODO(kgriffs): Record stats of how often retries happen,
                # and how many attempts, on average, are required to insert
                # messages.

                # NOTE(kgriffs): Slice prepared_messages. We have to interpret
                # the error message to get the duplicate key, which gives
                # us the marker that had a dupe, allowing us to extrapolate
                # how many messages were consumed, since markers are monotonic
                # counters.
                duplicate_marker = utils.dup_marker_from_error(str(ex))
                failed_index = duplicate_marker - next_marker

                # Put the successful ones' IDs into aggregated_results.
                succeeded_messages = prepared_messages[:failed_index]
                succeeded_ids = [msg['_id'] for msg in succeeded_messages]

                # Results are aggregated across all attempts
                if aggregated_results is None:
                    aggregated_results = succeeded_ids
                else:
                    aggregated_results.extend(succeeded_ids)

                # Retry the remaining messages with a new sequence
                # of markers.
                prepared_messages = prepared_messages[failed_index:]
                next_marker = self._next_marker(queue_name, project)
                for index, message in enumerate(prepared_messages):
                    message['k'] = next_marker + index

                # Chill out for a moment to mitigate thrashing/thundering
                self._backoff_sleep(attempt)

            except Exception as ex:
                # TODO(kgriffs): Query the DB to get the last marker that
                # made it, and extrapolate from there to figure out what
                # needs to be retried.

                LOG.exception(ex)
                raise

        message = _(u'Hit maximum number of attempts (%(max)s) for queue '
                    u'%(id)s in project %(project)s')
        message %= dict(max=options.CFG.max_attempts,
                        id=queue_name,
                        project=project)

        LOG.warning(message)

        succeeded_ids = map(str, aggregated_results)
        raise exceptions.MessageConflict(queue_name, project, succeeded_ids)
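
_backoff_sleep is likewise not shown; a minimal sketch, assuming truncated exponential backoff with jitter to mitigate the thrashing/thundering the comments mention (the base delay and cap are illustrative values, not Marconi's configured defaults):

import random
import time


def _backoff_sleep(self, attempt):
    """Sleep between retries, backing off exponentially with jitter."""
    seconds = min(10.0, 0.1 * (2 ** attempt))
    time.sleep(seconds * random.uniform(0.5, 1.5))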