Example #1
def get_checked_field(document, name, value_type):
    """Validates and retrieves a typed field from a document.

    This function attempts to look up doc[name], and raises
    appropriate HTTP errors if the field is missing or not an
    instance of the given type.

    :param document: dict-like object
    :param name: field name
    :param value_type: expected value type, or '*' to accept any type
    :raises: HTTPBadRequestBody if the field is missing or not an
        instance of value_type
    :returns: value obtained from doc[name]
    """

    try:
        value = document[name]
    except KeyError:
        description = _(u'Missing "{name}" field.').format(name=name)
        raise errors.HTTPBadRequestBody(description)

    if value_type == '*' or isinstance(value, value_type):
        return value

    description = _(u'The value of the "{name}" field must be a {vtype}.')
    description = description.format(name=name, vtype=value_type.__name__)
    raise errors.HTTPBadRequestBody(description)
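
A minimal usage sketch for the helper above (the document, field names, and values are hypothetical):

doc = {'ttl': 300, 'grace': 60}

ttl = get_checked_field(doc, 'ttl', int)      # returns 300
grace = get_checked_field(doc, 'grace', '*')  # '*' accepts any type, returns 60

# Both of the following raise errors.HTTPBadRequestBody:
#   get_checked_field(doc, 'limit', int)             # missing field
#   get_checked_field({'ttl': 'soon'}, 'ttl', int)   # wrong type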
Example #2
    def claim_creation(self, metadata, limit=None):
        """Restrictions on the claim parameters upon creation.

        :param metadata: The claim metadata
        :param limit: The number of messages to claim
        :raises: ValidationFailed if either TTL or grace is out of range,
            or the expected number of messages exceeds the limit.
        """

        self.claim_updating(metadata)

        uplimit = self._limits_conf.max_messages_per_claim
        if limit is not None and not (0 < limit <= uplimit):
            msg = _(u'Limit must be at least 1 and may not '
                    'be greater than {0}.')

            raise ValidationFailed(
                msg, self._limits_conf.max_messages_per_claim)

        grace = metadata['grace']

        if not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace):
            msg = _(u'The grace for a claim may not exceed {0} seconds, and '
                    'must be at least {1} seconds long.')

            raise ValidationFailed(
                msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE)
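
The limit guard above reduces to a simple range check; a minimal standalone sketch of the same condition, using a hypothetical configuration value:

max_messages_per_claim = 20  # hypothetical config value

def limit_is_valid(limit):
    # Mirrors the guard in claim_creation: None means "use the default",
    # otherwise the value must fall in 1..max_messages_per_claim.
    return limit is None or 0 < limit <= max_messages_per_claim

assert limit_is_valid(None)
assert limit_is_valid(1) and limit_is_valid(20)
assert not limit_is_valid(0) and not limit_is_valid(21)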
Example #3
    def on_patch(self, req, resp, project_id, queue_name, claim_id):
        LOG.debug(_(u'Claim Item PATCH - claim: %(claim_id)s, '
                    u'queue: %(queue_name)s, project:%(project_id)s') %
                  {'queue_name': queue_name,
                   'project_id': project_id,
                   'claim_id': claim_id})

        # Read claim metadata (e.g., TTL) and raise appropriate
        # HTTP errors as needed.
        metadata, = wsgi_utils.filter_stream(req.stream, req.content_length,
                                             CLAIM_PATCH_SPEC)

        try:
            self._validate.claim_updating(metadata)
            self.claim_controller.update(queue_name,
                                         claim_id=claim_id,
                                         metadata=metadata,
                                         project=project_id)

            resp.status = falcon.HTTP_204

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Claim could not be updated.')
            raise wsgi_errors.HTTPServiceUnavailable(description)
Example #4
    def on_delete(self, req, resp, project_id, queue_name, message_id):
        LOG.debug(_(u'Messages item DELETE - message: %(message)s, '
                    u'queue: %(queue)s, project: %(project)s'),
                  {'message': message_id,
                   'queue': queue_name,
                   'project': project_id})
        try:
            self.message_controller.delete(
                queue_name,
                message_id=message_id,
                project=project_id,
                claim=req.get_param('claim_id'))

        except storage_errors.NotPermitted as ex:
            LOG.exception(ex)
            title = _(u'Unable to delete')
            description = _(u'This message is claimed; it cannot be '
                            u'deleted without a valid claim_id.')
            raise falcon.HTTPForbidden(title, description)

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # All good
        resp.status = falcon.HTTP_204
Example #5
    def on_get(self, req, resp, project_id, queue_name, message_id):
        LOG.debug(_(u'Messages item GET - message: %(message)s, '
                    u'queue: %(queue)s, project: %(project)s'),
                  {'message': message_id,
                   'queue': queue_name,
                   'project': project_id})
        try:
            message = self.message_controller.get(
                queue_name,
                message_id,
                project=project_id)

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare response
        message['href'] = req.path
        del message['id']

        resp.content_location = req.relative_uri
        resp.body = utils.to_json(message)
Example #6
    def __enter__(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug(_('Got file lock "%s"'), self.fname)
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise
Example #7
    def claim_creation(self, metadata, limit=None):
        """Restrictions on the claim parameters upon creation.

        :param metadata: The claim metadata
        :param limit: The number of messages to claim
        :raises: ValidationFailed if either TTL or grace is out of range,
            or the expected number of messages exceeds the limit.
        """

        self.claim_updating(metadata)

        uplimit = self._limits_conf.max_messages_per_claim
        if limit is not None and not (0 < limit <= uplimit):
            msg = _(u'Limit must be at least 1 and may not '
                    'be greater than {0}.')

            raise ValidationFailed(msg,
                                   self._limits_conf.max_messages_per_claim)

        grace = metadata['grace']

        if not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace):
            msg = _(u'The grace for a claim may not exceed {0} seconds, and '
                    'must be at least {1} seconds long.')

            raise ValidationFailed(msg, self._limits_conf.max_claim_grace,
                                   MIN_CLAIM_GRACE)
Example #8
    def wrapper(self, *args, **kwargs):
        # TODO(kgriffs): Figure out a way to not have to rely on the
        # Note(prashanthr_) : Try to reuse this utility. Violates DRY
        # Can pass config parameters into the decorator and create a
        # storage level utility.

        max_attempts = self.driver.redis_conf.max_reconnect_attempts
        sleep_sec = self.driver.redis_conf.reconnect_sleep

        for attempt in range(max_attempts):
            try:
                self.init_connection()
                return func(self, *args, **kwargs)

            except redis.exceptions.ConnectionError as ex:
                LOG.warn(_(u'Caught AutoReconnect, retrying the '
                           'call to {0}').format(func))

                time.sleep(sleep_sec * (2 ** attempt))
        else:
            LOG.error(_(u'Caught AutoReconnect, maximum attempts '
                        'to {0} exceeded.').format(func))

            raise ex
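
The retry delay above grows exponentially with the attempt number; for instance, with a hypothetical reconnect_sleep of 0.5 seconds the successive waits are:

sleep_sec = 0.5  # hypothetical reconnect_sleep value
delays = [sleep_sec * (2 ** attempt) for attempt in range(4)]
print(delays)    # [0.5, 1.0, 2.0, 4.0]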
Example #9
    def on_get(self, req, resp, project_id, queue_name, message_id):
        LOG.debug(
            _(u'Messages item GET - message: %(message)s, '
              u'queue: %(queue)s, project: %(project)s'), {
                  'message': message_id,
                  'queue': queue_name,
                  'project': project_id
              })
        try:
            message = self.message_controller.get(queue_name,
                                                  message_id,
                                                  project=project_id)

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare response
        message['href'] = req.path
        del message['id']

        resp.content_location = req.relative_uri
        resp.body = utils.to_json(message)
Example #10
    def on_patch(self, req, resp, project_id, queue_name, claim_id):
        LOG.debug(
            _(u'Claim Item PATCH - claim: %(claim_id)s, '
              u'queue: %(queue_name)s, project:%(project_id)s') % {
                  'queue_name': queue_name,
                  'project_id': project_id,
                  'claim_id': claim_id
              })

        # Read claim metadata (e.g., TTL) and raise appropriate
        # HTTP errors as needed.
        metadata, = wsgi_utils.filter_stream(req.stream, req.content_length,
                                             CLAIM_PATCH_SPEC)

        try:
            self._validate.claim_updating(metadata)
            self.claim_controller.update(queue_name,
                                         claim_id=claim_id,
                                         metadata=metadata,
                                         project=project_id)

            resp.status = falcon.HTTP_204

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Claim could not be updated.')
            raise wsgi_errors.HTTPServiceUnavailable(description)
Example #11
def get_checked_field(document, name, value_type):
    """Validates and retrieves a typed field from a document.

    This function attempts to look up doc[name], and raises
    appropriate HTTP errors if the field is missing or not an
    instance of the given type.

    :param document: dict-like object
    :param name: field name
    :param value_type: expected value type, or '*' to accept any type
    :raises: HTTPBadRequestBody if the field is missing or not an
        instance of value_type
    :returns: value obtained from doc[name]
    """

    try:
        value = document[name]
    except KeyError:
        description = _(u'Missing "{name}" field.').format(name=name)
        raise errors.HTTPBadRequestBody(description)

    if value_type == '*' or isinstance(value, value_type):
        return value

    description = _(u'The value of the "{name}" field must be a {vtype}.')
    description = description.format(name=name, vtype=value_type.__name__)
    raise errors.HTTPBadRequestBody(description)
Example #12
    def on_delete(self, req, resp, project_id, queue_name, message_id):
        LOG.debug(
            _(u'Messages item DELETE - message: %(message)s, '
              u'queue: %(queue)s, project: %(project)s'), {
                  'message': message_id,
                  'queue': queue_name,
                  'project': project_id
              })
        try:
            self.message_controller.delete(queue_name,
                                           message_id=message_id,
                                           project=project_id,
                                           claim=req.get_param('claim_id'))

        except storage_errors.NotPermitted as ex:
            LOG.exception(ex)
            title = _(u'Unable to delete')
            description = _(u'This message is claimed; it cannot be '
                            u'deleted without a valid claim_id.')
            raise falcon.HTTPForbidden(title, description)

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # All good
        resp.status = falcon.HTTP_204
Example #13
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)
        LOG.debug(_('Released file lock "%s"'), self.fname)
Example #14
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Semaphore / lock released "%(function)s"'),
                          {'function': f.__name__})
Example #15
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Semaphore / lock released "%(function)s"'),
                          {'function': f.__name__})
Example #16
class HTTPServiceUnavailable(falcon.HTTPServiceUnavailable):
    """Wraps falcon.HTTPServiceUnavailable with Marconi messaging."""

    TITLE = _(u'Service temporarily unavailable')
    DESCRIPTION = _(u'Please try again in a few seconds.')

    def __init__(self, description, retry_after=30):
        description = description + ' ' + self.DESCRIPTION
        super(HTTPServiceUnavailable, self).__init__(self.TITLE, description,
                                                     retry_after)
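
A short usage sketch of the wrapper above (the description text is just an example):

err = HTTPServiceUnavailable(_(u'Claim could not be created.'))

# The wrapper fills in the rest:
#   title:       'Service temporarily unavailable'
#   description: 'Claim could not be created. Please try again in a few seconds.'
#   retry_after: 30 seconds, unless overridden via the keyword argument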
Example #17
    def on_post(self, req, resp, project_id, queue_name):
        LOG.debug(_(u'Claims collection POST - queue: %(queue)s, '
                    u'project: %(project)s'),
                  {'queue': queue_name, 'project': project_id})

        # Check for an explicit limit on the # of messages to claim
        limit = req.get_param_as_int('limit')
        claim_options = {} if limit is None else {'limit': limit}

        # Place JSON size restriction before parsing
        if req.content_length > self._metadata_max_length:
            description = _(u'Claim metadata size is too large.')
            raise wsgi_errors.HTTPBadRequestBody(description)

        # Read claim metadata (e.g., TTL) and raise appropriate
        # HTTP errors as needed.
        metadata, = wsgi_utils.filter_stream(req.stream, req.content_length,
                                             CLAIM_POST_SPEC)

        # Claim some messages
        try:
            self._validate.claim_creation(metadata, **claim_options)
            cid, msgs = self.claim_controller.create(
                queue_name,
                metadata=metadata,
                project=project_id,
                **claim_options)

            # Buffer claimed messages
            # TODO(kgriffs): optimize, along with serialization (below)
            resp_msgs = list(msgs)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Claim could not be created.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Serialize claimed messages, if any. This logic assumes
        # the storage driver returned well-formed messages.
        if len(resp_msgs) != 0:
            for msg in resp_msgs:
                msg['href'] = _msg_uri_from_claim(
                    req.path.rpartition('/')[0], msg['id'], cid)

                del msg['id']

            resp.location = req.path + '/' + cid
            resp.body = utils.to_json(resp_msgs)
            resp.status = falcon.HTTP_201
        else:
            resp.status = falcon.HTTP_204
Example #18
    def on_post(self, req, resp, project_id, queue_name):
        LOG.debug(
            _(u'Claims collection POST - queue: %(queue)s, '
              u'project: %(project)s'), {
                  'queue': queue_name,
                  'project': project_id
              })

        # Check for an explicit limit on the # of messages to claim
        limit = req.get_param_as_int('limit')
        claim_options = {} if limit is None else {'limit': limit}

        # Read claim metadata (e.g., TTL) and raise appropriate
        # HTTP errors as needed.
        metadata, = wsgi_utils.filter_stream(req.stream, req.content_length,
                                             CLAIM_POST_SPEC)

        # Claim some messages
        try:
            self._validate.claim_creation(metadata, limit=limit)
            cid, msgs = self.claim_controller.create(queue_name,
                                                     metadata=metadata,
                                                     project=project_id,
                                                     **claim_options)

            # Buffer claimed messages
            # TODO(kgriffs): optimize, along with serialization (below)
            resp_msgs = list(msgs)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Claim could not be created.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Serialize claimed messages, if any. This logic assumes
        # the storage driver returned well-formed messages.
        if len(resp_msgs) != 0:
            for msg in resp_msgs:
                msg['href'] = _msg_uri_from_claim(
                    req.path.rpartition('/')[0], msg['id'], cid)

                del msg['id']

            resp.location = req.path + '/' + cid
            resp.body = utils.to_json(resp_msgs)
            resp.status = falcon.HTTP_201
        else:
            resp.status = falcon.HTTP_204
Example #19
    def on_delete(self, req, resp, project_id, queue_name):
        LOG.debug(_(u'Queue item DELETE - queue: %(queue)s, '
                    u'project: %(project)s'),
                  {'queue': queue_name, 'project': project_id})
        try:
            self.queue_controller.delete(queue_name, project=project_id)

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Queue could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        resp.status = falcon.HTTP_204
Example #20
    def storage(self):
        LOG.debug(_(u'Loading storage driver'))

        if self.conf.sharding:
            LOG.debug(_(u'Storage sharding enabled'))
            storage_driver = sharding.DataDriver(self.conf, self.cache,
                                                 self.control)
        else:
            storage_driver = storage_utils.load_storage_driver(
                self.conf, self.cache)

        LOG.debug(_(u'Loading storage pipeline'))
        return pipeline.DataDriver(self.conf, storage_driver)
Example #21
    def storage(self):
        LOG.debug(_(u'Loading storage driver'))

        if self.conf.sharding:
            LOG.debug(_(u'Storage sharding enabled'))
            storage_driver = sharding.DataDriver(self.conf, self.cache,
                                                 self.control)
        else:
            storage_driver = storage_utils.load_storage_driver(
                self.conf, self.cache)

        LOG.debug(_(u'Loading storage pipeline'))
        return pipeline.DataDriver(self.conf, storage_driver)
Example #22
    def _get_by_id(self, base_path, project_id, queue_name, ids):
        """Returns one or more messages from the queue by ID."""
        try:
            self._validate.message_listing(limit=len(ids))
            messages = self.message_controller.bulk_get(
                queue_name,
                message_ids=ids,
                project=project_id)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare response
        messages = list(messages)
        if not messages:
            return None

        base_path += '/'
        for each_message in messages:
            each_message['href'] = base_path + each_message['id']
            del each_message['id']

        return messages
Example #23
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {
                                                    'filename': self.fname,
                                                    'exception': e,
                                                })
Example #24
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                this_exc_message = unicode(exc)
                if this_exc_message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
Example #25
def _get_storage_pipeline(resource_name, conf):
    """Constructs and returns a storage resource pipeline.

    This is a helper function for any service supporting
    pipelines for the storage layer. The function returns
    a pipeline based on the `{resource_name}_pipeline`
    config option.

    Stages in the pipeline implement controller methods
    that they want to hook. A stage can halt the
    pipeline immediately by returning a value that is
    not None; otherwise, processing will continue
    to the next stage, ending with the actual storage
    controller.

    :param resource_name: Name of the resource whose
        `{resource_name}_pipeline` config option selects the stages.
    :param conf: Configuration instance.
    :type conf: `cfg.ConfigOpts`

    :returns: A pipeline to use.
    :rtype: `Pipeline`
    """
    conf.register_opts(_PIPELINE_CONFIGS, group=_PIPELINE_GROUP)

    storage_conf = conf[_PIPELINE_GROUP]

    pipeline = []
    for ns in storage_conf[resource_name + "_pipeline"]:
        try:
            mgr = driver.DriverManager("marconi.queues.storage.stages",
                                       ns,
                                       invoke_on_load=True)
            pipeline.append(mgr.driver)
        except RuntimeError as exc:
            LOG.warning(_(u"Stage %(stage)s could not be imported: %(ex)s"),
                        {"stage": ns, "ex": str(exc)})
            continue

    return common.Pipeline(pipeline)
Example #26
class HTTPBadRequestBody(falcon.HTTPBadRequest):
    """Wraps falcon.HTTPBadRequest with a contextual title."""

    TITLE = _(u'Invalid request body')

    def __init__(self, description):
        super(HTTPBadRequestBody, self).__init__(self.TITLE, description)
Example #27
    def deprecated(self, msg, *args, **kwargs):
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)
Example #28
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded values and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return default
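
Typical behavior, assuming TRUE_STRINGS and FALSE_STRINGS contain the values listed in the docstring:

bool_from_string('YES')                  # True  (case-insensitive)
bool_from_string('0')                    # False
bool_from_string('maybe')                # False (falls back to default)
bool_from_string('maybe', default=True)  # True
bool_from_string(None, strict=True)      # raises ValueError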
Example #29
class HTTPDocumentTypeNotSupported(HTTPBadRequestBody):
    """Wraps HTTPBadRequestBody with a standard description."""

    DESCRIPTION = _(u'Document type not supported.')

    def __init__(self):
        super(HTTPDocumentTypeNotSupported, self).__init__(self.DESCRIPTION)
Example #30
    def on_get(self, req, resp, project_id, queue_name):
        try:
            resp_dict = self.queue_ctrl.stats(queue_name, project=project_id)

            message_stats = resp_dict['messages']

            if message_stats['total'] != 0:
                base_path = req.path[:req.path.rindex('/')] + '/messages/'

                newest = message_stats['newest']
                newest['href'] = base_path + newest['id']
                del newest['id']

                oldest = message_stats['oldest']
                oldest['href'] = base_path + oldest['id']
                del oldest['id']

            resp.content_location = req.path
            resp.body = utils.to_json(resp_dict)
            # status defaults to 200

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Queue stats could not be read.')
            raise wsgi_errors.HTTPServiceUnavailable(description)
Example #31
    def on_put(self, req, resp, project_id, queue_name):
        LOG.debug(
            u"Queue metadata PUT - queue: %(queue)s, " u"project: %(project)s",
            {"queue": queue_name, "project": project_id},
        )

        try:
            # Place JSON size restriction before parsing
            self._validate.queue_metadata_length(req.content_length)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        # Deserialize queue metadata
        metadata, = wsgi_utils.filter_stream(req.stream, req.content_length, spec=None)

        try:
            self.queue_ctrl.set_metadata(queue_name, metadata=metadata, project=project_id)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except storage_errors.QueueDoesNotExist:
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u"Metadata could not be updated.")
            raise wsgi_errors.HTTPServiceUnavailable(description)

        resp.status = falcon.HTTP_204
        resp.location = req.path
Example #32
    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.

        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)
Example #33
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                this_exc_message = six.u(str(exc))
                if this_exc_message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
Example #34
    def deprecated(self, msg, *args, **kwargs):
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)
Example #35
    def on_put(self, req, resp, project_id, queue_name):
        LOG.debug(_(u'Queue item PUT - queue: %(queue)s, '
                    u'project: %(project)s'),
                  {'queue': queue_name, 'project': project_id})

        try:
            created = self.queue_controller.create(
                queue_name, project=project_id)

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Queue could not be created.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        resp.status = falcon.HTTP_201 if created else falcon.HTTP_204
        resp.location = req.path
Example #36
    def on_delete(self, req, resp, project_id, queue_name):
        LOG.debug(
            _(u'Queue item DELETE - queue: %(queue)s, '
              u'project: %(project)s'), {
                  'queue': queue_name,
                  'project': project_id
              })
        try:
            self.queue_controller.delete(queue_name, project=project_id)

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Queue could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        resp.status = falcon.HTTP_204
Example #37
    def on_get(self, req, resp, project_id, queue_name):
        try:
            resp_dict = self.queue_ctrl.stats(queue_name,
                                              project=project_id)

            message_stats = resp_dict['messages']

            if message_stats['total'] != 0:
                base_path = req.path[:req.path.rindex('/')] + '/messages/'

                newest = message_stats['newest']
                newest['href'] = base_path + newest['id']
                del newest['id']

                oldest = message_stats['oldest']
                oldest['href'] = base_path + oldest['id']
                del oldest['id']

            resp.content_location = req.path
            resp.body = utils.to_json(resp_dict)
            # status defaults to 200

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Queue stats could not be read.')
            raise wsgi_errors.HTTPServiceUnavailable(description)
Example #38
    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.

        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)
Example #39
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)
Example #40
    def _get_by_id(self, base_path, project_id, queue_name, ids):
        """Returns one or more messages from the queue by ID."""
        try:
            self._validate.message_listing(limit=len(ids))
            messages = self.message_controller.bulk_get(queue_name,
                                                        message_ids=ids,
                                                        project=project_id)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare response
        messages = list(messages)
        if not messages:
            return None

        base_path += '/'
        for each_message in messages:
            each_message['href'] = base_path + each_message['id']
            del each_message['id']

        return messages
Example #41
    def on_get(self, req, resp, project_id, queue_name, claim_id):
        LOG.debug(
            u"Claim item GET - claim: %(claim_id)s, " u"queue: %(queue_name)s, project: %(project_id)s",
            {"queue_name": queue_name, "project_id": project_id, "claim_id": claim_id},
        )
        try:
            meta, msgs = self.claim_controller.get(queue_name, claim_id=claim_id, project=project_id)

            # Buffer claimed messages
            # TODO(kgriffs): Optimize along with serialization (see below)
            meta["messages"] = list(msgs)

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()
        except Exception as ex:
            LOG.exception(ex)
            description = _(u"Claim could not be queried.")
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Serialize claimed messages
        # TODO(kgriffs): Optimize
        for msg in meta["messages"]:
            msg["href"] = _msg_uri_from_claim(req.path.rsplit("/", 2)[0], msg["id"], meta["id"])
            del msg["id"]

        meta["href"] = req.path
        del meta["id"]

        resp.content_location = req.relative_uri
        resp.body = utils.to_json(meta)
Example #42
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            logging.error(
                _('Original exception being dropped: %s'),
                traceback.format_exception(self.type_, self.value, self.tb))
            return False
        if self.reraise:
            six.reraise(self.type_, self.value, self.tb)
Example #43
    def on_delete(self, req, resp, project_id, queue_name, claim_id):
        LOG.debug(_(u'Claim item DELETE - claim: %(claim_id)s, '
                    u'queue: %(queue_name)s, project: %(project_id)s') %
                  {'queue_name': queue_name,
                   'project_id': project_id,
                   'claim_id': claim_id})
        try:
            self.claim_controller.delete(queue_name,
                                         claim_id=claim_id,
                                         project=project_id)

            resp.status = falcon.HTTP_204

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Claim could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)
Example #44
    def _wrapper():
        try:
            logging.setup('marconi')
            func()
        except KeyboardInterrupt:
            LOG.info(_(u'Terminating'))
        except Exception as ex:
            _fail(1, ex)
Example #45
    def cache(self):
        LOG.debug(_(u'Loading proxy cache driver'))
        try:
            mgr = oslo_cache.get_cache(self.conf)
            return mgr
        except RuntimeError as exc:
            LOG.exception(exc)
            raise errors.InvalidDriver(exc)
Example #46
    def _wrapper():
        try:
            logging.setup('marconi')
            func()
        except KeyboardInterrupt:
            LOG.info(_(u'Terminating'))
        except Exception as ex:
            _fail(1, ex)
Example #47
    def _inc_counter(self, name, project=None, amount=1, window=None):
        """Increments the message counter and returns the new value.

        :param name: Name of the queue to which the counter is scoped
        :param project: Queue's project name
        :param amount: (Default 1) Amount by which to increment the counter
        :param window: (Default None) A time window, in seconds, that
            must have elapsed since the counter was last updated, in
            order to increment the counter.

        :returns: Updated message counter value, or None if window
            was specified, and the counter has already been updated
            within the specified time period.

        :raises: storage.errors.QueueDoesNotExist
        """
        now = timeutils.utcnow_ts()

        update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
        query = _get_scoped_query(name, project)
        if window is not None:
            threshold = now - window
            query['c.t'] = {'$lt': threshold}

        while True:
            try:
                doc = self._collection.find_and_modify(query,
                                                       update,
                                                       new=True,
                                                       fields={
                                                           'c.v': 1,
                                                           '_id': 0
                                                       })

                break
            except pymongo.errors.AutoReconnect as ex:
                LOG.exception(ex)

        if doc is None:
            if window is None:
                # NOTE(kgriffs): Since we did not filter by a time window,
                # the queue should have been found and updated. Perhaps
                # the queue has been deleted?
                message = _(u'Failed to increment the message '
                            u'counter for queue %(name)s and '
                            u'project %(project)s')
                message %= dict(name=name, project=project)

                LOG.warning(message)

                raise errors.QueueDoesNotExist(name, project)

            # NOTE(kgriffs): Assume the queue existed, but the counter
            # was recently updated, causing the range query on 'c.t' to
            # exclude the record.
            return None

        return doc['c']['v']
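
The `window` argument acts as a time-based guard: the query only matches when the counter's last update (`c.t`) is older than `now - window`, so a second increment inside the window matches nothing and the method returns None. A rough standalone sketch of that check, with hypothetical timestamps:

now = 1700000000         # hypothetical current timestamp, in seconds
last_updated = now - 10  # counter was bumped 10 seconds ago
window = 30

# Mirrors the 'c.t' filter: only update if the last change is outside the window
may_update = last_updated < now - window
print(may_update)        # False -> _inc_counter would return None in this case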
Example #48
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            logging.error(_('Original exception being dropped: %s'),
                          traceback.format_exception(self.type_,
                                                     self.value,
                                                     self.tb))
            return False
        if self.reraise:
            six.reraise(self.type_, self.value, self.tb)
Example #49
    def cache(self):
        LOG.debug(_(u'Loading proxy cache driver'))
        try:
            oslo_cache.register_oslo_configs(self.conf)
            mgr = oslo_cache.get_cache(self.conf.cache_url)
            return mgr
        except RuntimeError as exc:
            LOG.exception(exc)
            raise errors.InvalidDriver(exc)
Example #50
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns an integer representation of text
                       in bytes; otherwise a float is returned
                       (default: False).
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.

    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        if match.group(3) in ['b', 'bit']:
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        return int(math.ceil(res))
    return res
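
Assuming the usual unit tables (IEC prefixes as powers of 1024, SI prefixes as powers of 1000, and a trailing 'b'/'bit' dividing by 8), the conversion behaves roughly as follows:

string_to_bytes('1KB')                    # 1024.0 (IEC is the default system)
string_to_bytes('1kB', unit_system='SI')  # 1000.0
string_to_bytes('1Kb')                    # 128.0  (bits are converted to bytes)
string_to_bytes('5foo')                   # raises ValueError (no unit match)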
Example #51
    def message_length(self, content_length):
        """Restrictions on message post length.

        :param content_length: Length of the message post body.
        :raises: ValidationFailed if the message body is oversize.
        """
        if content_length > self._limits_conf.max_message_size:
            raise ValidationFailed(
                _(u'Message collection size is too large. Max size {0}'),
                self._limits_conf.max_message_size)
Example #52
    def queue_metadata_length(self, content_length):
        """Restrictions on queue's length.

        :param content_length: Queue request's length.
        :raises: ValidationFailed if the metadata is oversize.
        """

        if content_length > self._limits_conf.max_queue_metadata:
            msg = _(u'Queue metadata is too large. Max size: {0}')
            raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)
Example #53
    def message_length(self, content_length):
        """Restrictions on message post length.

        :param content_length: Length of the message post body.
        :raises: ValidationFailed if the message body is oversize.
        """
        if content_length > self._limits_conf.max_message_size:
            raise ValidationFailed(
                _(u'Message collection size is too large. Max size {0}'),
                self._limits_conf.max_message_size)
Example #54
    def on_put(self, req, resp, project_id, queue_name):
        LOG.debug(
            _(u'Queue item PUT - queue: %(queue)s, '
              u'project: %(project)s'), {
                  'queue': queue_name,
                  'project': project_id
              })

        try:
            created = self.queue_controller.create(queue_name,
                                                   project=project_id)

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Queue could not be created.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        resp.status = falcon.HTTP_201 if created else falcon.HTTP_204
        resp.location = req.path
Example #55
        def consumer(*args, **kwargs):
            """Consumes the pipeline for `method`

            This function walks through the pipeline and calls
            `method` for each of the items in the pipeline. A
            warning will be logged for each stage not implementing
            `method`, and an AttributeError will be raised if
            none of the stages do.

            :param args: Positional arguments to pass to the call.
            :param kwargs: Keyword arguments to pass to the call.

            :raises: AttributeError if none of the stages implement `method`
            """
            # NOTE(flaper87): Used as a way to verify
            # the requested method exists in at least
            # one of the stages, otherwise AttributeError
            # will be raised.
            target = None

            for stage in self._pipeline:
                try:
                    target = getattr(stage, method)
                except AttributeError:
                    sstage = six.text_type(stage)
                    msgtmpl = _(u"Stage %(stage)s does not "
                                "implement %(method)s")
                    LOG.warning(msgtmpl, {'stage': sstage, 'method': method})
                    continue

                result = target(*args, **kwargs)

                # NOTE(flaper87): Will keep going forward
                # through the stageline unless the call returns
                # something.
                if result is not None:
                    return result

            if target is None:
                msg = _(u'Method %s not found in any of '
                        'the registered stages') % method
                LOG.error(msg)
                raise AttributeError(msg)
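
A self-contained sketch of the same short-circuit idea with hypothetical stages (the real consumer resolves `method` by name via getattr and warns on stages that lack it):

class CacheStage(object):
    def get(self, key):
        return None  # cache miss: fall through to the next stage

class StorageStage(object):
    def get(self, key):
        return 'value-for-%s' % key

def consume(stages, key):
    # The first stage returning something other than None stops the walk,
    # mirroring the pipeline consumer above.
    for stage in stages:
        result = stage.get(key)
        if result is not None:
            return result

print(consume([CacheStage(), StorageStage()], 'abc'))  # value-for-abc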
Example #56
    def on_delete(self, req, resp, project_id, queue_name, claim_id):
        LOG.debug(
            _(u'Claim item DELETE - claim: %(claim_id)s, '
              u'queue: %(queue_name)s, project: %(project_id)s') % {
                  'queue_name': queue_name,
                  'project_id': project_id,
                  'claim_id': claim_id
              })
        try:
            self.claim_controller.delete(queue_name,
                                         claim_id=claim_id,
                                         project=project_id)

            resp.status = falcon.HTTP_204

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Claim could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)
Example #57
        def consumer(*args, **kwargs):
            """Consumes the pipeline for `method`

            This function walks through the pipeline and calls
            `method` for each of the items in the pipeline. A
            warning will be logged for each stage not implementing
            `method`, and an AttributeError will be raised if
            none of the stages do.

            :param args: Positional arguments to pass to the call.
            :param kwargs: Keyword arguments to pass to the call.

            :raises: AttributeError if none of the stages implement `method`
            """
            # NOTE(flaper87): Used as a way to verify
            # the requested method exists in at least
            # one of the stages, otherwise AttributeError
            # will be raised.
            target = None

            for stage in self._pipeline:
                try:
                    target = getattr(stage, method)
                except AttributeError:
                    sstage = six.text_type(stage)
                    msgtmpl = _(u"Stage %(stage)s does not "
                                "implement %(method)s")
                    LOG.warning(msgtmpl, {'stage': sstage, 'method': method})
                    continue

                result = target(*args, **kwargs)

                # NOTE(flaper87): Will keep going forward
                # through the stageline unless the call returns
                # something.
                if result is not None:
                    return result

            if target is None:
                msg = _(u'Method %s not found in any of '
                        'the registered stages') % method
                LOG.error(msg)
                raise AttributeError(msg)