Example No. 1
def main():
    CONF.register_cli_opt(category_opt)

    try:
        utils.read_config('designate', sys.argv)
        logging.setup(CONF, 'designate')
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))

        print(_('Please re-run designate-manage as root.'))
        sys.exit(2)

    utils.setup_gmr(log_dir=cfg.CONF.log_dir)

    hookpoints.log_hook_setup()

    fn = CONF.category.action_fn

    fn_args = fetch_func_args(fn)
    fn(*fn_args)
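
The category_opt registered above is defined elsewhere in designate-manage; in oslo.config-based manage tools it is typically a cfg.SubCommandOpt whose handler attaches an action_fn to each sub-command, which main() later reads back through CONF.category.action_fn. A minimal sketch of that wiring, with a hypothetical command name, might look like this:

from oslo_config import cfg


def add_action_parsers(subparsers):
    # Each management command gets its own argparse sub-parser; the chosen
    # handler is stored as 'action_fn' so main() can retrieve it through
    # CONF.category.action_fn.
    parser = subparsers.add_parser('database_sync')
    parser.set_defaults(action_fn=lambda: print('syncing database...'))


category_opt = cfg.SubCommandOpt('category',
                                 title='Available commands',
                                 handler=add_action_parsers)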
Example No. 2
def check(rule, ctxt, target=None, do_raise=True, exc=exceptions.Forbidden):
    creds = ctxt.to_dict()
    target = target or {}
    try:
        result = _ENFORCER.enforce(rule, target, creds, do_raise, exc)
    except Exception:
        result = False
        raise
    else:
        return result
    finally:
        extra = {'policy': {'rule': rule, 'target': target}}

        if result:
            LOG.info(_("Policy check succeeded for rule '%(rule)s' "
                       "on target %(target)s") % {
                           'rule': rule,
                           'target': repr(target)
                       },
                     extra=extra)
        else:
            LOG.info(_("Policy check failed for rule '%(rule)s' "
                       "on target %(target)s") % {
                           'rule': rule,
                           'target': repr(target)
                       },
                     extra=extra)
Example No. 3
def check(rule, ctxt, target=None, do_raise=True, exc=exceptions.Forbidden):
    creds = ctxt.to_dict()
    target = target or {}
    try:
        result = _ENFORCER.enforce(rule, target, creds, do_raise, exc)
    except Exception:
        result = False
        raise
    else:
        return result
    finally:
        extra = {"policy": {"rule": rule, "target": target}}

        if result:
            LOG.info(
                _("Policy check succeeded for rule '%(rule)s' " "on target %(target)s")
                % {"rule": rule, "target": repr(target)},
                extra=extra,
            )
        else:
            LOG.info(
                _("Policy check failed for rule '%(rule)s' " "on target %(target)s")
                % {"rule": rule, "target": repr(target)},
                extra=extra,
            )
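
Both variants above log the outcome from the finally block and either return the enforcement result or re-raise. An illustrative call sketch (the context class is a stand-in and the rule name is made up):

class FakeContext(object):
    """Stand-in for the real request context; only to_dict() is used here."""

    def __init__(self, project_id):
        self.project_id = project_id

    def to_dict(self):
        return {'project_id': self.project_id}


ctxt = FakeContext('tenant-a')
target = {'project_id': ctxt.project_id}
# With do_raise left at its default of True, a denied check raises the exc
# passed through to the enforcer (exceptions.Forbidden); otherwise the
# boolean result is returned and logged.
check('get_zone', ctxt, target)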
Example No. 4
def verify_project_id(context, project_id):
    """verify that a project_id exists.

    This attempts to verify that a project id exists. If it does not,
    an HTTPBadRequest is emitted.

    """
    session = ksa_loading.load_session_from_conf_options(
        CONF, 'keystone', auth=context.get_auth_plugin())
    adap = ksa_loading.load_adapter_from_conf_options(CONF,
                                                      'keystone',
                                                      session=session,
                                                      min_version=(3, 0),
                                                      max_version=(3,
                                                                   'latest'))
    try:
        resp = adap.get('/projects/%s' % project_id, raise_exc=False)
    except kse.EndpointNotFound:
        LOG.error(
            "Keystone identity service version 3.0 was not found. This might "
            "be because your endpoint points to the v2.0 versioned endpoint "
            "which is not supported. Please fix this.")
        raise exceptions.KeystoneCommunicationFailure(
            _("KeystoneV3 endpoint not found"))
    except kse.ClientException:
        # something is wrong, like there isn't a keystone v3 endpoint,
        # or nova isn't configured for the interface to talk to it;
        # we'll take the pass and default to everything being ok.
        LOG.info("Unable to contact keystone to verify project_id")
        return True

    if resp:
        # All is good with this 20x status
        return True
    elif resp.status_code == 404:
        # we got access, and we know this project is not there
        raise exceptions.InvalidProject(
            _("%s is not a valid project ID.") % project_id)

    elif resp.status_code == 403:
        # we don't have enough permission to verify this, so default
        # to "it's ok".
        LOG.info(
            "Insufficient permissions for user %(user)s to verify "
            "existence of project_id %(pid)s", {
                "user": context.user_id,
                "pid": project_id
            })
        return True
    else:
        LOG.warning(
            "Unexpected response from keystone trying to "
            "verify project_id %(pid)s - resp: %(code)s %(content)s", {
                "pid": project_id,
                "code": resp.status_code,
                "content": resp.content
            })
        # realize we did something wrong, but move on with a warning
        return True
Example No. 5
def verify_project_id(context, project_id):
    """verify that a project_id exists.

    This attempts to verify that a project id exists. If it does not,
    an HTTPBadRequest is emitted.

    """
    session = ksa_loading.load_session_from_conf_options(
        CONF, 'keystone', auth=context.get_auth_plugin())
    adap = ksa_loading.load_adapter_from_conf_options(
        CONF, 'keystone',
        session=session, min_version=(3, 0), max_version=(3, 'latest'))
    try:
        resp = adap.get('/projects/%s' % project_id, raise_exc=False)
    except kse.EndpointNotFound:
        LOG.error(
            "Keystone identity service version 3.0 was not found. This might "
            "be because your endpoint points to the v2.0 versioned endpoint "
            "which is not supported. Please fix this.")
        raise exceptions.KeystoneCommunicationFailure(
            _("KeystoneV3 endpoint not found"))
    except kse.ClientException:
        # something is wrong, like there isn't a keystone v3 endpoint,
        # or nova isn't configured for the interface to talk to it;
        # we'll take the pass and default to everything being ok.
        LOG.info("Unable to contact keystone to verify project_id")
        return True

    if resp:
        # All is good with this 20x status
        return True
    elif resp.status_code == 404:
        # we got access, and we know this project is not there
        raise exceptions.InvalidProject(
            _("%s is not a valid project ID.") % project_id)

    elif resp.status_code == 403:
        # we don't have enough permission to verify this, so default
        # to "it's ok".
        LOG.info(
            "Insufficient permissions for user %(user)s to verify "
            "existence of project_id %(pid)s",
            {"user": context.user_id, "pid": project_id})
        return True
    else:
        LOG.warning(
            "Unexpected response from keystone trying to "
            "verify project_id %(pid)s - resp: %(code)s %(content)s",
            {"pid": project_id,
             "code": resp.status_code,
             "content": resp.content})
        # realize we did something wrong, but move on with a warning
        return True
Example No. 6
    def _get_config(self, pool_id, target_id):
        pool = pool_object.Pool.from_config(cfg.CONF, pool_id)
        # Collect every target in the pool whose ID matches; anything other
        # than exactly one match is a configuration error.
        targets = [t for t in pool.targets if t.id == target_id]
        if not targets:
            msg = _("Failed to find target with ID %s")
            raise exceptions.ConfigurationError(msg % target_id)
        if len(targets) > 1:
            msg = _("Found multiple targets with ID %s")
            raise exceptions.ConfigurationError(msg % target_id)
        return pool, targets[0]
Example No. 7
def sort_query(query, table, sort_keys, sort_dir=None, sort_dirs=None):

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(table.c, current_sort_key)
        except AttributeError:
            raise utils.InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    return query, sort_dirs
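
An illustrative, self-contained use of sort_query (the table and columns are invented; the legacy select([...]) call style matches the SQLAlchemy usage seen elsewhere on this page):

import sqlalchemy

metadata = sqlalchemy.MetaData()
zones = sqlalchemy.Table(
    'zones', metadata,
    sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True),
    sqlalchemy.Column('name', sqlalchemy.String(255)),
)

query = sqlalchemy.select([zones])
# Order by name, then id, both ascending; sort_query returns the ordered
# query together with the per-column directions it applied.
query, sort_dirs = sort_query(query, zones, ['name', 'id'], sort_dir='asc')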
Example No. 8
def serve(server, workers=None):
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))

    _launcher = service.launch(CONF, server, workers=workers,
                               restart_method='mutate')
Example No. 9
    def __init__(self, *args, **kwargs):
        super(RPCService, self).__init__(*args, **kwargs)

        LOG.debug(_("Creating RPC Server on topic '%s'") % self._rpc_topic)
        self._rpc_server = rpc.get_server(
            messaging.Target(topic=self._rpc_topic, server=self._host),
            self._rpc_endpoints)
Example No. 10
    def start(self):
        super(Service, self).start()

        LOG.info(
            _('Starting %(name)s service (version: %(version)s)') % {
                'name': self.service_name,
                'version': version.version_info.version_string()
            })
Example No. 11
    def start(self):
        super(RPCService, self).start()

        version_string = version.version_info.version_string()
        LOG.info(_('Starting %(topic)s node (version %(version_string)s)') %
                 {'topic': self.topic, 'version_string': version_string})

        LOG.debug(_("Creating RPC server on topic '%s'") % self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)
        self.rpcserver = rpc.get_server(target, self.endpoints)
        self.rpcserver.start()

        self.notifier = rpc.get_notifier(self.service_name)

        for e in self.endpoints:
            if e != self and hasattr(e, 'start'):
                e.start()
Example No. 12
    def _from_xml(self, datastring):
        plurals = set(self.metadata.get('plurals', {}))

        try:
            node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0]
            return {node.nodeName: self._from_xml_node(node, plurals)}
        except expat.ExpatError:
            msg = _("cannot understand XML")
            raise exception.MalformedRequestBody(reason=msg)
Example No. 13
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""

        try:
            action, action_args, accept = self.deserialize_request(request)
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            return webob.exc.HTTPUnsupportedMediaType(explanation=msg)
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return webob.exc.HTTPBadRequest(explanation=msg)

        action_result = self.execute_action(action, request, **action_args)
        try:
            return self.serialize_response(action, action_result, accept)
        # return unserializable result (typically a webob exc)
        except Exception:
            return action_result
Example No. 14
    def _wrap(*args, **kw):
        try:
            return f(*args, **kw)
        except Exception as e:
            if not isinstance(e, Error):
                # exc_type, exc_value, exc_traceback = sys.exc_info()
                logging.exception(_('Uncaught exception'))
                # logging.error(traceback.extract_stack(exc_traceback))
                raise Error(str(e))
            raise
Example No. 15
    def _get_paging_params(self, params):
        """
        Extract any paging parameters
        """
        marker = params.pop('marker', None)
        limit = params.pop('limit', None)
        sort_key = params.pop('sort_key', None)
        sort_dir = params.pop('sort_dir', None)

        # Negative and zero limits are not caught in storage.
        # With a number bigger than MAXSIZE, rpc throws an 'OverflowError long
        #  too big to convert'.
        # So the parameter 'limit' is checked here.
        if limit:
            try:
                invalid_limit_message = _(
                    str.format('limit should be an integer between 1 and {0}',
                               six.MAXSIZE))
                int_limit = int(limit)
                if int_limit <= 0 or int_limit > six.MAXSIZE:
                    raise exceptions.InvalidLimit(invalid_limit_message)
            # This exception is raised for non ints when int(limit) is called
            except ValueError:
                raise exceptions.InvalidLimit(invalid_limit_message)

        # sort_dir is checked in paginate_query.
        # We duplicate the sort_dir check here to throw a more specific
        # exception than ValueError.
        if sort_dir and sort_dir not in ['asc', 'desc']:
            raise exceptions.InvalidSortDir(
                _("Unknown sort direction, "
                  "must be 'desc' or 'asc'"))

        if sort_key and sort_key not in self.SORT_KEYS:
            raise exceptions.InvalidSortKey(
                _(
                    str.format('sort key must be one of {0}',
                               str(self.SORT_KEYS))))

        return marker, limit, sort_key, sort_dir
Example No. 16
    def start(self):
        super(RPCService, self).start()

        LOG.debug(_("Starting RPC server on topic '%s'") % self._rpc_topic)
        self._rpc_server.start()

        # TODO(kiall): This probably belongs somewhere else, maybe the base
        #              Service class?
        self.notifier = rpc.get_notifier(self.service_name)

        for e in self._rpc_endpoints:
            if e != self and hasattr(e, 'start'):
                e.start()
Example No. 17
    def deserialize_body(self, request, action):
        if not len(request.body) > 0:
            LOG.debug(_("Empty body provided in request"))
            return {}

        try:
            content_type = request.get_content_type()
        except exception.InvalidContentType:
            LOG.debug(_("Unrecognized Content-Type provided in request"))
            raise

        if content_type is None:
            LOG.debug(_("No Content-Type provided in request"))
            return {}

        try:
            deserializer = self.get_body_deserializer(content_type)
        except exception.InvalidContentType:
            LOG.debug(_("Unable to deserialize body as provided Content-Type"))
            raise

        return deserializer.deserialize(request.body, action)
Example No. 18
def get_paging_params(context, params, sort_keys):
    """
    Extract any paging parameters
    """
    marker = params.pop('marker', None)
    limit = params.pop('limit', cfg.CONF['service:api'].default_limit_v2)
    sort_key = params.pop('sort_key', None)
    sort_dir = params.pop('sort_dir', None)
    max_limit = cfg.CONF['service:api'].max_limit_v2

    if isinstance(limit, six.string_types) and limit.lower() == "max":
        # Support for retrieving the max results at once. If set to "max",
        # the configured max limit will be used.
        limit = max_limit

    elif limit:
        # Negative and zero limits are not caught in storage.
        # With a number bigger than MAXSIZE, rpc throws an 'OverflowError long
        # too big to convert'.
        # So the parameter 'limit' is checked here.
        invalid_limit_message = ('limit should be an integer between 1 and '
                                 '%(max)s' % {
                                     'max': max_limit
                                 })
        try:
            int_limit = int(limit)
            if int_limit <= 0 or int_limit > six.MAXSIZE:
                raise exceptions.InvalidLimit(invalid_limit_message)
        # This exception is raised for non ints when int(limit) is called
        except ValueError:
            raise exceptions.InvalidLimit(invalid_limit_message)

    # sort_dir is checked in paginate_query.
    # We duplicate the sort_dir check here to throw a more specific
    # exception than ValueError.
    if sort_dir and sort_dir not in ['asc', 'desc']:
        raise exceptions.InvalidSortDir(
            _("Unknown sort direction, "
              "must be 'desc' or 'asc'"))

    if sort_keys is None:
        sort_key = None
        sort_dir = None

    elif sort_key and sort_key not in sort_keys:
        msg = 'sort key must be one of %(keys)s' % {'keys': sort_keys}
        raise exceptions.InvalidSortKey(msg)
    elif sort_key == 'tenant_id' and not context.all_tenants:
        sort_key = None

    return marker, limit, sort_key, sort_dir
Example No. 19
    def _duplicate_service_status(self):
        engine = session.get_engine('storage:sqlalchemy')
        metadata = MetaData(bind=engine)
        status = Table('service_statuses', metadata, autoload=True)
        service_select = (select([func.count()]).select_from(status).group_by(
            'service_name', 'hostname'))
        service_counts = engine.execute(service_select).fetchall()
        duplicated_services = [i for i in service_counts if i[0] > 1]
        if duplicated_services:
            return upgradecheck.Result(
                upgradecheck.Code.FAILURE,
                _('Duplicated services found in '
                  'service_statuses table.'))
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)
Example No. 20
class Checks(upgradecheck.UpgradeCommands):
    def _duplicate_service_status(self):
        engine = session.get_engine('storage:sqlalchemy')
        metadata = MetaData(bind=engine)
        status = Table('service_statuses', metadata, autoload=True)
        service_select = (select([func.count()]).select_from(status).group_by(
            'service_name', 'hostname'))
        service_counts = engine.execute(service_select).fetchall()
        duplicated_services = [i for i in service_counts if i[0] > 1]
        if duplicated_services:
            return upgradecheck.Result(
                upgradecheck.Code.FAILURE,
                _('Duplicated services found in '
                  'service_statuses table.'))
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    _upgrade_checks = (
        (_('Duplicate service status'), _duplicate_service_status),
        (_('Policy File JSON to YAML Migration'),
         (common_checks.check_policy_json, {
             'conf': designate.conf.CONF
         })),
    )
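
The _upgrade_checks tuple is what oslo.upgradecheck's command runner iterates over. The usual wiring for the console entry point looks roughly like the following sketch:

from oslo_upgradecheck import upgradecheck

import designate.conf


def main():
    # Runs every check in Checks._upgrade_checks, prints a result table and
    # returns a non-zero exit code if any check reports a failure.
    return upgradecheck.main(
        designate.conf.CONF, project='designate', upgrade_command=Checks())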
Example No. 21
    def _get_paging_params(self, params):
        """
        Extract any paging parameters
        """
        marker = params.pop('marker', None)
        limit = params.pop('limit', None)
        sort_key = params.pop('sort_key', None)
        sort_dir = params.pop('sort_dir', None)

        # Negative and zero limits are not caught in storage.
        # With a number bigger than MAXSIZE, rpc throws an 'OverflowError long
        #  too big to convert'.
        # So the parameter 'limit' is checked here.
        if limit:
            try:
                invalid_limit_message = _(str.format(
                    'limit should be an integer between 1 and {0}',
                    six.MAXSIZE))
                int_limit = int(limit)
                if int_limit <= 0 or int_limit > six.MAXSIZE:
                    raise exceptions.InvalidLimit(invalid_limit_message)
            # This exception is raised for non ints when int(limit) is called
            except ValueError:
                raise exceptions.InvalidLimit(invalid_limit_message)

        # sort_dir is checked in paginate_query.
        # We duplicate the sort_dir check here to throw a more specific
        # exception than ValueError.
        if sort_dir and sort_dir not in ['asc', 'desc']:
            raise exceptions.InvalidSortDir(_("Unknown sort direction, "
                                              "must be 'desc' or 'asc'"))

        if sort_key and sort_key not in self.SORT_KEYS:
            raise exceptions.InvalidSortKey(_(str.format(
                'sort key must be one of {0}', str(self.SORT_KEYS))))

        return marker, limit, sort_key, sort_dir
Example No. 22
    def _duplicate_service_status(self):
        engine = session.get_engine('storage:sqlalchemy')
        metadata = MetaData(bind=engine)
        status = Table('service_statuses', metadata, autoload=True)
        service_select = (select([func.count()])
                          .select_from(status)
                          .group_by('service_name', 'hostname')
                          )
        service_counts = engine.execute(service_select).fetchall()
        duplicated_services = [i for i in service_counts if i[0] > 1]
        if duplicated_services:
            return upgradecheck.Result(upgradecheck.Code.FAILURE,
                                       _('Duplicated services found in '
                                         'service_statuses table.'))
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)
Example No. 23
def get_paging_params(context, params, sort_keys):
    """
    Extract any paging parameters
    """
    marker = params.pop('marker', None)
    limit = params.pop('limit', cfg.CONF['service:api'].default_limit_v2)
    sort_key = params.pop('sort_key', None)
    sort_dir = params.pop('sort_dir', None)
    max_limit = cfg.CONF['service:api'].max_limit_v2

    if isinstance(limit, six.string_types) and limit.lower() == "max":
        # Support for retrieving the max results at once. If set to "max",
        # the configured max limit will be used.
        limit = max_limit

    elif limit:
        # Negative and zero limits are not caught in storage.
        # With a number bigger than MAXSIZE, rpc throws an 'OverflowError long
        # too big to convert'.
        # So the parameter 'limit' is checked here.
        invalid_limit_message = ('limit should be an integer between 1 and '
                                 '%(max)s' % {'max': max_limit})
        try:
            int_limit = int(limit)
            if int_limit <= 0 or int_limit > six.MAXSIZE:
                raise exceptions.InvalidLimit(invalid_limit_message)
        # This exception is raised for non ints when int(limit) is called
        except ValueError:
            raise exceptions.InvalidLimit(invalid_limit_message)

    # sort_dir is checked in paginate_query.
    # We duplicate the sort_dir check here to throw a more specific
    # exception than ValueError.
    if sort_dir and sort_dir not in ['asc', 'desc']:
        raise exceptions.InvalidSortDir(_("Unknown sort direction, "
                                          "must be 'desc' or 'asc'"))

    if sort_keys is None:
        sort_key = None
        sort_dir = None

    elif sort_key and sort_key not in sort_keys:
        msg = 'sort key must be one of %(keys)s' % {'keys': sort_keys}
        raise exceptions.InvalidSortKey(msg)
    elif sort_key == 'tenant_id' and not context.all_tenants:
        sort_key = None

    return marker, limit, sort_key, sort_dir
Example No. 24
    def _wsgi_get_socket(self):
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        info = socket.getaddrinfo(self._service_config.api_host,
                                  self._service_config.api_port,
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]

        sock = None
        retry_until = time.time() + 30
        while not sock and time.time() < retry_until:
            try:
                # TODO(kiall): Backlog should be a service specific setting,
                #              rather than a global
                sock = eventlet.listen(bind_addr,
                                       backlog=cfg.CONF.backlog,
                                       family=family)
                if sslutils.is_enabled():
                    sock = sslutils.wrap(sock)

            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                               "after trying for 30 seconds") %
                               {'host': self._service_config.api_host,
                                'port': self._service_config.api_port})
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock
Example No. 25
    def _wsgi_get_socket(self):
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        info = socket.getaddrinfo(self._service_config.api_host,
                                  self._service_config.api_port,
                                  socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]

        sock = None
        retry_until = time.time() + 30
        while not sock and time.time() < retry_until:
            try:
                # TODO(kiall): Backlog should be a service specific setting,
                #              rather than a global
                sock = eventlet.listen(bind_addr,
                                       backlog=cfg.CONF.backlog,
                                       family=family)
                if sslutils.is_enabled():
                    sock = sslutils.wrap(sock)

            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(
                _("Could not bind to %(host)s:%(port)s "
                  "after trying for 30 seconds") % {
                      'host': self._service_config.api_host,
                      'port': self._service_config.api_port
                  })
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock
Example No. 26
def sort_query(query, table, sort_keys, sort_dir=None, sort_dirs=None):

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert (not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert (len(sort_dirs) == len(sort_keys))

    for current_sort_key, current_sort_dir in \
            six.moves.zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(
                _("Unknown sort direction, "
                  "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(table.c, current_sort_key)
        except AttributeError:
            raise utils.InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    return query, sort_dirs
Example No. 27
    def __str__(self):
        return (_("<%(type)s count:'%(count)s' object:'%(list_type)s'>")
                % {'count': len(self),
                   'type': self.LIST_ITEM_TYPE.obj_name(),
                   'list_type': self.obj_name()})
Example No. 28
    def stop(self):
        LOG.info(_('Stopping %(name)s service') % {'name': self.service_name})

        super(Service, self).stop()
Example No. 29
def serve(server, workers=None):
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))

    _launcher = service.launch(server, workers=workers)
Example No. 30
    def _from_json(self, datastring):
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            msg = _("cannot understand JSON")
            raise exception.MalformedRequestBody(reason=msg)
Example No. 31
def paginate_query(query,
                   table,
                   limit,
                   sort_keys,
                   marker=None,
                   sort_dir=None,
                   sort_dirs=None):
    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert (not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert (len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(
                _("Unknown sort direction, "
                  "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(table.c, current_sort_key)
        except AttributeError:
            raise utils.InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = marker[sort_key]
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            for j in range(i):
                table_attr = getattr(table.c, sort_keys[j])
                crit_attrs.append((table_attr == marker_values[j]))

            table_attr = getattr(table.c, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((table_attr < marker_values[i]))
            else:
                crit_attrs.append((table_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.where(f)

    if limit is not None:
        query = query.limit(limit)

    return query
Example No. 32
def paginate_query(query, table, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(table.c, current_sort_key)
        except AttributeError:
            raise utils.InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = marker[sort_key]
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            for j in range(i):
                table_attr = getattr(table.c, sort_keys[j])
                crit_attrs.append((table_attr == marker_values[j]))

            table_attr = getattr(table.c, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((table_attr < marker_values[i]))
            else:
                crit_attrs.append((table_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.where(f)

    if limit is not None:
        query = query.limit(limit)

    return query
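
An illustrative marker-based call to paginate_query (table, columns and marker values are invented; only the sort-key values of the marker row are read, so a plain dict works here):

import sqlalchemy

metadata = sqlalchemy.MetaData()
recordsets = sqlalchemy.Table(
    'recordsets', metadata,
    sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True),
    sqlalchemy.Column('name', sqlalchemy.String(255)),
)

# The marker is the last row of the previous page, keyed by sort key.
marker = {'name': 'www.example.org.',
          'id': '11111111-2222-3333-4444-555555555555'}

query = sqlalchemy.select([recordsets])
query = paginate_query(query, recordsets, limit=100,
                       sort_keys=['name', 'id'],
                       marker=marker, sort_dir='asc')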
Example No. 33
    def start(self):
        super(Service, self).start()

        LOG.info(_('Starting %(name)s service (version: %(version)s)'),
                 {'name': self.service_name,
                  'version': version.version_info.version_string()})
Example No. 34
    def stop(self):
        LOG.info(_('Stopping %(name)s service'), {'name': self.service_name})

        super(Service, self).stop()