Example #1
def build_cache_config():
    """Build the cache region dictionary configuration.

    :returns: dict of cache configuration values read from the global CONF
    """
    prefix = CONF.cache.config_prefix
    conf_dict = {}
    conf_dict['%s.backend' % prefix] = CONF.cache.backend
    conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
    for argument in CONF.cache.backend_argument:
        try:
            (argname, argvalue) = argument.split(':', 1)
        except ValueError:
            msg = _('Unable to build cache config-key. Expected format '
                    '"<argname>:<value>". Skipping unknown format: %s')
            LOG.error(msg, argument)
            continue

        arg_key = '.'.join([prefix, 'arguments', argname])
        conf_dict[arg_key] = argvalue

        LOG.debug(_('Keystone Cache Config: %s'), conf_dict)

    return conf_dict
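For reference, here is a minimal standalone sketch of the same key-building logic with the keystone-specific CONF, LOG and _() dependencies stripped out; the prefix, backend and argument strings are made up for illustration:

def build_cache_config_sketch(prefix, backend, expiration_time, backend_arguments):
    """Build a dogpile-style cache config dict from plain values (sketch only)."""
    conf_dict = {
        '%s.backend' % prefix: backend,
        '%s.expiration_time' % prefix: expiration_time,
    }
    for argument in backend_arguments:
        try:
            argname, argvalue = argument.split(':', 1)
        except ValueError:
            # skip entries that are not in "<argname>:<value>" form
            continue
        conf_dict['.'.join([prefix, 'arguments', argname])] = argvalue
    return conf_dict


print(build_cache_config_sketch(
    'keystone.cache', 'dogpile.cache.memory', 600,
    ['url:127.0.0.1:11211', 'not-a-key-value-pair']))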
Example #2
        def wrapper(self, context, **kwargs):
            if not context['is_admin']:
                action = 'identity:%s' % f.__name__
                creds = _build_policy_check_credentials(self, action,
                                                        context, kwargs)
                # Now, build the target dict for policy check.  We include:
                #
                # - Any query filter parameters
                # - Data from the main url (which will be in the kwargs
                #   parameter) and would typically include the prime key
                #   of a get/update/delete call
                #
                # First, any query filter parameters
                target = dict()
                if filters:
                    for item in filters:
                        if item in context['query_string']:
                            target[item] = context['query_string'][item]

                    LOG.debug(_('RBAC: Adding query filter params (%s)'), (
                        ', '.join(['%s=%s' % (item, target[item])
                                  for item in target])))

                # Now any formal url parameters
                for key in kwargs:
                    target[key] = kwargs[key]

                self.policy_api.enforce(creds,
                                        action,
                                        authorization.flatten(target))

                LOG.debug(_('RBAC: Authorization granted'))
            else:
                LOG.warning(_('RBAC: Bypassing authorization'))
            return f(self, context, filters, **kwargs)
Example #3
def register_event_callback(event, resource_type, callbacks):
    if event not in ACTIONS:
        raise ValueError(_('%(event)s is not a valid notification event, must '
                           'be one of: %(actions)s') %
                         {'event': event, 'actions': ', '.join(ACTIONS)})

    if not hasattr(callbacks, '__iter__'):
        callbacks = [callbacks]

    for callback in callbacks:
        if not callable(callback):
            msg = _('Method not callable: %s') % callback
            LOG.error(msg)
            raise TypeError(msg)
        _SUBSCRIBERS.setdefault(event, {}).setdefault(resource_type, set())
        _SUBSCRIBERS[event][resource_type].add(callback)

        if LOG.logger.getEffectiveLevel() <= logging.INFO:
            # Do this only if it's going to appear in the logs.
            msg = _('Callback: `%(callback)s` subscribed to event '
                    '`%(event)s`.')
            callback_info = _get_callback_info(callback)
            callback_str = '.'.join(i for i in callback_info if i is not None)
            event_str = '.'.join(['identity', resource_type, event])
            LOG.info(msg, {'callback': callback_str, 'event': event_str})
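A small self-contained sketch of the same subscription bookkeeping, without the logging and translation helpers; ACTIONS, the registry dict and the callback below are stand-ins invented for the illustration:

ACTIONS = frozenset(['created', 'updated', 'deleted'])
_SUBSCRIBERS = {}


def register_event_callback_sketch(event, resource_type, callbacks):
    if event not in ACTIONS:
        raise ValueError('%s is not a valid notification event' % event)
    if not hasattr(callbacks, '__iter__'):
        callbacks = [callbacks]
    for callback in callbacks:
        if not callable(callback):
            raise TypeError('Method not callable: %r' % callback)
        _SUBSCRIBERS.setdefault(event, {}).setdefault(resource_type, set())
        _SUBSCRIBERS[event][resource_type].add(callback)


def on_user_created(service, resource_type, operation, payload):
    print('notified:', payload)


register_event_callback_sketch('created', 'user', on_user_created)
print(_SUBSCRIBERS)  # {'created': {'user': {<function on_user_created ...>}}}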
Example #4
    def create_user(self, context, user):
        user = self._normalize_OSKSADM_password_on_request(user)
        user = self.normalize_username_in_request(user)
        user = self._normalize_dict(user)
        self.assert_admin(context)

        if 'name' not in user or not user['name']:
            msg = _('Name field is required and cannot be empty')
            raise exception.ValidationError(message=msg)
        if 'enabled' in user and not isinstance(user['enabled'], bool):
            msg = _('Enabled field must be a boolean')
            raise exception.ValidationError(message=msg)

        default_project_id = user.pop('tenantId', None)
        if default_project_id is not None:
            # Check to see if the project is valid before moving on.
            self.assignment_api.get_project(default_project_id)
            user['default_project_id'] = default_project_id

        user_id = uuid.uuid4().hex
        user_ref = self._normalize_domain_id(context, user.copy())
        user_ref['id'] = user_id
        new_user_ref = self.v3_to_v2_user(
            self.identity_api.create_user(user_id, user_ref))

        if default_project_id is not None:
            self.assignment_api.add_user_to_project(default_project_id,
                                                    user_id)
        return {'user': new_user_ref}
Example #5
def format_url(url, data):
    """Safely string formats a user-defined URL with the given data."""
    data = utils.WhiteListedFormatter(
        CONF.catalog.endpoint_substitution_whitelist,
        data)
    try:
        result = url.replace('$(', '%(') % data
    except AttributeError:
        LOG.error(_('Malformed endpoint - %(url)r is not a string'),
                  {"url": url})
        raise exception.MalformedEndpoint(endpoint=url)
    except KeyError as e:
        LOG.error(_("Malformed endpoint %(url)s - unknown key %(keyerror)s"),
                  {"url": url,
                   "keyerror": e})
        raise exception.MalformedEndpoint(endpoint=url)
    except TypeError as e:
        LOG.error(_("Malformed endpoint %(url)s - unknown key %(keyerror)s"
                    "(are you missing brackets ?)"),
                  {"url": url,
                   "keyerror": e})
        raise exception.MalformedEndpoint(endpoint=url)
    except ValueError as e:
        LOG.error(_("Malformed endpoint %s - incomplete format "
                    "(are you missing a type notifier ?)"), url)
        raise exception.MalformedEndpoint(endpoint=url)
    return result
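The core substitution is just a string replace followed by %-formatting; a dependency-free sketch with a fabricated endpoint template and data dict:

def format_url_sketch(url, data):
    """Substitute $(key)s placeholders from a plain dict (sketch only)."""
    return url.replace('$(', '%(') % data


print(format_url_sketch(
    'http://$(public_bind_host)s:$(admin_port)d/v2.0',
    {'public_bind_host': 'localhost', 'admin_port': 35357}))
# http://localhost:35357/v2.0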
Example #6
    def create_user(self, user_id, user):
        try:
            self.get_user(user_id)
        except exception.UserNotFound:
            pass
        else:
            msg = _('Duplicate ID, %s.') % user_id
            raise exception.Conflict(type='user', details=msg)

        try:
            self.get_user_by_name(user['name'], user['domain_id'])
        except exception.UserNotFound:
            pass
        else:
            msg = _('Duplicate name, %s.') % user['name']
            raise exception.Conflict(type='user', details=msg)

        user = utils.hash_user_password(user)
        new_user = user.copy()

        new_user.setdefault('groups', [])

        self.db.set('user-%s' % user_id, new_user)
        domain_id = user['domain_id']
        user_name_key = self._calc_user_name_key(new_user['name'], domain_id)
        self.db.set(user_name_key, new_user)
        self._user_id_to_domain_id.notify_user_created(user_id, domain_id)
        user_list = set(self.db.get('user_list', []))
        user_list.add(user_id)
        self.db.set('user_list', list(user_list))
        return identity.filter_user(new_user)
Example #7
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
Example #8
    def _assert_default_domain(self, token_ref):
        """Make sure we are operating on default domain only."""
        if (token_ref.get('token_data') and
                self.get_token_version(token_ref.get('token_data')) ==
                token.provider.V3):
            # this is a V3 token
            msg = _('Non-default domain is not supported')
            # user in a non-default domain is prohibited
            if (token_ref['token_data']['token']['user']['domain']['id'] !=
                    CONF.identity.default_domain_id):
                raise exception.Unauthorized(msg)
            # domain scoping is prohibited
            if token_ref['token_data']['token'].get('domain'):
                raise exception.Unauthorized(
                    _('Domain scoped token is not supported'))
            # project in non-default domain is prohibited
            if token_ref['token_data']['token'].get('project'):
                project = token_ref['token_data']['token']['project']
                project_domain_id = project['domain']['id']
                # scoped to project in non-default domain is prohibited
                if project_domain_id != CONF.identity.default_domain_id:
                    raise exception.Unauthorized(msg)
            # if token is scoped to trust, both trustor and trustee must
            # be in the default domain. Furthermore, the delegated project
            # must also be in the default domain
            metadata_ref = token_ref['metadata']
            if CONF.trust.enabled and 'trust_id' in metadata_ref:
                trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
                trustee_user_ref = self.identity_api.get_user(
                    trust_ref['trustee_user_id'])
                if (trustee_user_ref['domain_id'] !=
                        CONF.identity.default_domain_id):
                    raise exception.Unauthorized(msg)
                trustor_user_ref = self.identity_api.get_user(
                    trust_ref['trustor_user_id'])
                if (trustor_user_ref['domain_id'] !=
                        CONF.identity.default_domain_id):
                    raise exception.Unauthorized(msg)
                project_ref = self.assignment_api.get_project(
                    trust_ref['project_id'])
                if (project_ref['domain_id'] !=
                        CONF.identity.default_domain_id):
                    raise exception.Unauthorized(msg)
Example #9
    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
Example #10
    def create_consumer(self, topic, proxy, fanout=False):
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)
Example #11
    def _delete_tokens_for_role(self, role_id):
        assignments = self.list_role_assignments_for_role(role_id=role_id)

        # Iterate over the assignments for this role and build the list of
        # user or user+project IDs for the tokens we need to delete
        user_ids = set()
        user_and_project_ids = list()
        for assignment in assignments:
            # If we have a project assignment, then record both the user and
            # project IDs so we can target the right token to delete. If it is
            # a domain assignment, we might as well kill all the tokens for
            # the user, since in the vast majority of cases all the tokens
            # for a user will be within one domain anyway, so not worth
            # trying to delete tokens for each project in the domain.
            if 'user_id' in assignment:
                if 'project_id' in assignment:
                    user_and_project_ids.append(
                        (assignment['user_id'], assignment['project_id']))
                elif 'domain_id' in assignment:
                    user_ids.add(assignment['user_id'])
            elif 'group_id' in assignment:
                # Add in any users for this group, being tolerant of any
                # cross-driver database integrity errors.
                try:
                    users = self.identity_api.list_users_in_group(
                        assignment['group_id'])
                except exception.GroupNotFound:
                    # Ignore it, but log a debug message
                    if 'project_id' in assignment:
                        target = _('Project (%s)') % assignment['project_id']
                    elif 'domain_id' in assignment:
                        target = _('Domain (%s)') % assignment['domain_id']
                    else:
                        target = _('Unknown Target')
                    msg = _('Group (%(group)s), referenced in assignment '
                            'for %(target)s, not found - ignoring.')
                    LOG.debug(msg, {'group': assignment['group_id'],
                                    'target': target})
                    continue

                if 'project_id' in assignment:
                    for user in users:
                        user_and_project_ids.append(
                            (user['id'], assignment['project_id']))
                elif 'domain_id' in assignment:
                    for user in users:
                        user_ids.add(user['id'])

        # Now process the built up lists.  Before issuing calls to delete any
        # tokens, let's try and minimize the number of calls by pruning out
        # any user+project deletions where a general token deletion for that
        # same user is also planned.
        user_and_project_ids_to_action = []
        for user_and_project_id in user_and_project_ids:
            if user_and_project_id[0] not in user_ids:
                user_and_project_ids_to_action.append(user_and_project_id)

        self.token_api.delete_tokens_for_users(user_ids)
        for user_id, project_id in user_and_project_ids_to_action:
            self.token_api.delete_tokens_for_user(user_id, project_id)
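The final pruning step can be shown on plain data; the user and project IDs below are invented for the illustration:

user_ids = {'user-a'}
user_and_project_ids = [('user-a', 'proj-1'),
                        ('user-b', 'proj-1'),
                        ('user-b', 'proj-2')]

# Pairs whose user already gets a blanket token deletion are pruned.
to_action = [pair for pair in user_and_project_ids if pair[0] not in user_ids]
print(to_action)  # [('user-b', 'proj-1'), ('user-b', 'proj-2')]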
Example #12
    def check_protection(self, context, prep_info, target_attr=None):
        """Provide call protection for complex target attributes.

        As well as including the standard parameters from the original API
        call (which is passed in prep_info), this call will add in any
        additional entities or attributes (passed in target_attr), so that
        they can be referenced by policy rules.

         """
        if 'is_admin' in context and context['is_admin']:
            LOG.warning(_('RBAC: Bypassing authorization'))
        else:
            action = 'identity:%s' % prep_info['f_name']
            # TODO(henry-nash) need to log the target attributes as well
            creds = _build_policy_check_credentials(self, action,
                                                    context,
                                                    prep_info['input_attr'])
            # Build the dict the policy engine will check against from both the
            # parameters passed into the call we are protecting (which was
            # stored in the prep_info by protected()), plus the target
            # attributes provided.
            policy_dict = {}
            if target_attr:
                policy_dict = {'target': target_attr}
            policy_dict.update(prep_info['input_attr'])
            self.policy_api.enforce(creds,
                                    action,
                                    authorization.flatten(policy_dict))
            LOG.debug(_('RBAC: Authorization granted'))
Example #13
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #14
    def _format_project_list(self, tenant_refs, **kwargs):
        marker = kwargs.get('marker')
        first_index = 0
        if marker is not None:
            for (marker_index, tenant) in enumerate(tenant_refs):
                if tenant['id'] == marker:
                    # we start pagination after the marker
                    first_index = marker_index + 1
                    break
            else:
                msg = _('Marker could not be found')
                raise exception.ValidationError(message=msg)

        limit = kwargs.get('limit')
        last_index = None
        if limit is not None:
            try:
                limit = int(limit)
                if limit < 0:
                    raise AssertionError()
            except (ValueError, AssertionError):
                msg = _('Invalid limit value')
                raise exception.ValidationError(message=msg)
            last_index = first_index + limit

        tenant_refs = tenant_refs[first_index:last_index]

        for x in tenant_refs:
            if 'enabled' not in x:
                x['enabled'] = True
        o = {'tenants': tenant_refs,
             'tenants_links': []}
        return o
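A minimal sketch of the marker/limit slicing on a plain list of dicts; the tenant records are fabricated:

def paginate_sketch(refs, marker=None, limit=None):
    first_index = 0
    if marker is not None:
        for index, ref in enumerate(refs):
            if ref['id'] == marker:
                first_index = index + 1  # pagination starts after the marker
                break
        else:
            raise ValueError('Marker could not be found')
    last_index = None if limit is None else first_index + int(limit)
    return refs[first_index:last_index]


tenants = [{'id': 't1'}, {'id': 't2'}, {'id': 't3'}, {'id': 't4'}]
print(paginate_sketch(tenants, marker='t2', limit=1))  # [{'id': 't3'}]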
Example #15
    def add_user(self, role_id, role_dn, user_dn, user_id, tenant_id=None):
        conn = self.get_connection()
        try:
            conn.modify_s(role_dn, [(ldap.MOD_ADD,
                                     self.member_attribute, user_dn)])
        except ldap.TYPE_OR_VALUE_EXISTS:
            msg = (_('User %(user_id)s already has role %(role_id)s in '
                     'tenant %(tenant_id)s') %
                   dict(user_id=user_id, role_id=role_id, tenant_id=tenant_id))
            raise exception.Conflict(type='role grant', details=msg)
        except ldap.NO_SUCH_OBJECT:
            if tenant_id is None or self.get(role_id) is None:
                raise Exception(_("Role %s not found") % (role_id,))

            attrs = [('objectClass', [self.object_class]),
                     (self.member_attribute, [user_dn])]

            if self.use_dumb_member:
                attrs[1][1].append(self.dumb_member)
            try:
                conn.add_s(role_dn, attrs)
            except Exception as inst:
                raise inst
        finally:
            conn.unbind_s()
Example #16
    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fallback into the loop spawning children. It would
            # be bad for a child to spawn more children.
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if signo != signal.SIGHUP:
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid
Example #17
    def _wait_for_exit_or_signal(self):
        status = None
        signo = 0

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT',
                       signal.SIGHUP: 'SIGHUP'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_('Exception during rpc cleanup.'))

        return status, signo
Example #18
def _build_policy_check_credentials(self, action, context, kwargs):
    LOG.debug(_('RBAC: Authorizing %(action)s(%(kwargs)s)'), {
        'action': action,
        'kwargs': ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])})

    # see if auth context has already been created. If so use it.
    if ('environment' in context and
            authorization.AUTH_CONTEXT_ENV in context['environment']):
        LOG.debug(_('RBAC: using auth context from the request environment'))
        return context['environment'].get(authorization.AUTH_CONTEXT_ENV)

    # now build the auth context from the incoming auth token
    try:
        LOG.debug(_('RBAC: building auth context from the incoming '
                    'auth token'))
        # TODO(ayoung): These two functions return the token in different
        # formats.  However, the call
        # to get_token hits the caching layer, and does not validate the
        # token.  This should be reduced to one call
        if not CONF.token.revoke_by_id:
            self.token_api.token_provider_api.validate_token(
                context['token_id'])
        token_ref = self.token_api.get_token(context['token_id'])
    except exception.TokenNotFound:
        LOG.warning(_('RBAC: Invalid token'))
        raise exception.Unauthorized()

    # NOTE(jamielennox): whilst this maybe shouldn't be within this function
    # it would otherwise need to reload the token_ref from backing store.
    wsgi.validate_token_bind(context, token_ref)

    auth_context = authorization.token_to_auth_context(token_ref['token_data'])

    return auth_context
Example #19
    def _get_domain_id_from_token(self, context):
        """Get the domain_id for a v3 create call.

        In the case of a v3 create entity call that does not specify a domain
        ID, the spec says that we should use the domain scoping from the token
        being used.

        """
        # We could make this more efficient by loading the domain_id
        # into the context in the wrapper function above (since
        # this version of normalize_domain will only be called inside
        # a v3 protected call).  However, this optimization is probably not
        # worth the duplication of state
        try:
            token_ref = self.token_api.get_token(context['token_id'])
        except KeyError:
            # This might happen if we use the Admin token, for instance
            raise exception.ValidationError(
                _('A domain-scoped token must be used'))
        except exception.TokenNotFound:
            LOG.warning(_('Invalid token found while getting domain ID '
                          'for list request'))
            raise exception.Unauthorized()

        if token_ref.get('token_data', {}).get('token', {}).get('domain', {}):
            return token_ref['token_data']['token']['domain']['id']
        else:
            # TODO(henry-nash): We should issue an exception here since if
            # a v3 call does not explicitly specify the domain_id in the
            # entity, it should be using a domain scoped token.  However,
            # the current tempest heat tests issue a v3 call without this.
            # This is raised as bug #1283539.  Once this is fixed, we
            # should remove the line below and replace it with an error.
            return CONF.identity.default_domain_id
Example #20
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        while True:
            self.handle_signal()
            self._respawn_children()
            if self.sigcaught:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT',
                           signal.SIGHUP: 'SIGHUP'}[self.sigcaught]
                LOG.info(_('Caught %s, stopping children'), signame)
            if self.sigcaught != signal.SIGHUP:
                break

            for pid in self.children:
                os.kill(pid, signal.SIGHUP)
            self.running = True
            self.sigcaught = None

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
Example #21
def register_event_callback(event, resource_type, callbacks):
    if event not in ACTIONS:
        raise ValueError(
            _("%(event)s is not a valid notification event, must " "be one of: %(actions)s")
            % {"event": event, "actions": ", ".join(ACTIONS)}
        )

    if not hasattr(callbacks, "__iter__"):
        callbacks = [callbacks]

    for callback in callbacks:
        if not callable(callback):
            msg = _("Method not callable: %s") % callback
            LOG.error(msg)
            raise TypeError(msg)
        _SUBSCRIBERS.setdefault(event, {}).setdefault(resource_type, set())
        _SUBSCRIBERS[event][resource_type].add(callback)

        if LOG.logger.getEffectiveLevel() <= logging.INFO:
            # Do this only if it's going to appear in the logs.
            msg = _("Callback: `%(callback)s` subscribed to event " "`%(event)s`.")
            callback_info = _get_callback_info(callback)
            callback_str = ".".join(i for i in callback_info if i is not None)
            event_str = ".".join(["identity", resource_type, event])
            LOG.info(msg, {"callback": callback_str, "event": event_str})
Example #22
    def add_user_to_project(self, tenant_id, user_id):
        """Add user to a tenant by creating a default role relationship.

        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.UserNotFound

        """
        try:
            self.driver.add_role_to_user_and_project(
                user_id,
                tenant_id,
                config.CONF.member_role_id)
        except exception.RoleNotFound:
            LOG.info(_("Creating the default role %s "
                       "because it does not exist."),
                     config.CONF.member_role_id)
            role = {'id': CONF.member_role_id,
                    'name': CONF.member_role_name}
            try:
                self.driver.create_role(config.CONF.member_role_id, role)
            except exception.Conflict:
                LOG.info(_("Creating the default role %s failed because it "
                           "was already created"),
                         config.CONF.member_role_id)
            # now that default role exists, the add should succeed
            self.driver.add_role_to_user_and_project(
                user_id,
                tenant_id,
                config.CONF.member_role_id)
Example #23
def v3_token_to_auth_context(token):
    creds = {}
    token_data = token['token']
    try:
        creds['user_id'] = token_data['user']['id']
    except AttributeError:
        LOG.warning(_('RBAC: Invalid user data in v3 token'))
        raise exception.Unauthorized()

    if 'user' in token_data:
        creds['user.domain_id'] = token_data['user']['domain']['id']
    if 'project' in token_data:
        creds['project_id'] = token_data['project']['id']
        creds['project.domain_id'] = token_data['project']['domain']['id']
        creds['domain_id'] = token_data['project']['domain']['id']
    else:
        LOG.debug(_('RBAC: Proceeding without project'))
    if 'domain' in token_data:
        creds['domain_id'] = token_data['domain']['id']

    if 'roles' in token_data:
        creds['roles'] = []
        for role in token_data['roles']:
            creds['roles'].append(role['name'])
    creds['group_ids'] = [
        g['id'] for g in token_data['user'].get(federation.FEDERATION, {}).get(
            'groups', [])]
    print("!!! In /opt/stack/keystone/keystone/common/authorization.py:v3_token_to_auth_context: creds=", creds)
    return creds
Example #24
def check_credential_exists(ec2credential, credential_table, session):
    credential = session.query(credential_table).filter_by(
        id=utils.hash_access_key(ec2credential.access)).first()
    if credential is None:
        return False
    blob = utils.get_blob_from_credential(credential)
    # check if credential with same access key but different
    # secret key already exists in credential table.
    # If exists raise an exception
    if blob['secret'] != ec2credential.secret:
        msg = _('Credential %(access)s already exists with different secret'
                ' in %(table)s table')
        message = msg % {'access': ec2credential.access,
                         'table': credential_table.name}
        raise exception.Conflict(type='credential', details=message)
    # check if credential with same access and secret key but
    # associated with a different project exists. If exists raise
    # an exception
    elif credential.project_id is not None and (
            credential.project_id != ec2credential.tenant_id):
        msg = _('Credential %(access)s already exists with different project'
                ' in %(table)s table')
        message = msg % {'access': ec2credential.access,
                         'table': credential_table.name}
        raise exception.Conflict(type='credential', details=message)
    # if credential with same access and secret key and not associated
    # with any projects already exists in the credential table, then
    # return true.
    else:
        return True
Example #25
    def get_token_provider(cls):
        """Return package path to the configured token provider.

        The value should come from ``keystone.conf`` ``[token] provider``,
        however this method ensures backwards compatibility for
        ``keystone.conf`` ``[signing] token_format`` until Havana + 2.

        Return the provider based on ``token_format`` if ``provider`` is not
        set. Otherwise, ignore ``token_format`` and return the configured
        ``provider`` instead.

        """

        if CONF.signing.token_format:
            LOG.warn(_('[signing] token_format is deprecated. '
                       'Please change to setting the [token] provider '
                       'configuration value instead'))
            try:
                mapped = _FORMAT_TO_PROVIDER[CONF.signing.token_format]
            except KeyError:
                raise exception.UnexpectedError(
                    _('Unrecognized keystone.conf [signing] token_format: '
                      'expected either \'UUID\' or \'PKI\''))
            return mapped

        if CONF.token.provider is None:
            return PKIZ_PROVIDER
        else:
            return CONF.token.provider
Example #26
    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)
Example #27
    def _is_valid_token(self, token):
        """Verify the token is valid format and has not expired."""

        current_time = timeutils.normalize_time(timeutils.utcnow())

        try:
            # Get the data we need from the correct location (V2 and V3 tokens
            # differ in structure, Try V3 first, fall back to V2 second)
            token_data = token.get('token', token.get('access'))
            expires_at = token_data.get('expires_at',
                                        token_data.get('expires'))
            if not expires_at:
                expires_at = token_data['token']['expires']
            expiry = timeutils.normalize_time(
                timeutils.parse_isotime(expires_at))
        except Exception:
            LOG.exception(_('Unexpected error or malformed token determining '
                            'token expiry: %s'), token)
            raise exception.TokenNotFound(_('Failed to validate token'))

        if current_time < expiry:
            self.check_revocation(token)
            # Token has not expired and has not been revoked.
            return None
        else:
            raise exception.TokenNotFound(_('Failed to validate token'))
Example #28
def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.

    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)

    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.

    """
    match = BYTE_REGEX.search(text)
    if match:
        magnitude = int(match.group(1))
        mult_key_org = match.group(2)
        if not mult_key_org:
            return magnitude
    elif text:
        msg = _('Invalid string format: %s') % text
        raise TypeError(msg)
    else:
        return default
    mult_key = mult_key_org.lower().replace('b', '', 1)
    multiplier = BYTE_MULTIPLIERS.get(mult_key)
    if multiplier is None:
        msg = _('Unknown byte multiplier: %s') % mult_key_org
        raise TypeError(msg)
    return magnitude * multiplier
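A self-contained approximation of the helper, with plausible stand-in definitions for BYTE_REGEX and BYTE_MULTIPLIERS (the real constants live elsewhere in the module):

import re

BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
BYTE_MULTIPLIERS = {'': 1, 'k': 1024, 'm': 1024 ** 2,
                    'g': 1024 ** 3, 't': 1024 ** 4}


def to_bytes_sketch(text, default=0):
    match = BYTE_REGEX.search(text)
    if match:
        magnitude = int(match.group(1))
        mult_key_org = match.group(2)
        if not mult_key_org:
            return magnitude
    elif text:
        raise TypeError('Invalid string format: %s' % text)
    else:
        return default
    mult_key = mult_key_org.lower().replace('b', '', 1)
    multiplier = BYTE_MULTIPLIERS.get(mult_key)
    if multiplier is None:
        raise TypeError('Unknown byte multiplier: %s' % mult_key_org)
    return magnitude * multiplier


print(to_bytes_sketch('10MB'))  # 10485760
print(to_bytes_sketch('512'))   # 512
print(to_bytes_sketch(''))      # 0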
Example #29
    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
Example #30
    def _get_domain_id_for_list_request(self, context):
        """Get the domain_id for a v3 list call.

        If we are running with multiple domain drivers, then the caller must
        specify a domain_id either as a filter or as part of the token scope.

        """
        if not CONF.identity.domain_specific_drivers_enabled:
            # We don't need to specify a domain ID in this case
            return

        if context['query_string'].get('domain_id') is not None:
            return context['query_string'].get('domain_id')

        try:
            token_ref = self.token_api.get_token(context['token_id'])
            token = token_ref['token_data']['token']
        except KeyError:
            raise exception.ValidationError(
                _('domain_id is required as part of entity'))
        except exception.TokenNotFound:
            LOG.warning(_('Invalid token found while getting domain ID '
                          'for list request'))
            raise exception.Unauthorized()

        if 'domain' in token:
            return token['domain']['id']
        else:
            LOG.warning(
                _('No domain information specified as part of list request'))
            raise exception.Unauthorized()
Example #31
def unpack_context(conf, msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            value = msg.pop(key)
            context_dict[key[9:]] = value
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['reply_q'] = msg.pop('_reply_q', None)
    context_dict['conf'] = conf
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx
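The context extraction itself is just key-prefix stripping; a dependency-free sketch on a fabricated message dict:

def unpack_context_sketch(msg):
    context_dict = {}
    for key in list(msg.keys()):
        if key.startswith('_context_'):
            context_dict[key[len('_context_'):]] = msg.pop(key)
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['reply_q'] = msg.pop('_reply_q', None)
    return context_dict


msg = {'_context_user_id': 'u1', '_context_project_id': 'p1',
       '_msg_id': 'abc123', 'method': 'echo'}
print(unpack_context_sketch(msg))
# {'user_id': 'u1', 'project_id': 'p1', 'msg_id': 'abc123', 'reply_q': None}
print(msg)  # {'method': 'echo'}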
Example #32
    def _dispatch(req):
        """Dispatch the request to the appropriate controller.

        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ.  Either returns 404
        or the routed WSGI app's response.

        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            msg = _('The resource could not be found.')
            return render_exception(exception.NotFound(msg),
                                    request=req,
                                    user_locale=best_match_language(req))
        app = match['controller']
        return app
Example #33
    def acquire(self, wait=True):
        client = self.client_fn()
        i = 0
        while True:
            if client.add(self.key, 1, self.lock_timeout):
                return True
            elif not wait:
                return False
            else:
                sleep_time = (((i + 1) * random.random()) + 2 ** i) / 2.5
                time.sleep(sleep_time)
            if i <= self.max_lock_attempts:
                i += 1
            else:
                raise exception.UnexpectedError(
                    _('Maximum lock attempts on %s occurred.') % self.key)
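The retry delay grows roughly exponentially with the attempt count; a quick sketch of the same formula with the attempt numbers made explicit:

import random


def sleep_time_sketch(i):
    # Same formula as above: jittered, roughly exponential back-off.
    return (((i + 1) * random.random()) + 2 ** i) / 2.5


for attempt in range(5):
    print(attempt, round(sleep_time_sketch(attempt), 3))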
Example #34
    def set_rules(self, rules, overwrite=True):
        """Create a new Rules object based on the provided dict of rules.

        :param rules: New rules to use. It should be an instance of dict.
        :param overwrite: Whether to overwrite current rules or update them
                          with the new rules.
        """

        if not isinstance(rules, dict):
            raise TypeError(_("Rules must be an instance of dict or Rules, "
                            "got %s instead") % type(rules))

        if overwrite:
            self.rules = Rules(rules, self.default_rule)
        else:
            self.rules.update(rules)
Example #35
def _project_filter(query, db_model, context, project_only):
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(
            _("There is no `project_id` column in `%s` table.") %
            db_model.__name__)

    if request_context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            is_none = None
            query = query.filter(
                or_(db_model.project_id == context.project_id,
                    db_model.project_id == is_none))
        else:
            query = query.filter(db_model.project_id == context.project_id)

    return query
Example #36
    def start_heartbeat(self):
        """Implementation of MatchMakerBase.start_heartbeat.

        Launches greenthread looping send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if not self.hosts:
            raise MatchMakerException(_("Register before starting heartbeat."))

        def do_heartbeat():
            while True:
                self.send_heartbeats()
                eventlet.sleep(CONF.matchmaker_heartbeat_freq)

        self._heart = eventlet.spawn(do_heartbeat)
Example #37
    def calculate_type(user_id, group_id, project_id, domain_id):
        if user_id and project_id:
            return AssignmentType.USER_PROJECT
        elif user_id and domain_id:
            return AssignmentType.USER_DOMAIN
        elif group_id and project_id:
            return AssignmentType.GROUP_PROJECT
        elif group_id and domain_id:
            return AssignmentType.GROUP_DOMAIN
        else:
            message_data = ', '.join(
                [user_id, group_id, project_id, domain_id])
            raise exception.Error(
                message=_('Unexpected combination of grant attributes - '
                          'User, Group, Project, Domain: %s') %
                message_data)
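A self-contained sketch of the grant-type resolution, with a stand-in AssignmentType class and a plain exception in place of the keystone ones (names are illustrative):

class AssignmentType(object):
    USER_PROJECT = 'UserProject'
    USER_DOMAIN = 'UserDomain'
    GROUP_PROJECT = 'GroupProject'
    GROUP_DOMAIN = 'GroupDomain'


def calculate_type_sketch(user_id, group_id, project_id, domain_id):
    if user_id and project_id:
        return AssignmentType.USER_PROJECT
    elif user_id and domain_id:
        return AssignmentType.USER_DOMAIN
    elif group_id and project_id:
        return AssignmentType.GROUP_PROJECT
    elif group_id and domain_id:
        return AssignmentType.GROUP_DOMAIN
    raise ValueError('Unexpected combination of grant attributes')


print(calculate_type_sketch('u1', None, 'p1', None))  # UserProject
print(calculate_type_sketch(None, 'g1', None, 'd1'))  # GroupDomain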
Example #38
    def _get_metadata(self,
                      user_id=None,
                      tenant_id=None,
                      domain_id=None,
                      group_id=None):
        def _get_roles_for_just_user_and_project(user_id, tenant_id):
            self.get_project(tenant_id)
            return [
                self.role._dn_to_id(a.role_dn)
                for a in self.role.get_role_assignments(
                    self.project._id_to_dn(tenant_id))
                if self.user._dn_to_id(a.user_dn) == user_id
            ]

        def _get_roles_for_group_and_project(group_id, project_id):
            self.get_project(project_id)
            group_dn = self.group._id_to_dn(group_id)
            # NOTE(marcos-fermin-lobo): In Active Directory, for functions
            # such as "self.role.get_role_assignments", it returns
            # the key "CN" or "OU" in uppercase.
            # The group_dn var has "CN" and "OU" in lowercase.
            # For this reason, it is necessary to use the "upper()"
            # function so both are consistent.
            return [
                self.role._dn_to_id(a.role_dn)
                for a in self.role.get_role_assignments(
                    self.project._id_to_dn(project_id))
                if a.user_dn.upper() == group_dn.upper()
            ]

        if domain_id is not None:
            msg = _('Domain metadata not supported by LDAP')
            raise exception.NotImplemented(message=msg)
        if group_id is None and user_id is None:
            return {}

        if tenant_id is None:
            return {}
        if user_id is None:
            metadata_ref = _get_roles_for_group_and_project(
                group_id, tenant_id)
        else:
            metadata_ref = _get_roles_for_just_user_and_project(
                user_id, tenant_id)
        if not metadata_ref:
            return {}
        return {'roles': [self._role_to_dict(r, False) for r in metadata_ref]}
Example #39
def notify(context, message):
    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority', CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.notification_topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message)
        except Exception:
            LOG.exception(
                _("Could not send notification to %(topic)s. "
                  "Payload=%(message)s"), {
                      "topic": topic,
                      "message": message
                  })
Example #40
def notify_event_callbacks(service, resource_type, operation, payload):
    """Sends a notification to registered extensions."""
    if operation in SUBSCRIBERS:
        if resource_type in SUBSCRIBERS[operation]:
            for cb in SUBSCRIBERS[operation][resource_type]:
                subst_dict = {
                    'cb_name': cb.__name__,
                    'service': service,
                    'resource_type': resource_type,
                    'operation': operation,
                    'payload': payload
                }
                LOG.debug(
                    _('Invoking callback %(cb_name)s for event '
                      '%(service)s %(resource_type)s %(operation)s for '
                      '%(payload)s'), subst_dict)
                cb(service, resource_type, operation, payload)
Example #41
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if not message:
            try:
                message = self.msg_fmt % kwargs
            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in six.iteritems(kwargs):
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.msg_fmt

        super(RPCException, self).__init__(message)
Example #42
    def update_user(self, user_id, user):
        session = sql.get_session()
        if 'id' in user and user_id != user['id']:
            raise exception.ValidationError(_('Cannot change user ID'))

        with session.begin():
            user_ref = self._get_user(session, user_id)
            old_user_dict = user_ref.to_dict()
            user = utils.hash_user_password(user)
            for k in user:
                old_user_dict[k] = user[k]
            new_user = User.from_dict(old_user_dict)
            for attr in User.attributes:
                if attr != 'id':
                    setattr(user_ref, attr, getattr(new_user, attr))
            user_ref.extra = new_user.extra
        return identity.filter_user(user_ref.to_dict(include_extra_dict=True))
Example #43
    def load_rules(self, force_reload=False):
        """Loads policy_path's rules.

        Policy file is cached and will be reloaded if modified.

        :param force_reload: Whether to overwrite current rules.
        """

        if not self.policy_path:
            self.policy_path = self._get_policy_path()

        reloaded, data = fileutils.read_cached_file(self.policy_path,
                                                    force_reload=force_reload)
        if reloaded or not self.rules:
            rules = Rules.load_json(data, self.default_rule)
            self.set_rules(rules)
            LOG.debug(_("Rules successfully reloaded"))
Example #44
def _check_for_lock():
    if not CONF.debug:
        return None

    if ((hasattr(local.strong_store, 'locks_held')
         and local.strong_store.locks_held)):
        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
        LOG.warn(
            _('A RPC is being made while holding a lock. The locks '
              'currently held are %(locks)s. This is probably a bug. '
              'Please report it. Include the following: [%(stack)s].'), {
                  'locks': local.strong_store.locks_held,
                  'stack': stack
              })
        return True

    return False
Example #45
    def create_token(self, token_id, data):
        """Create a token by id and data.

        It is assumed the caller has performed data validation on the "data"
        parameter.
        """
        data_copy = copy.deepcopy(data)
        ptk = self._prefix_token_id(token_id)
        if not data_copy.get('expires'):
            data_copy['expires'] = token.default_expire_time()
        if not data_copy.get('user_id'):
            data_copy['user_id'] = data_copy['user']['id']

        # NOTE(morganfainberg): for ease of manipulating the data without
        # concern about the backend, always store the value(s) in the
        # index as the isotime (string) version so this is where the string is
        # built.
        expires_str = timeutils.isotime(data_copy['expires'], subsecond=True)

        self._set_key(ptk, data_copy)
        user_id = data['user']['id']
        user_key = self._prefix_user_id(user_id)
        self._update_user_token_list(user_key, token_id, expires_str)
        if CONF.trust.enabled and data.get('trust_id'):
            # NOTE(morganfainberg): If trusts are enabled and this is a trust
            # scoped token, we add the token to the trustee list as well.  This
            # allows password changes of the trustee to also expire the token.
            # There is no harm in placing the token in multiple lists, as
            # _list_tokens is smart enough to handle almost any case of
            # valid/invalid/expired for a given token.
            token_data = data_copy['token_data']
            if data_copy['token_version'] == token.provider.V2:
                trustee_user_id = token_data['access']['trust'][
                    'trustee_user_id']
            elif data_copy['token_version'] == token.provider.V3:
                trustee_user_id = token_data['OS-TRUST:trust'][
                    'trustee_user_id']
            else:
                raise token.provider.UnsupportedTokenVersionException(
                    _('Unknown token version %s') %
                    data_copy.get('token_version'))

            trustee_key = self._prefix_user_id(trustee_user_id)
            self._update_user_token_list(trustee_key, token_id, expires_str)

        return data_copy
Example #46
def convert_ldap_result(ldap_result):
    py_result = []
    at_least_one_referral = False
    for dn, attrs in ldap_result:
        if dn is None:
            # this is a Referral object, rather than an Entry object
            at_least_one_referral = True
            continue

        py_result.append((utf8_decode(dn),
                          dict((kind, [ldap2py(x) for x in values])
                               for kind, values in six.iteritems(attrs))))
    if at_least_one_referral:
        LOG.debug(_('Referrals were returned and ignored. Enable referral '
                    'chasing in keystone.conf via [ldap] chase_referrals'))

    return py_result
Example #47
def validate_token_bind(context, token_ref):
    bind_mode = CONF.token.enforce_token_bind

    if bind_mode == 'disabled':
        return

    bind = token_ref.get('bind', {})

    # permissive and strict modes don't require there to be a bind
    permissive = bind_mode in ('permissive', 'strict')

    # get the named mode if bind_mode is not one of the known
    name = None if permissive or bind_mode == 'required' else bind_mode

    if not bind:
        if permissive:
            # no bind provided and none required
            return
        else:
            LOG.info(_("No bind information present in token"))
            raise exception.Unauthorized()

    if name and name not in bind:
        LOG.info(_("Named bind mode %s not in bind information"), name)
        raise exception.Unauthorized()

    for bind_type, identifier in six.iteritems(bind):
        if bind_type == 'kerberos':
            if not (context['environment'].get('AUTH_TYPE', '').lower()
                    == 'negotiate'):
                LOG.info(_("Kerberos credentials required and not present"))
                raise exception.Unauthorized()

            if not context['environment'].get('REMOTE_USER') == identifier:
                LOG.info(_("Kerberos credentials do not match those in bind"))
                raise exception.Unauthorized()

            LOG.info(_("Kerberos bind authentication successful"))

        elif bind_mode == 'permissive':
            LOG.debug(_("Ignoring unknown bind for permissive mode: "
                        "{%(bind_type)s: %(identifier)s}"),
                      {'bind_type': bind_type, 'identifier': identifier})
        else:
            LOG.info(_("Couldn't verify unknown bind: "
                       "{%(bind_type)s: %(identifier)s}"),
                     {'bind_type': bind_type, 'identifier': identifier})
            raise exception.Unauthorized()
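The mode handling at the top of the function reduces to a small decision on the configured value; a hedged sketch of just that step, with the 'disabled' early return, the enforcement and the Kerberos checks omitted:

def resolve_bind_mode_sketch(bind_mode):
    """Return (permissive, required_bind_name) for a configured mode."""
    permissive = bind_mode in ('permissive', 'strict')
    name = None if permissive or bind_mode == 'required' else bind_mode
    return permissive, name


for mode in ('permissive', 'strict', 'required', 'kerberos'):
    print(mode, resolve_bind_mode_sketch(mode))
# permissive/strict -> (True, None); required -> (False, None)
# kerberos (or any other named mode) -> (False, 'kerberos')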
Example #48
def sync_database_to_version(extension=None, version=None):
    if not extension:
        abs_path = find_migrate_repo()
        init_version = migrate_repo.DB_INIT_VERSION
    else:
        init_version = 0
        try:
            package_name = '.'.join((contrib.__name__, extension))
            package = importutils.import_module(package_name)
        except ImportError:
            raise ImportError(_("%s extension does not exist.")
                              % package_name)
        try:
            abs_path = find_migrate_repo(package)
            try:
                migration.db_version_control(sql.get_engine(), abs_path)
            # Register the repo with the version control API
            # If it already knows about the repo, it will throw
            # an exception that we can safely ignore
            except exceptions.DatabaseAlreadyControlledError:
                pass
        except exception.MigrationNotProvided as e:
            print(e)
            sys.exit(1)

    engine = sql.get_engine()
    try:
        migration.db_sync(engine, abs_path, version=version,
                          init_version=init_version)
    except ValueError:
        # NOTE(morganfainberg): ValueError is raised from the sanity check (
        # verifies that tables are utf8 under mysql). The region table was not
        # initially built with InnoDB and utf8 as part of the table arguments
        # when the migration was initially created. Bug #1334779 is a scenario
        # where the deployer can get wedged, unable to upgrade or downgrade.
        # This is a workaround to "fix" that table if we're under MySQL.
        if (not extension and engine.name == 'mysql' and
                six.text_type(get_db_version()) == '37'):
            _fix_migration_37(engine)
            # Try the migration a second time now that we've done the
            # un-wedge work.
            migration.db_sync(engine, abs_path, version=version,
                              init_version=init_version)
        else:
            raise
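
A sketch of how this is typically driven (for example by keystone-manage db_sync); the extension name below is only an example of a keystone.contrib package that ships its own migrate repo:

sync_database_to_version()                    # core repo, latest version
sync_database_to_version(extension='oauth1')  # contrib extension repo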
Exemple #49
0
    def update(self, object_id, values, old_obj=None):
        if old_obj is None:
            old_obj = self.get(object_id)

        modlist = []
        for k, v in six.iteritems(values):
            if k == 'id' or k in self.attribute_ignore:
                continue

            # attribute value has not changed
            if k in old_obj and old_obj[k] == v:
                continue

            if k in self.immutable_attrs:
                msg = (_("Cannot change %(option_name)s %(attr)s") % {
                    'option_name': self.options_name,
                    'attr': k
                })
                raise exception.ValidationError(msg)

            if v is None:
                if old_obj.get(k) is not None:
                    modlist.append(
                        (ldap.MOD_DELETE, self.attribute_mapping.get(k,
                                                                     k), None))
                continue

            current_value = old_obj.get(k)
            if current_value is None:
                op = ldap.MOD_ADD
                modlist.append((op, self.attribute_mapping.get(k, k), [v]))
            elif current_value != v:
                op = ldap.MOD_REPLACE
                modlist.append((op, self.attribute_mapping.get(k, k), [v]))

        if modlist:
            conn = self.get_connection()
            try:
                conn.modify_s(self._id_to_dn(object_id), modlist)
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(object_id)
            finally:
                conn.unbind_s()

        return self.get(object_id)
Exemple #50
0
    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
        try:
            metadata_ref = self._get_metadata(user_id, tenant_id)
        except exception.MetadataNotFound:
            metadata_ref = {}

        try:
            metadata_ref['roles'] = self._remove_role_from_role_dicts(
                role_id, False, metadata_ref.get('roles', []))
        except KeyError:
            raise exception.RoleNotFound(
                message=_('Cannot remove role that has not been granted, %s') %
                role_id)

        if metadata_ref['roles']:
            self._update_metadata(user_id, tenant_id, metadata_ref)
        else:
            self.db.delete('metadata_user-%s-%s' % (tenant_id, user_id))
Exemple #51
0
class RemoteError(RPCException):
    """Signifies that a remote class has raised an exception.

    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback.  These are
    sent to the parent as a joined string so printing the exception
    contains all of the relevant info.

    """
    msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")

    def __init__(self, exc_type=None, value=None, traceback=None):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        super(RemoteError, self).__init__(exc_type=exc_type,
                                          value=value,
                                          traceback=traceback)
Exemple #52
0
    def get_project_users(self, context, tenant_id, **kw):
        self.assert_admin(context)
        user_refs = []
        user_ids = self.assignment_api.list_user_ids_for_project(tenant_id)
        for user_id in user_ids:
            try:
                user_ref = self.identity_api.get_user(user_id)
            except exception.UserNotFound:
                # Log that user is missing and continue on.
                message = _("User %(user_id)s in project %(project_id)s "
                            "doesn't exist.")
                LOG.debug(message, {
                    'user_id': user_id,
                    'project_id': tenant_id
                })
            else:
                user_refs.append(self.v3_to_v2_user(user_ref))
        return {'users': user_refs}
Exemple #53
0
    def _assert_owner(self, user_id, credential_id):
        """Ensure the provided user owns the credential.

        :param user_id: expected credential owner
        :param credential_id: id of credential object
        :raises exception.Forbidden: on failure

        """
        ec2_credential_id = utils.hash_access_key(credential_id)
        cred_ref = self.credential_api.get_credential(ec2_credential_id)
        if user_id != cred_ref['user_id']:
            raise exception.Forbidden(_('Credential belongs to another user'))

Exemple #54
0
def configure_cache_region(region):
    """Configure a cache region.

    :param region: dogpile.cache.CacheRegion object to configure; must already
                   be instantiated by the caller
    :raises: exception.ValidationError
    :returns: dogpile.cache.CacheRegion
    """
    if not isinstance(region, dogpile.cache.CacheRegion):
        raise exception.ValidationError(
            _('region not type dogpile.cache.CacheRegion'))

    if not region.is_configured:
        # NOTE(morganfainberg): this is how you tell if a region is configured.
        # There is a request logged with dogpile.cache upstream to make this
        # easier / less ugly.

        config_dict = build_cache_config()
        region.configure_from_config(config_dict,
                                     '%s.' % CONF.cache.config_prefix)

        if CONF.cache.debug_cache_backend:
            region.wrap(DebugProxy)

        # NOTE(morganfainberg): if the backend requests the use of a
        # key_mangler, we should respect that key_mangler function.  If a
        # key_mangler is not defined by the backend, use the sha1_mangle_key
        # mangler provided by dogpile.cache. This ensures we always use a fixed
        # size cache-key.
        if region.key_mangler is None:
            region.key_mangler = util.sha1_mangle_key

        for class_path in CONF.cache.proxies:
            # NOTE(morganfainberg): if we have any proxy wrappers, we should
            # ensure they are added to the cache region's backend.  Since
            # configure_from_config doesn't handle the wrap argument, we need
            # to manually add the Proxies. For information on how the
            # ProxyBackends work, see the dogpile.cache documents on
            # "changing-backend-behavior"
            cls = importutils.import_class(class_path)
            LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
            region.wrap(cls)

    return region
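
A minimal sketch of configuring a region, assuming the [cache] options are already registered in CONF and using dogpile.cache's public make_region() factory:

import dogpile.cache

region = dogpile.cache.make_region()
configure_cache_region(region)  # wires in backend, expiration time, key mangler
assert region.is_configured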
Exemple #55
0
    def start(self, key=None, backlog=128):
        """Run a WSGI server with the given application."""
        LOG.info(_('Starting %(arg0)s on %(host)s:%(port)s'),
                 {'arg0': sys.argv[0],
                  'host': self.host,
                  'port': self.port})

        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        info = socket.getaddrinfo(self.host,
                                  self.port,
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        _socket = eventlet.listen(info[-1],
                                  family=info[0],
                                  backlog=backlog)
        if key:
            self.socket_info[key] = _socket.getsockname()
        # SSL is enabled
        if self.do_ssl:
            if self.cert_required:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            sslsocket = eventlet.wrap_ssl(_socket, certfile=self.certfile,
                                          keyfile=self.keyfile,
                                          server_side=True,
                                          cert_reqs=cert_reqs,
                                          ca_certs=self.ca_certs)
            _socket = sslsocket

        # Optionally enable keepalive on the wsgi socket.
        if self.keepalive:
            _socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

            # This option isn't available in the OS X version of eventlet
            if hasattr(socket, 'TCP_KEEPIDLE') and self.keepidle is not None:
                _socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                   self.keepidle)

        self.greenthread = self.pool.spawn(self._run,
                                           self.application,
                                           _socket)
Exemple #56
0
    def create_grant(self, role_id, user_id=None, group_id=None,
                     domain_id=None, project_id=None,
                     inherited_to_projects=False):

        def calculate_type(user_id, group_id, project_id, domain_id):
            if user_id and project_id:
                return AssignmentType.USER_PROJECT
            elif user_id and domain_id:
                return AssignmentType.USER_DOMAIN
            elif group_id and project_id:
                return AssignmentType.GROUP_PROJECT
            elif group_id and domain_id:
                return AssignmentType.GROUP_DOMAIN
            else:
                # some of these values may be None, so stringify each piece
                # before joining them into the error message
                message_data = ', '.join(
                    '%s' % x for x in
                    (user_id, group_id, project_id, domain_id))
                raise exception.Error(message=_(
                    'Unexpected combination of grant attributes - '
                    'User, Group, Project, Domain: %s') % message_data)

        with sql.transaction() as session:
            self._get_role(session, role_id)

            if domain_id:
                self._get_domain(session, domain_id)
            if project_id:
                self._get_project(session, project_id)

            if project_id and inherited_to_projects:
                msg = _('Inherited roles can only be assigned to domains')
                raise exception.Conflict(type='role grant', details=msg)

        type = calculate_type(user_id, group_id, project_id, domain_id)
        try:
            with sql.transaction() as session:
                session.add(RoleAssignment(
                    type=type,
                    actor_id=user_id or group_id,
                    target_id=project_id or domain_id,
                    role_id=role_id,
                    inherited=inherited_to_projects))
        except sql.DBDuplicateEntry:
            # The v3 grant APIs are silent if the assignment already exists
            pass
Exemple #57
0
    def add_user_to_project(self, tenant_id, user_id):
        """Add user to a tenant by creating a default role relationship.

        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.UserNotFound

        """
        try:
            self.driver.add_role_to_user_and_project(
                user_id, tenant_id, config.CONF.member_role_id)
        except exception.RoleNotFound:
            LOG.info(
                _("Creating the default role %s "
                  "because it does not exist."), config.CONF.member_role_id)
            role = {'id': CONF.member_role_id, 'name': CONF.member_role_name}
            self.driver.create_role(config.CONF.member_role_id, role)
            # Now that the default role exists, the add should succeed.
            self.driver.add_role_to_user_and_project(
                user_id, tenant_id, config.CONF.member_role_id)
Exemple #58
0
    def authenticate(self, context, auth_payload, user_context):
        """Try to authenticate against the identity backend."""
        user_info = UserAuthInfo.create(auth_payload)

        # FIXME(gyee): identity.authenticate() can use some refactoring since
        # all we care about is whether the password matches
        try:
            self.identity_api.authenticate(
                context,
                user_id=user_info.user_id,
                password=user_info.password,
                domain_scope=user_info.domain_id)
        except AssertionError:
            # authentication failed because of invalid username or password
            msg = _('Invalid username or password')
            raise exception.Unauthorized(msg)

        if 'user_id' not in user_context:
            user_context['user_id'] = user_info.user_id
Exemple #59
0
    def pem_header(self, pem_header):
        if pem_header is None:
            self._pem_type = None
            self._pem_header = None
        else:
            pem_type = PEM_HEADER_TO_TYPE.get(pem_header)
            if pem_type is None:
                raise ValueError(
                    _('unknown pem header "%(pem_header)s", '
                      'valid headers are: '
                      '%(valid_pem_headers)s') % {
                          'pem_header': pem_header,
                          'valid_pem_headers':
                          ', '.join("'%s'" % x for x in pem_headers)
                      })

            self._pem_type = pem_type
            self._pem_header = pem_header
Exemple #60
0
def v3_token_to_auth_context(token):
    creds = {'is_delegated_auth': False}
    token_data = token['token']
    try:
        creds['user_id'] = token_data['user']['id']
    except AttributeError:
        LOG.warning(_('RBAC: Invalid user data in v3 token'))
        raise exception.Unauthorized()
    if 'project' in token_data:
        creds['project_id'] = token_data['project']['id']
    else:
        LOG.debug('RBAC: Proceeding without project')
    if 'domain' in token_data:
        creds['domain_id'] = token_data['domain']['id']
    if 'roles' in token_data:
        creds['roles'] = []
        for role in token_data['roles']:
            creds['roles'].append(role['name'])
    creds['group_ids'] = [
        g['id'] for g in token_data['user'].get(federation.FEDERATION, {}).get(
            'groups', [])
    ]

    trust = token_data.get('OS-TRUST:trust')
    if trust is None:
        creds['trust_id'] = None
        creds['trustor_id'] = None
        creds['trustee_id'] = None
    else:
        creds['trust_id'] = trust['id']
        creds['trustor_id'] = trust['trustor_user']['id']
        creds['trustee_id'] = trust['trustee_user']['id']
        creds['is_delegated_auth'] = True

    oauth1 = token_data.get('OS-OAUTH1')
    if oauth1 is None:
        creds['consumer_id'] = None
        creds['access_token_id'] = None
    else:
        creds['consumer_id'] = oauth1['consumer_id']
        creds['access_token_id'] = oauth1['access_token_id']
        creds['is_delegated_auth'] = True
    return creds
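
A small illustrative call with a hypothetical, minimal project-scoped v3 token body (no trust, OAuth1 or federated groups):

token = {'token': {
    'user': {'id': 'u1'},
    'project': {'id': 'p1'},
    'roles': [{'name': 'admin'}, {'name': 'member'}],
}}
creds = v3_token_to_auth_context(token)
# creds now resembles:
# {'is_delegated_auth': False, 'user_id': 'u1', 'project_id': 'p1',
#  'roles': ['admin', 'member'], 'group_ids': [],
#  'trust_id': None, 'trustor_id': None, 'trustee_id': None,
#  'consumer_id': None, 'access_token_id': None}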