Ejemplo n.º 1
0
    def _validate_trusted_issuer(self, env):
        """Check whether the client certificate issuer is trusted.

        If the config option 'trusted_issuer' is unset, or does not list
        the client's issuer DN, no certificate is accepted for tokenless
        authorization.

        :param env: dict carrying the client issuer's attributes
        :returns: True when the issuer found in env is trusted, else False
        """
        trusted = CONF.tokenless_auth.trusted_issuer
        if not trusted:
            return False

        issuer = env.get(CONF.tokenless_auth.issuer_attribute)
        if not issuer:
            LOG.info(_LI('Cannot find client issuer in env by the '
                         'issuer attribute - %s.'),
                     CONF.tokenless_auth.issuer_attribute)
            return False

        if issuer not in trusted:
            LOG.info(
                _LI('The client issuer %(client_issuer)s does not match with '
                    'the trusted issuer %(trusted_issuer)s'),
                {'client_issuer': issuer, 'trusted_issuer': trusted})
            return False

        return True
Ejemplo n.º 2
0
    def _page_feed(self, session, url, retry=True):
        """Page event feed(s).

        This method will read all events and add them to a list. This process
        of paging will continue until no previous url is returned in the links
        header. On a 401 response the admin token is refreshed once and the
        same page is retried.

        :param session: request session
        :param url: feed endpoint
        :param retry: boolean used for single re-authentication

        :raise RuntimeError: if the response from the feed is not 200 OK and
            re-authentication has already been attempted.
        """
        self._headers['X-Auth-Token'] = self._admin_token
        resp = session.get(url, headers=self._headers)
        if resp.status_code == requests.codes.ok:
            feed = json.loads(resp.content)['feed']
            if 'entry' in feed:
                # Entries from this page are placed before any collected so
                # far.
                self._entries = list(feed['entry']) + self._entries
            if 'previous' in resp.links:
                # Keep following the 'previous' link until paging ends.
                self._page_feed(session, resp.links['previous']['url'])
        elif resp.status_code == requests.codes.unauthorized and retry:
            # Token expired: refresh it and retry this page exactly once.
            self._admin_token = get_admin_token()
            LOG.info(_LI('Authorization failed: Re-authenticating'))
            self._page_feed(session, url, False)
        else:
            msg = (_LI('Failed to parse feed: response code %s') %
                   resp.status_code)
            LOG.info(msg)
            raise RuntimeError(msg)
Ejemplo n.º 3
0
    def _validate_trusted_issuer(self, request):
        """To further filter the certificates that are trusted.

        If the config option 'trusted_issuer' is absent or does
        not contain the trusted issuer DN, no certificates
        will be allowed in tokenless authorization.

        :param request: the request whose WSGI environ carries the client
            issuer's attributes
        :type request: request object exposing an ``environ`` dict
        :returns: True if the client issuer is trusted; otherwise False
        """
        if not CONF.tokenless_auth.trusted_issuer:
            return False

        issuer = request.environ.get(CONF.tokenless_auth.issuer_attribute)
        if not issuer:
            msg = _LI('Cannot find client issuer in env by the '
                      'issuer attribute - %s.')
            LOG.info(msg, CONF.tokenless_auth.issuer_attribute)
            return False

        if issuer in CONF.tokenless_auth.trusted_issuer:
            return True

        msg = _LI('The client issuer %(client_issuer)s does not match with '
                  'the trusted issuer %(trusted_issuer)s')
        LOG.info(
            msg, {
                'client_issuer': issuer,
                'trusted_issuer': CONF.tokenless_auth.trusted_issuer
            })

        return False
Ejemplo n.º 4
0
    def get_token_data(self, user_id, method_names, extras=None,
                       domain_id=None, project_id=None, expires=None,
                       trust=None, token=None, include_catalog=True,
                       bind=None, access_token=None, issued_at=None,
                       audit_info=None):
        """Build a v3 token body from the stored v2 token response.

        The v2 response is temporarily kept inside token_data (under
        const.TOKEN_RESPONSE) so the _populate_* helpers can read it, and
        is stripped out again before returning.

        :returns: dict of the form {'token': <token data>}
        """
        username = self._token_data['access']['user']['name']
        LOG.info(_LI('Building token data for user %s.'), username)
        token_data = {
            'methods': method_names,
            const.TOKEN_RESPONSE: self._token_data,
        }

        # Rackspace doesn't have projects that act as domains
        token_data['is_domain'] = False

        self._populate_scope(token_data, domain_id, project_id)
        self._populate_user(token_data, user_id, trust)
        self._populate_roles(token_data, user_id, domain_id, project_id, trust,
                             access_token)
        self._populate_audit_info(token_data, audit_info)

        if include_catalog:
            self._populate_service_catalog(token_data, user_id, domain_id,
                                           project_id, trust)
        self._populate_token_dates(token_data, expires=expires, trust=trust,
                                   issued_at=issued_at)

        # Remove Rackspace's response from token data. Use the same constant
        # the entry was stored under above, instead of a hard-coded key.
        del token_data[const.TOKEN_RESPONSE]

        LOG.info(_LI('Successfully built token data for user %s.'), username)
        return {'token': token_data}
Ejemplo n.º 5
0
    def _page_feed(self, session, url, retry=True):
        """Page event feed(s).

        Collects every event into a list, recursively following the
        'previous' link from the response headers until none remains.

        :param session: request session
        :param url: feed endpoint
        :param retry: whether one re-authentication attempt is allowed

        :raise RuntimeError: when the feed response is neither 200 OK nor a
            retriable 401.
        """
        self._headers['X-Auth-Token'] = self._admin_token
        response = session.get(url, headers=self._headers)
        status = response.status_code
        if status == requests.codes.ok:
            feed = json.loads(response.content)['feed']
            if 'entry' in feed:
                self._entries = list(feed['entry']) + self._entries
            if 'previous' in response.links:
                self._page_feed(session, response.links['previous']['url'])
        elif status == requests.codes.unauthorized and retry:
            self._admin_token = get_admin_token()
            LOG.info(_LI('Authorization failed: Re-authenticating'))
            self._page_feed(session, url, False)
        else:
            msg = (_LI('Failed to parse feed: response code %s') % status)
            LOG.info(msg)
            raise RuntimeError(msg)
Ejemplo n.º 6
0
    def add_user_to_project(self, tenant_id, user_id):
        """Add user to a tenant by creating a default role relationship.

        If the default member role does not exist yet it is created on the
        fly (a concurrent creation is tolerated via Conflict) and the role
        assignment is retried.

        :param tenant_id: id of the project the user is added to
        :param user_id: id of the user being added
        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.UserNotFound

        """
        # Raises ProjectNotFound when the target project does not exist.
        self.resource_api.get_project(tenant_id)
        try:
            self.role_api.get_role(CONF.member_role_id)
            self.driver.add_role_to_user_and_project(
                user_id,
                tenant_id,
                CONF.member_role_id)
        except exception.RoleNotFound:
            LOG.info(_LI("Creating the default role %s "
                         "because it does not exist."),
                     CONF.member_role_id)
            role = {'id': CONF.member_role_id,
                    'name': CONF.member_role_name}
            try:
                self.role_api.create_role(CONF.member_role_id, role)
            except exception.Conflict:
                # Another worker created the role concurrently; carry on.
                LOG.info(_LI("Creating the default role %s failed because it "
                             "was already created"),
                         CONF.member_role_id)
            # now that default role exists, the add should succeed
            self.driver.add_role_to_user_and_project(
                user_id,
                tenant_id,
                CONF.member_role_id)
Ejemplo n.º 7
0
    def add_user_to_project(self, tenant_id, user_id):
        """Add user to a tenant by creating a default role relationship.

        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.UserNotFound

        """
        # Raises ProjectNotFound when the tenant does not exist.
        self.resource_api.get_project(tenant_id)
        member_role = CONF.member_role_id
        try:
            self.role_api.get_role(member_role)
            self.driver.add_role_to_user_and_project(user_id, tenant_id,
                                                     member_role)
        except exception.RoleNotFound:
            LOG.info(_LI("Creating the default role %s "
                         "because it does not exist."), member_role)
            try:
                self.role_api.create_role(
                    member_role,
                    {'id': member_role, 'name': CONF.member_role_name})
            except exception.Conflict:
                # A concurrent creation already made the role; that is fine.
                LOG.info(_LI("Creating the default role %s failed because it "
                             "was already created"), member_role)
            # now that default role exists, the add should succeed
            self.driver.add_role_to_user_and_project(user_id, tenant_id,
                                                     member_role)
Ejemplo n.º 8
0
def validate_token_bind(context, token_ref):
    """Enforce the configured token-bind policy against a request.

    The behavior depends on CONF.token.enforce_token_bind:

    - 'disabled': skip all checks.
    - 'permissive': verify known bind types if present; ignore unknown ones.
    - 'strict': like permissive, but unknown bind types are rejected.
    - 'required': a bind must be present and every type must verify.
    - any other value: treated as the name of a bind type that must be
      present in the token's bind information.

    :param context: request context; context['environment'] is consulted
        for kerberos verification
    :param token_ref: the token being validated
    :raises keystone.exception.UnexpectedError: if token_ref is not a
        KeystoneToken
    :raises keystone.exception.Unauthorized: if a required bind is missing
        or does not match the request
    """
    bind_mode = CONF.token.enforce_token_bind

    if bind_mode == 'disabled':
        return

    if not isinstance(token_ref, token_model.KeystoneToken):
        raise exception.UnexpectedError(
            _('token reference must be a '
              'KeystoneToken type, got: %s') % type(token_ref))
    bind = token_ref.bind

    # permissive and strict modes don't require there to be a bind
    permissive = bind_mode in ('permissive', 'strict')

    if not bind:
        if permissive:
            # no bind provided and none required
            return
        else:
            LOG.info(_LI("No bind information present in token"))
            raise exception.Unauthorized()

    # get the named mode if bind_mode is not one of the known
    name = None if permissive or bind_mode == 'required' else bind_mode

    if name and name not in bind:
        LOG.info(_LI("Named bind mode %s not in bind information"), name)
        raise exception.Unauthorized()

    for bind_type, identifier in bind.items():
        if bind_type == 'kerberos':
            # Kerberos binds are verified against the WSGI environment.
            if (context['environment'].get('AUTH_TYPE', '').lower() !=
                    'negotiate'):
                msg = _('Kerberos credentials required and not present')
                LOG.info(msg)
                raise exception.Unauthorized(msg)

            if context['environment'].get('REMOTE_USER') != identifier:
                msg = _('Kerberos credentials do not match those in bind')
                LOG.info(msg)
                raise exception.Unauthorized(msg)

            LOG.info(_LI('Kerberos bind authentication successful'))

        elif bind_mode == 'permissive':
            LOG.debug(("Ignoring unknown bind (due to permissive mode): "
                       "{%(bind_type)s: %(identifier)s}"), {
                           'bind_type': bind_type,
                           'identifier': identifier
                       })
        else:
            # strict/required/named modes reject bind types we can't verify.
            msg = _('Could not verify unknown bind: {%(bind_type)s: '
                    '%(identifier)s}') % {
                        'bind_type': bind_type,
                        'identifier': identifier
                    }
            LOG.info(msg)
            raise exception.Unauthorized(msg)
Ejemplo n.º 9
0
    def do_bootstrap(self):
        """Perform the bootstrap actions.

        Create bootstrap user, project, and role so that CMS, humans, or
        scripts can continue to perform initial setup (domains, projects,
        services, endpoints, etc) of Keystone when standing up a new
        deployment.

        :raises ValueError: when no bootstrap password was supplied via
            the --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD.
        """
        self._get_config()

        if self.password is None:
            print(
                _('Either --bootstrap-password argument or '
                  'OS_BOOTSTRAP_PASSWORD must be set.'))
            raise ValueError

        # NOTE(morganfainberg): Ensure the default domain is in-fact created
        default_domain = migration_helpers.get_default_domain()
        try:
            self.resource_manager.create_domain(domain_id=default_domain['id'],
                                                domain=default_domain)
            LOG.info(_LI('Created domain %s'), default_domain['id'])
        except exception.Conflict:
            # NOTE(morganfainberg): Domain already exists, continue on.
            LOG.info(_LI('Domain %s already exists, skipping creation.'),
                     default_domain['id'])

        # Create the bootstrap project in the default domain.
        LOG.info(_LI('Creating project %s'), self.project_name)
        self.resource_manager.create_project(
            tenant_id=self.tenant_id,
            tenant={
                'enabled': True,
                'id': self.tenant_id,
                'domain_id': default_domain['id'],
                'description': 'Bootstrap project for initializing the '
                'cloud.',
                'name': self.project_name
            },
        )
        # Create the bootstrap user in the default domain.
        LOG.info(_LI('Creating user %s'), self.username)
        user = self.identity_manager.create_user(
            user_ref={
                'name': self.username,
                'enabled': True,
                'domain_id': default_domain['id'],
                'password': self.password
            })
        # Create the bootstrap role and grant it to the user on the project.
        LOG.info(_LI('Creating Role %s'), self.role_name)
        self.role_manager.create_role(
            role_id=self.role_id,
            role={
                'name': self.role_name,
                'id': self.role_id
            },
        )
        self.assignment_manager.add_role_to_user_and_project(
            user_id=user['id'], tenant_id=self.tenant_id, role_id=self.role_id)
Ejemplo n.º 10
0
def validate_token_bind(context, token_ref):
    """Check the token's bind information against the request context.

    Honors CONF.token.enforce_token_bind: 'disabled' skips everything,
    'permissive'/'strict' verify binds when present, 'required' demands a
    bind, and any other value names a bind type that must be present.

    :raises keystone.exception.UnexpectedError: for a non-KeystoneToken
    :raises keystone.exception.Unauthorized: on any bind violation
    """
    bind_mode = CONF.token.enforce_token_bind
    if bind_mode == 'disabled':
        return

    if not isinstance(token_ref, token_model.KeystoneToken):
        raise exception.UnexpectedError(_('token reference must be a '
                                          'KeystoneToken type, got: %s') %
                                        type(token_ref))

    bind = token_ref.bind
    # Neither permissive nor strict insists that a bind exists at all.
    permissive = bind_mode in ('permissive', 'strict')

    if not bind:
        if not permissive:
            LOG.info(_LI("No bind information present in token"))
            raise exception.Unauthorized()
        return

    # Any mode other than the well-known ones names a required bind type.
    if permissive or bind_mode == 'required':
        name = None
    else:
        name = bind_mode

    if name and name not in bind:
        LOG.info(_LI("Named bind mode %s not in bind information"), name)
        raise exception.Unauthorized()

    for bind_type, identifier in bind.items():
        if bind_type == 'kerberos':
            auth_type = context['environment'].get('AUTH_TYPE', '')
            if auth_type.lower() != 'negotiate':
                msg = _('Kerberos credentials required and not present')
                LOG.info(msg)
                raise exception.Unauthorized(msg)

            if context['environment'].get('REMOTE_USER') != identifier:
                msg = _('Kerberos credentials do not match those in bind')
                LOG.info(msg)
                raise exception.Unauthorized(msg)

            LOG.info(_LI('Kerberos bind authentication successful'))
        elif bind_mode == 'permissive':
            LOG.debug(("Ignoring unknown bind (due to permissive mode): "
                       "{%(bind_type)s: %(identifier)s}"),
                      {'bind_type': bind_type, 'identifier': identifier})
        else:
            msg = _('Could not verify unknown bind: {%(bind_type)s: '
                    '%(identifier)s}') % {'bind_type': bind_type,
                                          'identifier': identifier}
            LOG.info(msg)
            raise exception.Unauthorized(msg)
Ejemplo n.º 11
0
    def do_bootstrap(self):
        """Perform the bootstrap actions.

        Creates the bootstrap user, project, and role so that CMS, humans,
        or scripts can continue the initial setup (domains, projects,
        services, endpoints, etc) of a fresh Keystone deployment.

        :raises ValueError: when no bootstrap password was provided.
        """
        self._get_config()

        if self.password is None:
            print(_('Either --bootstrap-password argument or '
                    'OS_BOOTSTRAP_PASSWORD must be set.'))
            raise ValueError

        # Guarantee the default domain exists first; Conflict just means a
        # previous run already created it.
        default_domain = migration_helpers.get_default_domain()
        try:
            self.resource_manager.create_domain(
                domain_id=default_domain['id'], domain=default_domain)
            LOG.info(_LI('Created domain %s'), default_domain['id'])
        except exception.Conflict:
            LOG.info(_LI('Domain %s already exists, skipping creation.'),
                     default_domain['id'])

        LOG.info(_LI('Creating project %s'), self.project_name)
        project_ref = {
            'enabled': True,
            'id': self.tenant_id,
            'domain_id': default_domain['id'],
            'description': 'Bootstrap project for initializing the cloud.',
            'name': self.project_name,
        }
        self.resource_manager.create_project(tenant_id=self.tenant_id,
                                             tenant=project_ref)

        LOG.info(_LI('Creating user %s'), self.username)
        user_ref = {
            'name': self.username,
            'enabled': True,
            'domain_id': default_domain['id'],
            'password': self.password,
        }
        user = self.identity_manager.create_user(user_ref=user_ref)

        LOG.info(_LI('Creating Role %s'), self.role_name)
        self.role_manager.create_role(
            role_id=self.role_id,
            role={'id': self.role_id, 'name': self.role_name})

        self.assignment_manager.add_role_to_user_and_project(
            user_id=user['id'],
            tenant_id=self.tenant_id,
            role_id=self.role_id)
Ejemplo n.º 12
0
 def _assert_domain_scope(self, token_data):
     """Verify the user may scope to the requested domain, else 401."""
     if self._is_in_domain(self._scope_domain_id, token_data):
         LOG.info(_LI('User %(u_name)s can scope to domain %(d_id)s.'),
                  {'u_name': self._username, 'd_id': self._scope_domain_id})
         return
     msg = (_LI('User %(u_name)s cannot scope to domain %(d_id)s.') %
            {'u_name': self._username, 'd_id': self._scope_domain_id})
     LOG.info(msg)
     raise exception.Unauthorized(msg)
Ejemplo n.º 13
0
 def _assert_user_domain(self, token_data):
     """Verify the user belongs to the supplied domain, else 401."""
     # The caller may have supplied either the domain id or the domain name.
     user_domain = self._user_domain_id or self._user_domain_name
     if self._is_in_domain(user_domain, token_data):
         LOG.info(_LI('User %(u_name)s belongs to domain %(d_id)s.'),
                  {'u_name': self._username, 'd_id': user_domain})
         return
     msg = (_LI('User %(u_name)s does not belong to domain %(d_id)s.') %
            {'u_name': self._username, 'd_id': user_domain})
     LOG.info(msg)
     raise exception.Unauthorized(msg)
Ejemplo n.º 14
0
 def _assert_project_scope(self, token_data):
     """Verify the user has a role on the requested project, else 401."""
     # Use a fresh sentinel object so that roles lacking a 'tenantId' can
     # never accidentally match the requested scope project id.
     sentinel = object()
     tenants = {role.get('tenantId', sentinel)
                for role in token_data['access']['user']['roles']}
     if self._scope_project_id in tenants:
         LOG.info(_LI('User %(u_name)s can scope to project %(p_id)s.'),
                  {'u_name': self._username, 'p_id': self._scope_project_id})
         return
     msg = (_LI('User %(u_name)s cannot scope to project %(p_id)s.') %
            {'u_name': self._username, 'p_id': self._scope_project_id})
     LOG.info(msg)
     raise exception.Unauthorized(msg)
Ejemplo n.º 15
0
    def _apply_region_proxy(self, proxy_list):
        """Wrap the KVS region with the configured proxy backends.

        :param proxy_list: list containing ProxyBackend subclasses and/or
            dotted import paths to such classes. Entries that are not
            ProxyBackend subclasses are skipped with a warning. A non-list
            value is ignored entirely.
        """
        if isinstance(proxy_list, list):
            proxies = []

            for item in proxy_list:
                if isinstance(item, str):
                    # A dotted path: import it to obtain the class object.
                    LOG.debug('Importing class %s as KVS proxy.', item)
                    pxy = importutils.import_class(item)
                else:
                    pxy = item

                if issubclass(pxy, proxy.ProxyBackend):
                    proxies.append(pxy)
                else:
                    pxy_cls_name = reflection.get_class_name(
                        pxy, fully_qualified=False)
                    LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
                                pxy_cls_name)

            # Wrap the region in reverse order of configuration.
            for proxy_cls in reversed(proxies):
                proxy_cls_name = reflection.get_class_name(
                    proxy_cls, fully_qualified=False)
                LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
                         {'proxy': proxy_cls_name,
                          'name': self._region.name})
                self._region.wrap(proxy_cls)
Ejemplo n.º 16
0
    def _token_authenticate(self):
        """Authenticate against v2 using a token, with result caching.

        Responses are cached keyed on the SHA-1 of the token id, so
        repeated validations of the same token skip the network call.

        :returns: the token data returned by the v2 /tokens call
        """
        # NOTE(review): hashlib.sha1 requires a byte string; assumes
        # self._token_id is bytes (or Python 2 str) -- confirm at callers.
        hash_token = hashlib.sha1(self._token_id).hexdigest()
        cached_data = cache.token_region.get(hash_token)
        if cached_data:
            return cached_data

        headers = const.HEADERS.copy()
        if self._x_forwarded_for:
            # Forward the original client address to the upstream service.
            headers['X-Forwarded-For'] = self._x_forwarded_for

        LOG.info(_LI('Token authentication against v2.'))
        token_data = self.POST('/tokens',
                               headers=headers,
                               data={
                                   'auth': {
                                       'token': {
                                           'id': self._token_id,
                                       },
                                       'tenantId': self._scope_project_id,
                                   },
                               },
                               expected_status=requests.codes.ok)

        cache.token_region.set(hash_token, token_data)
        return token_data
Ejemplo n.º 17
0
def load_keys():
    """Load keys from disk into a list.

    The first key in the list is the primary key used for encryption. All
    other keys are active secondary keys that can be used for decrypting
    tokens.

    :returns: list of key strings sorted by key number, descending; empty
        when the key repository fails validation.
    """
    if not validate_key_repository():
        return []

    # Build a dictionary of key_number:encryption_key pairs; filenames in
    # the repository are the key numbers.
    keys = {}
    for filename in os.listdir(CONF.fernet_tokens.key_repository):
        path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
        if os.path.isfile(path):
            with open(path, 'r') as key_file:
                keys[int(filename)] = key_file.read()

    LOG.info(_LI('Loaded %(count)s encryption keys from: %(dir)s'), {
        'count': len(keys),
        'dir': CONF.fernet_tokens.key_repository
    })

    # Return the keys sorted by key number, descending, so the primary
    # (highest-numbered) key comes first.
    return [keys[key_number] for key_number in sorted(keys, reverse=True)]
Ejemplo n.º 18
0
 def exec_command(self, command):
     """Run an SSL command, raising CalledProcessError on failure.

     Each element of *command* is %-interpolated against
     self.ssl_dictionary before execution. Combined stdout/stderr output
     is logged on failure and attached to the raised exception.

     :param command: iterable of command parts containing %-placeholders
     :raises environment.subprocess.CalledProcessError: when the command
         exits with a non-zero status.
     """
     # Build the final argv with a comprehension instead of a manual
     # append loop.
     to_exec = [cmd_part % self.ssl_dictionary for cmd_part in command]
     LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
     # NOTE(Jeffrey4l): Redirect both stdout and stderr to pipe, so the
     # output can be captured.
     # NOTE(Jeffrey4l): check_output is not compatible with Python 2.6.
     # So use Popen instead.
     process = environment.subprocess.Popen(
         to_exec,
         stdout=environment.subprocess.PIPE,
         stderr=environment.subprocess.STDOUT)
     output = process.communicate()[0]
     retcode = process.poll()
     if retcode:
         LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s'
                       '- %(output)s'),
                   {'to_exec': to_exec,
                    'retcode': retcode,
                    'output': output})
         e = environment.subprocess.CalledProcessError(retcode, to_exec[0])
         # NOTE(Jeffrey4l): Python 2.6 compatibility:
         # CalledProcessError did not have output keyword argument
         e.output = output
         raise e
Ejemplo n.º 19
0
def _expiry_range_batched(session, upper_bound_func, batch_size):
    """Returns the stop point of the next batch for expiration.

    Return the timestamp of the next token that is `batch_size` rows from
    being the oldest expired token.
    """

    # This expiry strategy splits the tokens into roughly equal sized batches
    # to be deleted.  It does this by finding the timestamp of a token
    # `batch_size` rows from the oldest token and yielding that to the caller.
    # It's expected that the caller will then delete all rows with a timestamp
    # equal to or older than the one yielded.  This may delete slightly more
    # tokens than the batch_size, but that should be ok in almost all cases.
    # Pass batch_size as a lazy logging argument instead of eagerly
    # %-formatting it, matching the logging style used elsewhere.
    LOG.info(_LI('Token expiration batch size: %d'), batch_size)
    query = session.query(TokenModel.expires)
    query = query.filter(TokenModel.expires < upper_bound_func())
    query = query.order_by(TokenModel.expires)
    query = query.offset(batch_size - 1)
    query = query.limit(1)
    while True:
        try:
            next_expiration = query.one()[0]
        except sql.NotFound:
            # There are less than `batch_size` rows remaining, so fall
            # through to the normal delete
            break
        yield next_expiration
    yield upper_bound_func()
    def create_projects_from_mapping(shadow_projects, idp_domain_id,
                                     existing_roles, user, assignment_api,
                                     resource_api):
        """Ensure each shadow project exists and grant its roles to user."""
        for shadow_project in shadow_projects:
            try:
                # Look the project up by name; provision it if missing.
                project = resource_api.get_project_by_name(
                    shadow_project['name'], idp_domain_id)
            except exception.ProjectNotFound:
                LOG.info(
                    _LI('Project %(project_name)s does not exist. It will be '
                        'automatically provisioning for user %(user_id)s.'),
                    {'project_name': shadow_project['name'],
                     'user_id': user['id']})
                new_project_id = uuid.uuid4().hex
                project = resource_api.create_project(
                    new_project_id,
                    {'id': new_project_id,
                     'name': shadow_project['name'],
                     'domain_id': idp_domain_id})

            # Grant every mapped role on the project to the user.
            for shadow_role in shadow_project['roles']:
                assignment_api.create_grant(
                    existing_roles[shadow_role['name']]['id'],
                    user_id=user['id'],
                    project_id=project['id'])
Ejemplo n.º 21
0
    def _apply_region_proxy(self, proxy_list):
        """Wrap the KVS region with each configured ProxyBackend.

        Accepts ProxyBackend subclasses or dotted import paths to them;
        anything else is skipped with a warning. The region is wrapped in
        reverse list order. Non-list input is ignored entirely.
        """
        if not isinstance(proxy_list, list):
            return

        valid_proxies = []
        for entry in proxy_list:
            if isinstance(entry, str):
                LOG.debug('Importing class %s as KVS proxy.', entry)
                candidate = importutils.import_class(entry)
            else:
                candidate = entry

            if issubclass(candidate, proxy.ProxyBackend):
                valid_proxies.append(candidate)
            else:
                LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
                            reflection.get_class_name(candidate,
                                                      fully_qualified=False))

        for proxy_cls in reversed(valid_proxies):
            LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'), {
                'proxy': reflection.get_class_name(proxy_cls,
                                                   fully_qualified=False),
                'name': self._region.name
            })
            self._region.wrap(proxy_cls)
Ejemplo n.º 22
0
    def invalidate(self):
        """Invalidate entry.

        Invalidate capstone's cache by event type.
        """
        if self.resource_type in ('USER', 'TRR_USER'):
            # User mutations invalidate all of that user's tokens.
            if self.type in ('UPDATE', 'SUSPEND', 'DELETE'):
                LOG.info(_LI('Invalidate tokens for updated/deleted user %s'),
                         self.resource_id)
        elif self.resource_type == 'TOKEN' and self.type == 'DELETE':
            # A deleted token is invalidated individually by its id.
            LOG.info(_LI('Invalidate token %s'), self.resource_id)
Ejemplo n.º 23
0
def load_auth_methods():
    """Populate AUTH_METHODS from the configured auth plugins.

    Idempotent: returns immediately once plugins have been loaded.

    :raises ValueError: when a class-name plugin lacks a 'method'
        attribute, or when two plugins register the same method name.
    """
    global AUTH_PLUGINS_LOADED

    if AUTH_PLUGINS_LOADED:
        # Only try and load methods a single time.
        return
    # config.setup_authentication should be idempotent, call it to ensure we
    # have setup all the appropriate configuration options we may need.
    config.setup_authentication()
    for plugin in CONF.auth.methods:
        if '.' in plugin:
            # NOTE(morganfainberg): if '.' is in the plugin name, it should be
            # imported rather than used as a plugin identifier.
            plugin_class = plugin
            driver = importutils.import_object(plugin)
            if not hasattr(driver, 'method'):
                # Interpolate the class name into the message; it was
                # previously passed as a second positional argument to
                # ValueError, leaving the '%s' placeholder unformatted.
                raise ValueError(_('Cannot load an auth-plugin by class-name '
                                   'without a "method" attribute defined: %s')
                                 % plugin_class)

            LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
            plugin_name = driver.method
        else:
            plugin_name = plugin
            plugin_class = CONF.auth.get(plugin)
            driver = importutils.import_object(plugin_class)
        if plugin_name in AUTH_METHODS:
            raise ValueError(_('Auth plugin %(plugin)s is requesting '
                               'previously registered method %(method)s') %
                             {'plugin': plugin_class, 'method': driver.method})
        AUTH_METHODS[plugin_name] = driver
    AUTH_PLUGINS_LOADED = True
Ejemplo n.º 24
0
    def create_key_directory(self,
                             keystone_user_id=None,
                             keystone_group_id=None):
        """Attempt to create the key directory if it doesn't exist.

        :param keystone_user_id: uid to own the directory; ownership is
            only changed when both ids are provided.
        :param keystone_group_id: gid to own the directory.
        """
        if not os.access(self.key_repository, os.F_OK):
            LOG.info(
                _LI('key_repository does not appear to exist; attempting to '
                    'create it'))

            try:
                os.makedirs(self.key_repository, 0o700)
            except OSError:
                # Log and continue; any later use of the directory will
                # surface the underlying problem.
                LOG.error(
                    _LE('Failed to create key_repository: either it already '
                        'exists or you don\'t have sufficient permissions to '
                        'create it'))

            if keystone_user_id and keystone_group_id:
                os.chown(self.key_repository, keystone_user_id,
                         keystone_group_id)
            elif keystone_user_id or keystone_group_id:
                # Use lazy logging arguments instead of eager %-formatting,
                # consistent with the other log calls.
                LOG.warning(
                    _LW('Unable to change the ownership of key_repository without '
                        'a keystone user ID and keystone group ID both being '
                        'provided: %s'), self.key_repository)
Ejemplo n.º 25
0
def create_key_directory(keystone_user_id=None, keystone_group_id=None):
    """If the configured key directory does not exist, attempt to create it."""
    key_repo = CONF.fernet_tokens.key_repository
    if os.access(key_repo, os.F_OK):
        # Directory already present; nothing to create.
        return

    LOG.info(_LI(
        '[fernet_tokens] key_repository does not appear to exist; '
        'attempting to create it'))

    try:
        os.makedirs(key_repo, 0o700)
    except OSError:
        # Best effort: log and fall through so ownership can still be set.
        LOG.error(_LE(
            'Failed to create [fernet_tokens] key_repository: either it '
            'already exists or you don\'t have sufficient permissions to '
            'create it'))

    if keystone_user_id and keystone_group_id:
        os.chown(key_repo, keystone_user_id, keystone_group_id)
    elif keystone_user_id or keystone_group_id:
        # Ownership can only be changed when both ids are supplied.
        LOG.warning(_LW(
            'Unable to change the ownership of [fernet_tokens] '
            'key_repository without a keystone user ID and keystone group '
            'ID both being provided: %s') %
            key_repo)
Ejemplo n.º 26
0
def get_admin_token(retry=True):
    """Authentication request to v2.

    Make a v2 authentication request to retrieve an admin token. Retry on
    first failure.
    """
    credentials = {
        'auth': {
            'passwordCredentials': {
                'username': CONF.service_admin.username,
                'password': CONF.service_admin.password,
            },
        },
    }
    resp = requests.post(CONF.rackspace.base_url + '/tokens',
                         headers=const.HEADERS, json=credentials)
    if resp.status_code == requests.codes.ok:
        return resp.json()['access']['token']['id']
    if retry:
        # Retry exactly once on any non-200 response.
        return get_admin_token(retry=False)
    msg = (_LI('Authentication failed against v2: response code %s') %
           resp.status_code)
    LOG.info(msg)
    raise RuntimeError(msg)
Ejemplo n.º 27
0
    def invalidate(self):
        """Invalidate entry.

        Invalidate capstone's cache by event type.
        """
        if self.resource_type in ('USER', 'TRR_USER'):
            # Invalidate all tokens for updated/deleted user
            if self.type in ('UPDATE', 'SUSPEND', 'DELETE'):
                LOG.info(_LI('Invalidate tokens for updated/deleted user %s'),
                         self.resource_id)
        elif self.resource_type == 'TOKEN' and self.type == 'DELETE':
            # Invalidate token by id
            LOG.info(_LI('Invalidate token %s'), self.resource_id)
Ejemplo n.º 28
0
def load_auth_methods():
    """Populate AUTH_METHODS from the configured list of auth plugins.

    Idempotent: subsequent calls after a successful load return immediately.

    :raises ValueError: if a class-name plugin lacks a ``method`` attribute,
        or if a plugin tries to register an already-registered method name.
    """
    global AUTH_PLUGINS_LOADED

    if AUTH_PLUGINS_LOADED:
        # Only try and load methods a single time.
        return
    # config.setup_authentication should be idempotent, call it to ensure we
    # have setup all the appropriate configuration options we may need.
    config.setup_authentication()
    for plugin in CONF.auth.methods:
        if '.' in plugin:
            # NOTE(morganfainberg): if '.' is in the plugin name, it should be
            # imported rather than used as a plugin identifier.
            plugin_class = plugin
            driver = importutils.import_object(plugin)
            if not hasattr(driver, 'method'):
                # Interpolate the class name into the message; passing it as
                # a second ValueError argument would leave the '%s'
                # placeholder unexpanded in the rendered error.
                raise ValueError(_('Cannot load an auth-plugin by class-name '
                                   'without a "method" attribute defined: %s')
                                 % plugin_class)

            LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
            plugin_name = driver.method
        else:
            plugin_name = plugin
            plugin_class = CONF.auth.get(plugin)
            driver = importutils.import_object(plugin_class)
        if plugin_name in AUTH_METHODS:
            raise ValueError(_('Auth plugin %(plugin)s is requesting '
                               'previously registered method %(method)s') %
                             {'plugin': plugin_class, 'method': driver.method})
        AUTH_METHODS[plugin_name] = driver
    AUTH_PLUGINS_LOADED = True
Ejemplo n.º 29
0
    def _authenticate(self):
        """Return v2 token data for the user, using the cache when possible."""
        user_id = self._user_ref['id']
        cached = cache.token_region.get(user_id)
        if cached:
            cached_password_hash, token_data = cached
            if utils.check_password(self._password, cached_password_hash):
                # Cache hit with a matching password; skip the v2 round-trip.
                return token_data

        headers = const.HEADERS.copy()
        if self._x_forwarded_for:
            headers['X-Forwarded-For'] = self._x_forwarded_for

        LOG.info(_LI('Authenticating user %s against v2.'), self._username)
        body = {
            'auth': {
                'passwordCredentials': {
                    'username': self._username,
                    'password': self._password,
                },
            },
        }
        token_data = self.POST('/tokens',
                               headers=headers,
                               data=body,
                               expected_status=requests.codes.ok)

        # Cache the (hashed password, token) pair and map token id -> user.
        cache.token_region.set(
            user_id, (utils.hash_password(self._password), token_data))
        cache.token_map_region.set(
            token_data['access']['token']['id'], user_id)
        return token_data
Ejemplo n.º 30
0
 def exec_command(self, command):
     """Run *command* (after ssl_dictionary substitution), raising on failure."""
     to_exec = [cmd_part % self.ssl_dictionary for cmd_part in command]
     LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
     # NOTE(Jeffrey4l): Redirect both stdout and stderr to pipe, so the
     # output can be captured.
     # NOTE(Jeffrey4l): check_output is not compatible with Python 2.6.
     # So use Popen instead.
     process = environment.subprocess.Popen(
         to_exec,
         stdout=environment.subprocess.PIPE,
         stderr=environment.subprocess.STDOUT)
     output = process.communicate()[0]
     retcode = process.poll()
     if not retcode:
         return
     LOG.error(
         _LE('Command %(to_exec)s exited with %(retcode)s'
             '- %(output)s'), {
                 'to_exec': to_exec,
                 'retcode': retcode,
                 'output': output
             })
     e = environment.subprocess.CalledProcessError(retcode, to_exec[0])
     # NOTE(Jeffrey4l): Python 2.6 compatibility:
     # CalledProcessError did not have output keyword argument
     e.output = output
     raise e
Ejemplo n.º 31
0
    def listen(self, key=None, backlog=128):
        """Create and start listening on socket.

        Call before forking worker processes.

        Raises Exception if this has already been called.
        """

        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix.
        # Please refer below link
        # (https://bitbucket.org/eventlet/eventlet/
        # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/
        # greendns.py?at=0.12#cl-163)
        addr_info = socket.getaddrinfo(self.host,
                                       self.port,
                                       socket.AF_UNSPEC,
                                       socket.SOCK_STREAM)[0]
        family = addr_info[0]
        bind_addr = addr_info[-1]

        try:
            self.socket = eventlet.listen(bind_addr, family=family,
                                          backlog=backlog)
        except EnvironmentError:
            LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
                      {'host': self.host, 'port': self.port})
            raise

        LOG.info(_LI('Starting %(arg0)s on %(host)s:%(port)s'),
                 {'arg0': sys.argv[0],
                  'host': self.host,
                  'port': self.port})
Ejemplo n.º 32
0
def load_keys():
    """Load keys from disk into a list.

    The first key in the list is the primary key used for encryption. All
    other keys are active secondary keys that can be used for decrypting
    tokens.

    """
    if not validate_key_repository():
        return []

    # build a dictionary of key_number:encryption_key pairs
    keys = {}
    key_dir = CONF.fernet_tokens.key_repository
    for filename in os.listdir(key_dir):
        path = os.path.join(key_dir, str(filename))
        if not os.path.isfile(path):
            continue
        with open(path, "r") as key_file:
            try:
                key_id = int(filename)
            except ValueError:
                # File name is not a key index; ignore it.
                continue
            keys[key_id] = key_file.read()

    LOG.info(
        _LI("Loaded %(count)s encryption keys from: %(dir)s"),
        {"count": len(keys), "dir": key_dir},
    )

    # return the encryption_keys, sorted by key number, descending
    return [keys[num] for num in sorted(keys, reverse=True)]
Ejemplo n.º 33
0
    def listen(self, key=None, backlog=128):
        """Create and start listening on socket.

        Call before forking worker processes.

        Raises Exception if this has already been called.
        """

        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix.
        # Please refer below link
        # (https://bitbucket.org/eventlet/eventlet/
        # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/
        # greendns.py?at=0.12#cl-163)
        family, _socktype, _proto, _canon, bind_addr = socket.getaddrinfo(
            self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]

        try:
            self.socket = eventlet.listen(bind_addr,
                                          family=family,
                                          backlog=backlog)
        except EnvironmentError:
            LOG.error(_LE("Could not bind to %(host)s:%(port)s"), {
                'host': self.host,
                'port': self.port
            })
            raise

        LOG.info(_LI('Starting %(arg0)s on %(host)s:%(port)s'), {
            'arg0': sys.argv[0],
            'host': self.host,
            'port': self.port
        })
Ejemplo n.º 34
0
    def create_key_directory(self, keystone_user_id=None, keystone_group_id=None):
        """Attempt to create the key directory if it doesn't exist."""
        if os.access(self.key_repository, os.F_OK):
            # Already present; nothing to create.
            return

        LOG.info(_LI("key_repository does not appear to exist; attempting to create it"))

        try:
            os.makedirs(self.key_repository, 0o700)
        except OSError:
            # Best effort: log and continue so ownership can still be set.
            LOG.error(
                _LE(
                    "Failed to create key_repository: either it already "
                    "exists or you don't have sufficient permissions to "
                    "create it"
                )
            )

        if keystone_user_id and keystone_group_id:
            os.chown(self.key_repository, keystone_user_id, keystone_group_id)
        elif keystone_user_id or keystone_group_id:
            # Ownership can only be changed when both ids are supplied.
            LOG.warning(
                _LW(
                    "Unable to change the ownership of key_repository without "
                    "a keystone user ID and keystone group ID both being "
                    "provided: %s"
                )
                % self.key_repository
            )
Ejemplo n.º 35
0
def create_key_directory(keystone_user_id=None, keystone_group_id=None):
    """If the configured key directory does not exist, attempt to create it."""
    key_repo = CONF.fernet_tokens.key_repository
    if os.access(key_repo, os.F_OK):
        # Already present; nothing to create.
        return

    LOG.info(_LI("[fernet_tokens] key_repository does not appear to exist; attempting to create it"))

    try:
        os.makedirs(key_repo, 0o700)
    except OSError:
        # Best effort: log and continue so ownership can still be set.
        LOG.error(
            _LE(
                "Failed to create [fernet_tokens] key_repository: either it "
                "already exists or you don't have sufficient permissions to "
                "create it"
            )
        )

    if keystone_user_id and keystone_group_id:
        os.chown(key_repo, keystone_user_id, keystone_group_id)
    elif keystone_user_id or keystone_group_id:
        # Ownership can only be changed when both ids are supplied.
        LOG.warning(
            _LW(
                "Unable to change the ownership of [fernet_tokens] "
                "key_repository without a keystone user ID and keystone group "
                "ID both being provided: %s"
            )
            % key_repo
        )
Ejemplo n.º 36
0
 def __setitem__(self, key, val):
     """Store *val*, reconciling conflicts for known identity attributes."""
     if key not in self.IDENTITY_ATTRIBUTES or key not in self:
         # Not an identity attribute, or first assignment: store as-is.
         return super(AuthContext, self).__setitem__(key, val)

     current = self[key]
     if key == 'expires_at':
         # special treatment for 'expires_at', we are going to take
         # the earliest expiration instead.
         if current != val:
             LOG.info(
                 _LI('"expires_at" has conflicting values '
                     '%(existing)s and %(new)s.  Will use the '
                     'earliest value.'),
                 {'existing': current, 'new': val})
         if current is None or val is None:
             val = current or val
         else:
             val = min(current, val)
     elif current != val:
         msg = _('Unable to reconcile identity attribute %(attribute)s '
                 'as it has conflicting values %(new)s and %(old)s') % (
                     {'attribute': key, 'new': val, 'old': current})
         raise exception.Unauthorized(msg)
     return super(AuthContext, self).__setitem__(key, val)
Ejemplo n.º 37
0
 def issue_v3_token(self,
                    user_id,
                    method_names,
                    expires_at=None,
                    project_id=None,
                    domain_id=None,
                    auth_context=None,
                    trust=None,
                    metadata_ref=None,
                    include_catalog=True,
                    parent_audit_id=None):
     """Issue a v3 token backed by the cached v2 token response."""
     LOG.info(_LI('Issuing token for user %s.'), user_id)
     v2_response = auth_context[const.TOKEN_RESPONSE]
     # The v2 token's expiry wins over any caller-supplied value.
     expires_at = v2_response['access']['token']['expires']
     self.v3_token_data_helper = RackspaceTokenDataHelper(v2_response)
     try:
         return super(Provider, self).issue_v3_token(
             user_id,
             method_names,
             expires_at=expires_at,
             project_id=project_id,
             domain_id=domain_id,
             auth_context=auth_context,
             trust=trust,
             metadata_ref=metadata_ref,
             include_catalog=include_catalog,
             parent_audit_id=parent_audit_id)
     finally:
         # Always clear the helper so state does not leak between requests.
         self.v3_token_data_helper = None
Ejemplo n.º 38
0
def get_admin_token(retry=True):
    """Authentication request to v2.

    Make a v2 authentication request to retrieve an admin token. Retry on
    first failure.
    """
    request_body = {
        'auth': {
            'passwordCredentials': {
                'username': CONF.service_admin.username,
                'password': CONF.service_admin.password,
            },
        },
    }
    resp = requests.post(CONF.rackspace.base_url + '/tokens',
                         headers=const.HEADERS,
                         json=request_body)
    if resp.status_code != requests.codes.ok:
        if retry:
            # Retry exactly once on any non-200 response.
            return get_admin_token(retry=False)
        msg = (_LI('Authentication failed against v2: response code %s') %
               resp.status_code)
        LOG.info(msg)
        raise RuntimeError(msg)
    return resp.json()['access']['token']['id']
Ejemplo n.º 39
0
def _create_new_key(keystone_user_id, keystone_group_id):
    """Securely create a new encryption key.

    Create a new key that is readable by the Keystone group and Keystone user.

    :param keystone_user_id: numeric uid to own the key file; pass a falsy
        value to skip changing ownership
    :param keystone_group_id: numeric gid to own the key file; both ids must
        be provided for ownership to be changed
    """
    key = fernet.Fernet.generate_key()  # key is bytes

    # This ensures the key created is not world-readable
    old_umask = os.umask(0o177)
    if keystone_user_id and keystone_group_id:
        # Save the current effective ids so they can be restored in the
        # finally block below, then switch to the keystone user/group so
        # the new key file is created with the right ownership.
        # NOTE(review): egid is set before euid — presumably because
        # dropping the uid first could remove the privilege needed to
        # change the gid; confirm before reordering.
        old_egid = os.getegid()
        old_euid = os.geteuid()
        os.setegid(keystone_group_id)
        os.seteuid(keystone_user_id)
    elif keystone_user_id or keystone_group_id:
        # Only one of the two ids was supplied; ownership cannot be changed.
        LOG.warning(_LW(
            'Unable to change the ownership of the new key without a keystone '
            'user ID and keystone group ID both being provided: %s') %
            CONF.fernet_tokens.key_repository)
    # Determine the file name of the new key
    key_file = os.path.join(CONF.fernet_tokens.key_repository, '0')
    try:
        with open(key_file, 'w') as f:
            f.write(key.decode('utf-8'))  # convert key to str for the file.
    finally:
        # After writing the key, set the umask back to its original value. Do
        # the same with group and user identifiers if a Keystone group or user
        # was supplied.  Restore in reverse order of how they were changed.
        os.umask(old_umask)
        if keystone_user_id and keystone_group_id:
            os.seteuid(old_euid)
            os.setegid(old_egid)

    LOG.info(_LI('Created a new key: %s'), key_file)
Ejemplo n.º 40
0
 def __setitem__(self, key, val):
     """Store *val*, reconciling conflicts for known identity attributes."""
     is_tracked = key in self.IDENTITY_ATTRIBUTES and key in self
     if is_tracked:
         previous = self[key]
         if key == "expires_at":
             # special treatment for 'expires_at', we are going to take
             # the earliest expiration instead.
             if previous != val:
                 LOG.info(
                     _LI(
                         '"expires_at" has conflicting values '
                         "%(existing)s and %(new)s.  Will use the "
                         "earliest value."
                     ),
                     {"existing": previous, "new": val},
                 )
             if previous is not None and val is not None:
                 val = min(previous, val)
             else:
                 val = previous or val
         elif previous != val:
             msg = _(
                 "Unable to reconcile identity attribute %(attribute)s "
                 "as it has conflicting values %(new)s and %(old)s"
             ) % ({"attribute": key, "new": val, "old": previous})
             raise exception.Unauthorized(msg)
     return super(AuthContext, self).__setitem__(key, val)
Ejemplo n.º 41
0
    def create_token(self, user_id, expires_at, audit_ids, methods=None,
                     domain_id=None, project_id=None, trust_id=None,
                     federated_info=None, access_token_id=None):
        """Given a set of payload attributes, generate a Fernet token."""
        # Select the first payload class whose creation arguments apply.
        for payload_class in PAYLOAD_CLASSES:
            if payload_class.create_arguments_apply(
                    project_id=project_id, domain_id=domain_id,
                    trust_id=trust_id, federated_info=federated_info,
                    access_token_id=access_token_id):
                break

        payload = payload_class.assemble(
            user_id, methods, project_id, domain_id, expires_at, audit_ids,
            trust_id, federated_info, access_token_id
        )

        # Prefix the payload with its format version before packing.
        serialized = msgpack.packb((payload_class.version,) + payload)
        token = self.pack(serialized)

        # NOTE(lbragstad): We should warn against Fernet tokens that are over
        # 255 characters in length. This is mostly due to persisting the tokens
        # in a backend store of some kind that might have a limit of 255
        # characters. Even though Keystone isn't storing a Fernet token
        # anywhere, we can't say it isn't being stored somewhere else with
        # those kind of backend constraints.
        if len(token) > 255:
            LOG.info(_LI('Fernet token created with length of %d '
                         'characters, which exceeds 255 characters'),
                     len(token))

        return token
Ejemplo n.º 42
0
def _expiry_range_batched(session, upper_bound_func, batch_size):
    """Yield the stop point of each batch for expiration.

    Yield the timestamp of the next token that is `batch_size` rows from
    being the oldest expired token, then finally the upper bound itself.

    :param session: database session to query tokens through
    :param upper_bound_func: callable returning the newest expiry to delete
    :param batch_size: approximate number of rows per deletion batch
    """

    # This expiry strategy splits the tokens into roughly equal sized batches
    # to be deleted.  It does this by finding the timestamp of a token
    # `batch_size` rows from the oldest token and yielding that to the caller.
    # It's expected that the caller will then delete all rows with a timestamp
    # equal to or older than the one yielded.  This may delete slightly more
    # tokens than the batch_size, but that should be ok in almost all cases.
    # Pass batch_size as a lazy logging argument (not eager '%') so the
    # message is only formatted when INFO is enabled, matching the logging
    # style used everywhere else in this module.
    LOG.info(_LI('Token expiration batch size: %d'), batch_size)
    query = session.query(TokenModel.expires)
    query = query.filter(TokenModel.expires < upper_bound_func())
    query = query.order_by(TokenModel.expires)
    query = query.offset(batch_size - 1)
    query = query.limit(1)
    while True:
        try:
            next_expiration = query.one()[0]
        except sql.NotFound:
            # There are less than `batch_size` rows remaining, so fall
            # through to the normal delete
            break
        yield next_expiration
    yield upper_bound_func()
Ejemplo n.º 43
0
    def flush_expired_tokens(self):
        """Delete all expired tokens in batches, committing between batches."""
        # The DBAPI itself is in a "never autocommit" mode,
        # BEGIN is emitted automatically as soon as any work is done,
        # COMMIT is emitted when SQLAlchemy invokes commit() on the
        # underlying DBAPI connection. So SQLAlchemy is only simulating
        # "begin" here in any case, it is in fact automatic by the DBAPI.
        with sql.session_for_write() as session:  # Calls session.begin()
            dialect = session.bind.dialect.name
            # Pick a batching strategy appropriate for the database dialect.
            expiry_range_func = self._expiry_range_strategy(dialect)
            query = session.query(TokenModel.expires)
            total_removed = 0
            upper_bound_func = _expiry_upper_bound_func
            for expiry_time in expiry_range_func(session, upper_bound_func):
                # Delete every token at or before this batch's cutoff time.
                delete_query = query.filter(TokenModel.expires <= expiry_time)
                row_count = delete_query.delete(synchronize_session=False)
                # Explicitly commit each batch so as to free up
                # resources early. We do not actually need
                # transactional semantics here.
                session.commit()  # Emits connection.commit() on DBAPI
                # Tells SQLAlchemy to "begin", e.g. hold a new connection
                # open in a transaction
                session.begin()
                total_removed += row_count
                LOG.debug('Removed %d total expired tokens', total_removed)

            # When the "with: " block ends, the final "session.commit()"
            # is emitted by enginefacade
            session.flush()
            LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
Ejemplo n.º 44
0
    def _set_key_mangler(self, key_mangler):
        # Install the key_mangler that is appropriate for this region.  The
        # key_mangler function is called prior to storing the value(s) in the
        # backend.  This is to help prevent collisions and limit issues such
        # as memcache's limited cache_key size.
        backend = self._region.backend
        use_backend_key_mangler = getattr(backend, 'use_backend_key_mangler',
                                          False)
        if ((key_mangler is None or use_backend_key_mangler)
                and (backend.key_mangler is not None)):
            # NOTE(morganfainberg): Use the configured key_mangler as a first
            # choice. Second choice would be the key_mangler defined by the
            # backend itself.  Finally, fall back to the defaults.  The one
            # exception is if the backend defines `use_backend_key_mangler`
            # as True, which indicates the backend's key_mangler should be
            # the first choice.
            key_mangler = backend.key_mangler

        if not CONF.kvs.enable_key_mangler:
            LOG.info(_LI('KVS region %s key_mangler disabled.'),
                     self._region.name)
            self._set_keymangler_on_backend(None)
            return

        if key_mangler is None:
            LOG.info(
                _LI('Using default dogpile sha1_mangle_key as KVS '
                    'region %s key_mangler'), self._region.name)
            # NOTE(morganfainberg): Sane 'default' keymangler is the
            # dogpile sha1_mangle_key function.  This ensures that unless
            # explicitly changed, we mangle keys.  This helps to limit
            # unintended cases of exceeding cache-key in backends such
            # as memcache.
            self._region.key_mangler = dogpile_util.sha1_mangle_key
        elif callable(key_mangler):
            self._region.key_mangler = key_mangler
            LOG.info(_LI('Using %(func)s as KVS region %(name)s key_mangler'),
                     {'func': key_mangler.__name__,
                      'name': self._region.name})
        else:
            # NOTE(morganfainberg): We failed to set the key_mangler,
            # we should error out here to ensure we aren't causing
            # key-length or collision issues.
            raise exception.ValidationError(
                _('`key_mangler` option must be a function reference'))
        self._set_keymangler_on_backend(self._region.key_mangler)
    def create_token(self,
                     user_id,
                     expires_at,
                     audit_ids,
                     methods=None,
                     domain_id=None,
                     project_id=None,
                     trust_id=None,
                     federated_info=None):
        """Given a set of payload attributes, generate a Fernet token."""
        # Choose the payload class (and its assemble arguments) based on the
        # scope implied by the supplied attributes; order matters here.
        if trust_id:
            payload_class = TrustScopedPayload
            assemble_args = (user_id, methods, project_id, expires_at,
                             audit_ids, trust_id)
        elif project_id and federated_info:
            payload_class = FederatedProjectScopedPayload
            assemble_args = (user_id, methods, project_id, expires_at,
                             audit_ids, federated_info)
        elif domain_id and federated_info:
            payload_class = FederatedDomainScopedPayload
            assemble_args = (user_id, methods, domain_id, expires_at,
                             audit_ids, federated_info)
        elif federated_info:
            payload_class = FederatedUnscopedPayload
            assemble_args = (user_id, methods, expires_at, audit_ids,
                             federated_info)
        elif project_id:
            payload_class = ProjectScopedPayload
            assemble_args = (user_id, methods, project_id, expires_at,
                             audit_ids)
        elif domain_id:
            payload_class = DomainScopedPayload
            assemble_args = (user_id, methods, domain_id, expires_at,
                             audit_ids)
        else:
            payload_class = UnscopedPayload
            assemble_args = (user_id, methods, expires_at, audit_ids)

        payload = payload_class.assemble(*assemble_args)

        # Prefix the payload with its format version before packing.
        serialized_payload = msgpack.packb((payload_class.version,) + payload)
        token = self.pack(serialized_payload)

        # NOTE(lbragstad): We should warn against Fernet tokens that are over
        # 255 characters in length. This is mostly due to persisting the tokens
        # in a backend store of some kind that might have a limit of 255
        # characters. Even though Keystone isn't storing a Fernet token
        # anywhere, we can't say it isn't being stored somewhere else with
        # those kind of backend constraints.
        if len(token) > 255:
            LOG.info(
                _LI('Fernet token created with length of %d '
                    'characters, which exceeds 255 characters'), len(token))

        return token
Ejemplo n.º 46
0
 def project_created_callback(self, service, resource_type, operation,
                              payload):
     """Log the details of a received project-created notification."""
     # The code below is merely an example.
     details = {'service': service, 'resource_type': resource_type,
                'operation': operation, 'payload': payload}
     LOG.info(_LI('Received the following notification: service %(service)s, '
                  'resource_type: %(resource_type)s, operation %(operation)s '
                  'payload %(payload)s'), details)
Ejemplo n.º 47
0
 def project_created_callback(self, service, resource_type, operation, payload):
     """Log the details of a received project-created notification."""
     # The code below is merely an example.
     LOG.info(
         _LI(
             "Received the following notification: service %(service)s, "
             "resource_type: %(resource_type)s, operation %(operation)s "
             "payload %(payload)s"
         ),
         {
             "service": service,
             "resource_type": resource_type,
             "operation": operation,
             "payload": payload,
         },
     )
Ejemplo n.º 48
0
    def _set_key_mangler(self, key_mangler):
        # Install the key_mangler that is appropriate for this region.  The
        # key_mangler function is called prior to storing the value(s) in the
        # backend.  This is to help prevent collisions and limit issues such
        # as memcache's limited cache_key size.
        backend = self._region.backend
        use_backend_key_mangler = getattr(backend, 'use_backend_key_mangler',
                                          False)
        if ((key_mangler is None or use_backend_key_mangler) and
                (backend.key_mangler is not None)):
            # NOTE(morganfainberg): Use the configured key_mangler as a first
            # choice. Second choice would be the key_mangler defined by the
            # backend itself.  Finally, fall back to the defaults.  The one
            # exception is if the backend defines `use_backend_key_mangler`
            # as True, which indicates the backend's key_mangler should be
            # the first choice.
            key_mangler = backend.key_mangler

        if not CONF.kvs.enable_key_mangler:
            LOG.info(_LI('KVS region %s key_mangler disabled.'),
                     self._region.name)
            self._set_keymangler_on_backend(None)
            return

        if key_mangler is None:
            LOG.info(_LI('Using default keystone.common.kvs.sha1_mangle_key '
                         'as KVS region %s key_mangler'), self._region.name)
            # NOTE(morganfainberg): Use 'default' keymangler to ensure
            # that unless explicitly changed, we mangle keys.  This helps
            # to limit unintended cases of exceeding cache-key in backends
            # such as memcache.
            self._region.key_mangler = sha1_mangle_key
        elif callable(key_mangler):
            self._region.key_mangler = key_mangler
            LOG.info(_LI('Using %(func)s as KVS region %(name)s key_mangler'),
                     {'func': key_mangler.__name__,
                      'name': self._region.name})
        else:
            # NOTE(morganfainberg): We failed to set the key_mangler,
            # we should error out here to ensure we aren't causing
            # key-length or collision issues.
            raise exception.ValidationError(
                _('`key_mangler` option must be a function reference'))
        self._set_keymangler_on_backend(self._region.key_mangler)
Ejemplo n.º 49
0
    def get_token_data(self,
                       user_id,
                       method_names,
                       extras=None,
                       domain_id=None,
                       project_id=None,
                       expires=None,
                       trust=None,
                       token=None,
                       include_catalog=True,
                       bind=None,
                       access_token=None,
                       issued_at=None,
                       audit_info=None):
        """Build a token data dict from the cached token response.

        Populates scope, user, roles, audit info, (optionally) the service
        catalog, and the token dates, then strips the raw upstream response
        from the result.

        :param user_id: ID of the user the token is for
        :param method_names: authentication methods used to obtain the token
        :param include_catalog: when True, the service catalog is included
            in the token data
        :returns: dict of the form ``{'token': token_data}``

        NOTE(review): several parameters (``extras``, ``token``, ``bind``)
        are accepted but not referenced in this body — presumably kept for
        interface compatibility with the base provider; confirm upstream.
        """
        username = self._token_data['access']['user']['name']
        LOG.info(_LI('Building token data for user %s.'), username)
        # const.TOKEN_RESPONSE keys the raw upstream response; it is removed
        # again below before returning (see the `del` near the end).
        token_data = {
            'methods': method_names,
            const.TOKEN_RESPONSE: self._token_data,
        }

        # Rackspace doesn't have projects that act as domains
        token_data['is_domain'] = False

        self._populate_scope(token_data, domain_id, project_id)
        self._populate_user(token_data, user_id, trust)
        self._populate_roles(token_data, user_id, domain_id, project_id, trust,
                             access_token)
        self._populate_audit_info(token_data, audit_info)

        if include_catalog:
            self._populate_service_catalog(token_data, user_id, domain_id,
                                           project_id, trust)
        self._populate_token_dates(token_data,
                                   expires=expires,
                                   trust=trust,
                                   issued_at=issued_at)

        # Remove Rackspace's response from token data
        del token_data['rackspace:token_response']

        LOG.info(_LI('Successfully built token data for user %s.'), username)
        return {'token': token_data}
Ejemplo n.º 50
0
    def _become_valid_new_key(self):
        """Make the tmp new key a valid new key.

        The tmp new key must be created by _create_tmp_new_key().
        """
        source = os.path.join(self.key_repository, '0.tmp')
        destination = os.path.join(self.key_repository, '0')

        # Atomically promote the staged key file to the valid key slot.
        os.rename(source, destination)

        LOG.info(_LI('Become a valid new key: %s'), destination)
Ejemplo n.º 51
0
    def flush_expired_tokens(self):
        """Delete expired tokens in batches, logging the running total."""
        session = sql.get_session()
        strategy = self._expiry_range_strategy(session.bind.dialect.name)
        expires_query = session.query(TokenModel.expires)
        removed = 0
        # Each value yielded by the strategy is a batch upper bound; delete
        # everything that expired at or before it.
        for batch_upper_bound in strategy(session, timeutils.utcnow):
            removed += (expires_query
                        .filter(TokenModel.expires <= batch_upper_bound)
                        .delete(synchronize_session=False))
            LOG.debug("Removed %d total expired tokens", removed)

        session.flush()
        LOG.info(_LI("Total expired tokens removed: %d"), removed)
Ejemplo n.º 52
0
    def flush_expired_tokens(self):
        """Delete expired tokens in batches within one write session."""
        with sql.session_for_write() as session:
            strategy = self._expiry_range_strategy(session.bind.dialect.name)
            expires_query = session.query(TokenModel.expires)
            removed = 0
            # The strategy yields successive batch upper bounds; each pass
            # deletes everything expired at or before that bound.
            for batch_upper_bound in strategy(session,
                                              _expiry_upper_bound_func):
                removed += (expires_query
                            .filter(TokenModel.expires <= batch_upper_bound)
                            .delete(synchronize_session=False))
                LOG.debug('Removed %d total expired tokens', removed)

            session.flush()
            LOG.info(_LI('Total expired tokens removed: %d'), removed)
Ejemplo n.º 53
0
 def exec_command(self, command):
     """Interpolate ``self.ssl_dictionary`` into *command* and run it.

     :param command: list of %-format strings; each part is interpolated
         against ``self.ssl_dictionary`` before execution.
     :raises CalledProcessError: if the command exits non-zero; its output
         is logged before re-raising.
     """
     to_exec = [part % self.ssl_dictionary for part in command]
     LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
     try:
         # NOTE(shaleh): use check_output instead of the simpler
         # `check_call()` in order to log any output from an error.
         environment.subprocess.check_output(
             to_exec,
             stderr=environment.subprocess.STDOUT)
     except environment.subprocess.CalledProcessError as e:
         LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s '
                       '- %(output)s'),
                   {'to_exec': to_exec,
                    'retcode': e.returncode,
                    'output': e.output})
         # Bare `raise` re-raises the active exception with its original
         # traceback intact (`raise e` resets the traceback on Python 2).
         raise
Ejemplo n.º 54
0
    def initialize_key_repository(self, keystone_user_id=None, keystone_group_id=None):
        """Create a key repository and bootstrap it with a key.

        :param keystone_user_id: User ID of the Keystone user.
        :param keystone_group_id: Group ID of the Keystone user.

        """
        primary_key_path = os.path.join(self.key_repository, "0")

        # Nothing to do if a primary key already exists.
        if os.access(primary_key_path, os.F_OK):
            LOG.info(_LI("Key repository is already initialized; aborting."))
            return

        # bootstrap an existing key
        self._create_new_key(keystone_user_id, keystone_group_id)

        # ensure that we end up with a primary and secondary key
        self.rotate_keys(keystone_user_id, keystone_group_id)
Ejemplo n.º 55
0
def _domain_config_finder(conf_dir):
    """Return a generator of all domain config files found in a directory.

    Domain configs match the filename pattern of
    'keystone.<domain_name>.conf'.

    :param conf_dir: directory tree to scan recursively
    :returns: generator yielding (filename, domain_name) tuples
    """
    LOG.info(_LI("Scanning %r for domain config files"), conf_dir)
    for r, d, f in os.walk(conf_dir):
        for fname in f:
            if fname.startswith(DOMAIN_CONF_FHEAD) and fname.endswith(DOMAIN_CONF_FTAIL):
                # A valid name needs at least two dots (head's and tail's),
                # e.g. 'keystone.mydomain.conf'; 'keystone.conf' has only one
                # and must fall through to the warning below.
                if fname.count(".") >= 2:
                    domain_name = fname[len(DOMAIN_CONF_FHEAD) : -len(DOMAIN_CONF_FTAIL)]
                    yield (os.path.join(r, fname), domain_name)
                    continue

            LOG.warning(_LW("Ignoring file (%s) while scanning " "domain config directory"), fname)
Ejemplo n.º 56
0
 def exec_command(self, command):
     """Interpolate ``self.ssl_dictionary`` into *command* and run it.

     :param command: list of %-format strings; each part is interpolated
         against ``self.ssl_dictionary`` before execution.
     :raises subprocess.CalledProcessError: if the command exits non-zero;
         its output is logged before re-raising.
     """
     to_exec = [part % self.ssl_dictionary for part in command]
     LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
     try:
         # NOTE(shaleh): use check_output instead of the simpler
         # `check_call()` in order to log any output from an error.
         subprocess.check_output(  # nosec : the arguments being passed
             # in are defined in this file and trusted to build CAs, keys
             # and certs
             to_exec,
             stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
         LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s '
                       '- %(output)s'),
                   {'to_exec': to_exec,
                    'retcode': e.returncode,
                    'output': e.output})
         # Bare `raise` re-raises the active exception with its original
         # traceback intact (`raise e` resets the traceback on Python 2).
         raise
Ejemplo n.º 57
0
def validate_token_bind(context, token_ref):
    """Enforce the configured token-bind policy against a token.

    Behavior depends on ``CONF.token.enforce_token_bind``:

    - ``disabled``: no checking at all.
    - ``permissive``: bind info is checked if present; unknown bind types
      are ignored.
    - ``strict``: like permissive, but unknown bind types are rejected.
    - ``required``: bind info must be present.
    - any other value: treated as a named bind type that must be present.

    :param context: request context; ``context['environment']`` supplies
        AUTH_TYPE and REMOTE_USER for the kerberos check.
    :param token_ref: token to validate; must be a
        ``token_model.KeystoneToken``.
    :raises exception.UnexpectedError: if token_ref is the wrong type.
    :raises exception.Unauthorized: if required bind information is missing
        or does not match the request environment.
    """
    bind_mode = CONF.token.enforce_token_bind

    if bind_mode == "disabled":
        return

    if not isinstance(token_ref, token_model.KeystoneToken):
        raise exception.UnexpectedError(_("token reference must be a " "KeystoneToken type, got: %s") % type(token_ref))
    bind = token_ref.bind

    # permissive and strict modes don't require there to be a bind
    permissive = bind_mode in ("permissive", "strict")

    # get the named mode if bind_mode is not one of the known
    name = None if permissive or bind_mode == "required" else bind_mode

    if not bind:
        if permissive:
            # no bind provided and none required
            return
        else:
            LOG.info(_LI("No bind information present in token"))
            raise exception.Unauthorized()

    if name and name not in bind:
        LOG.info(_LI("Named bind mode %s not in bind information"), name)
        raise exception.Unauthorized()

    # Every bind entry must be satisfied; any failure rejects the token.
    for bind_type, identifier in six.iteritems(bind):
        if bind_type == "kerberos":
            if not (context["environment"].get("AUTH_TYPE", "").lower() == "negotiate"):
                LOG.info(_LI("Kerberos credentials required and not present"))
                raise exception.Unauthorized()

            if not context["environment"].get("REMOTE_USER") == identifier:
                LOG.info(_LI("Kerberos credentials do not match " "those in bind"))
                raise exception.Unauthorized()

            LOG.info(_LI("Kerberos bind authentication successful"))

        elif bind_mode == "permissive":
            LOG.debug(
                ("Ignoring unknown bind for permissive mode: " "{%(bind_type)s: %(identifier)s}"),
                {"bind_type": bind_type, "identifier": identifier},
            )
        else:
            # strict/required/named modes reject bind types we can't verify.
            LOG.info(
                _LI("Couldn't verify unknown bind: " "{%(bind_type)s: %(identifier)s}"),
                {"bind_type": bind_type, "identifier": identifier},
            )
            raise exception.Unauthorized()
Ejemplo n.º 58
0
def load_keys():
    """Load keys from disk into a list.

    The first key in the list is the primary key used for encryption. All
    other keys are active secondary keys that can be used for decrypting
    tokens.

    :returns: list of key strings sorted by key number, descending (primary
        key first); empty list if the key repository fails validation.
    """
    if not validate_key_repository():
        return []

    key_repo = CONF.fernet_tokens.key_repository

    # build a dictionary of key_number:encryption_key pairs
    keys = dict()
    for filename in os.listdir(key_repo):
        try:
            key_id = int(filename)
        except ValueError:  # nosec : filename isn't a number, so this file
            # is not a key; skip it without opening it.
            continue
        path = os.path.join(key_repo, str(filename))
        if os.path.isfile(path):
            with open(path, 'r') as key_file:
                keys[key_id] = key_file.read()

    if len(keys) != CONF.fernet_tokens.max_active_keys:
        # If there haven't been enough key rotations to reach max_active_keys,
        # or if the configured value of max_active_keys has changed since the
        # last rotation, then reporting the discrepancy might be useful. Once
        # the number of keys matches max_active_keys, this log entry is too
        # repetitive to be useful.
        LOG.info(_LI(
            'Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: '
            '%(dir)s'), {
                'count': len(keys),
                'max': CONF.fernet_tokens.max_active_keys,
                'dir': CONF.fernet_tokens.key_repository})

    # return the encryption_keys, sorted by key number, descending
    return [keys[x] for x in sorted(keys.keys(), reverse=True)]