Esempio n. 1
0
def format_url(url, substitutions):
    """Formats a user-defined URL with the given substitutions.

    :param string url: the URL to be formatted
    :param dict substitutions: the dictionary used for substitution
    :returns: a formatted URL
    :raises exception.MalformedEndpoint: if the URL is not a string or any
        substitution fails

    """
    try:
        # Endpoint templates use $(var)s placeholders; convert to Python
        # %-format before substituting.
        result = url.replace('$(', '%(') % substitutions
    except AttributeError:
        # NOTE: use _LE like the other error logs in this function (the
        # original inconsistently used the plain _ marker here).
        LOG.error(_LE('Malformed endpoint - %(url)r is not a string'),
                  {"url": url})
        raise exception.MalformedEndpoint(endpoint=url)
    except KeyError as e:
        LOG.error(_LE("Malformed endpoint %(url)s - unknown key %(keyerror)s"),
                  {"url": url,
                   "keyerror": e})
        raise exception.MalformedEndpoint(endpoint=url)
    except TypeError as e:
        LOG.error(_LE("Malformed endpoint '%(url)s'. The following type error "
                      "occurred during string substitution: %(typeerror)s"),
                  {"url": url,
                   "typeerror": e})
        raise exception.MalformedEndpoint(endpoint=url)
    except ValueError:
        # The exception instance was previously bound but never used.
        LOG.error(_LE("Malformed endpoint %s - incomplete format "
                      "(are you missing a type notifier ?)"), url)
        raise exception.MalformedEndpoint(endpoint=url)
    return result
Esempio n. 2
0
def cms_sign_data(data_to_sign, signing_cert_file_name, signing_key_file_name,
                  outform=PKI_ASN1_FORM,
                  message_digest=DEFAULT_TOKEN_DIGEST_ALGORITHM):
    """Uses OpenSSL to sign a document.

    Produces a Base64 encoding of a DER formatted CMS Document
    http://en.wikipedia.org/wiki/Cryptographic_Message_Syntax

    :param data_to_sign: data to sign
    :param signing_cert_file_name:  path to the X509 certificate containing
        the public key associated with the private key used to sign the data
    :param signing_key_file_name: path to the private key used to sign
        the data
    :param outform: Format for the signed document PKIZ_CMS_FORM or
        PKI_ASN1_FORM
    :param message_digest: Digest algorithm to use when signing or resigning
    :returns: the signed document decoded to text when outform is
        PKI_ASN1_FORM, otherwise the raw bytes from openssl
    :raises subprocess.CalledProcessError: if the openssl invocation fails

    """
    # Presumably verifies a usable subprocess implementation is available
    # before shelling out -- TODO confirm against _ensure_subprocess.
    _ensure_subprocess()
    # openssl reads bytes on stdin; encode text input as UTF-8 first.
    if isinstance(data_to_sign, six.string_types):
        data = bytearray(data_to_sign, encoding='utf-8')
    else:
        data = data_to_sign
    process = subprocess.Popen(['openssl', 'cms', '-sign',
                                '-signer', signing_cert_file_name,
                                '-inkey', signing_key_file_name,
                                '-outform', 'PEM',
                                '-nosmimecap', '-nodetach',
                                '-nocerts', '-noattr',
                                '-md', message_digest, ],
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               close_fds=True)

    # Helper wraps communicate() and converts OSError into a friendlier
    # failure naming the certificate/key files involved.
    output, err, retcode = _process_communicate_handle_oserror(
        process, data, (signing_cert_file_name, signing_key_file_name))

    # NOTE(review): stderr is scanned for 'Error' in addition to the exit
    # code -- presumably some openssl failures still exit 0; confirm.
    if retcode != OpensslCmsExitStatus.SUCCESS or ('Error' in err):
        if retcode == OpensslCmsExitStatus.CREATE_CMS_READ_MIME_ERROR:
            LOG.error(_LE('Signing error: Unable to load certificate - '
                          'ensure you have configured PKI with '
                          '"keystone-manage pki_setup"'))
        else:
            LOG.error(_LE('Signing error: %s'), err)
        raise subprocess.CalledProcessError(retcode, 'openssl')
    if outform == PKI_ASN1_FORM:
        # PEM output is ASCII-safe, so decoding to text is lossless here.
        return output.decode('utf-8')
    else:
        return output
Esempio n. 3
0
def format_url(url, substitutions, silent_keyerror_failures=None):
    """Formats a user-defined URL with the given substitutions.

    :param string url: the URL to be formatted
    :param dict substitutions: the dictionary used for substitution
    :param list silent_keyerror_failures: keys for which we should be silent
        if there is a KeyError exception on substitution attempt
    :returns: a formatted URL, or None when a silenced KeyError occurred
    :raises exception.MalformedEndpoint: on any other substitution failure

    """

    WHITELISTED_PROPERTIES = [
        "tenant_id",
        "user_id",
        "public_bind_host",
        "admin_bind_host",
        "compute_host",
        "admin_port",
        "public_port",
        "public_endpoint",
        "admin_endpoint",
    ]

    # Only expose whitelisted keys to user-defined endpoint templates.
    substitutions = utils.WhiteListedItemFilter(WHITELISTED_PROPERTIES, substitutions)
    allow_keyerror = silent_keyerror_failures or []
    try:
        # Endpoint templates use $(var)s; convert to %-format and substitute.
        result = url.replace("$(", "%(") % substitutions
    except AttributeError:
        LOG.error(_LE("Malformed endpoint - %(url)r is not a string"), {"url": url})
        raise exception.MalformedEndpoint(endpoint=url)
    except KeyError as e:
        if not e.args or e.args[0] not in allow_keyerror:
            LOG.error(_LE("Malformed endpoint %(url)s - unknown key " "%(keyerror)s"), {"url": url, "keyerror": e})
            raise exception.MalformedEndpoint(endpoint=url)
        else:
            # The caller asked us to be silent for this key.
            result = None
    except TypeError as e:
        LOG.error(
            _LE(
                "Malformed endpoint '%(url)s'. The following type error "
                "occurred during string substitution: %(typeerror)s"
            ),
            {"url": url, "typeerror": e},
        )
        raise exception.MalformedEndpoint(endpoint=url)
    except ValueError:
        # The exception instance was previously bound but never used.
        LOG.error(_LE("Malformed endpoint %s - incomplete format " "(are you missing a type notifier ?)"), url)
        raise exception.MalformedEndpoint(endpoint=url)
    return result
Esempio n. 4
0
        def _look_for_policy_for_region_and_service(endpoint):
            """Look in the region and its parents for a policy.

            Examine the region of the endpoint for a policy appropriate for
            the service of the endpoint. If there isn't a match, then chase up
            the region tree to find one.

            :param endpoint: endpoint dict; 'region_id' and 'service_id'
                are read
            :returns: the matching policy ID, or None when no association
                exists (including when the region tree contains a cycle)
            """
            region_id = endpoint['region_id']
            # Track regions already visited so a cyclic or repeated parent
            # chain cannot loop forever.
            regions_examined = []
            while region_id is not None:
                try:
                    ref = self.driver.get_policy_association(
                        service_id=endpoint['service_id'], region_id=region_id)
                    return ref['policy_id']
                except exception.PolicyAssociationNotFound:  # nosec
                    # There wasn't one for that region & service, handle below.
                    pass

                # There wasn't one for that region & service, let's
                # chase up the region tree
                regions_examined.append(region_id)
                region = self.catalog_api.get_region(region_id)
                region_id = None
                if region.get('parent_region_id') is not None:
                    region_id = region['parent_region_id']
                    if region_id in regions_examined:
                        msg = _LE('Circular reference or a repeated entry '
                                  'found in region tree - %(region_id)s.')
                        LOG.error(msg, {'region_id': region_id})
                        break
Esempio n. 5
0
    def clean_up_existing_files(self):
        """Remove (when rebuilding) or report pre-existing SSL files.

        When ``self.rebuild`` is set, delete each existing file; otherwise
        collect and return the paths that already exist.
        """
        keys = ('ca_private_key', 'ca_cert', 'signing_key', 'signing_cert')
        existing_files = []

        for file_path in (self.ssl_dictionary[k] for k in keys):
            if not file_exists(file_path):
                continue
            if not self.rebuild:
                existing_files.append(file_path)
                continue
            # The file exists but the user wants to rebuild it, so blow
            # it away
            try:
                os.remove(file_path)
            except OSError as exc:
                LOG.error(
                    _LE('Failed to remove file %(file_path)r: '
                        '%(error)s'),
                    {'file_path': file_path, 'error': exc.strerror})
                raise

        return existing_files
Esempio n. 6
0
def _send_notification(operation, resource_type, resource_id, public=True):
    """Send notification to inform observers about the affected resource.

    This method doesn't raise an exception when sending the notification fails.

    :param operation: operation being performed (created, updated, or deleted)
    :param resource_type: type of resource being operated on
    :param resource_id: ID of resource being operated on
    :param public:  if True (default), the event will be sent
                    to the notifier API.
                    if False, the event will only be sent via
                    notify_event_callbacks to in process listeners.
    """
    payload = {'resource_info': resource_id}

    # In-process listeners always hear about the event.
    notify_event_callbacks(SERVICE, resource_type, operation, payload)

    if not public:
        return

    notifier = _get_notifier()
    if not notifier:
        return

    event_type = '%(service)s.%(resource_type)s.%(operation)s' % {
        'service': SERVICE,
        'resource_type': resource_type,
        'operation': operation}
    try:
        notifier.info({}, event_type, payload)
    except Exception:
        # Delivery is best-effort; never let a notifier failure propagate.
        LOG.exception(_LE(
            'Failed to send %(res_id)s %(event_type)s notification'),
            {'res_id': resource_id, 'event_type': event_type})
Esempio n. 7
0
    def _is_valid_token(self, token):
        """Verify the token is valid format and has not expired."""
        now = timeutils.normalize_time(timeutils.utcnow())

        try:
            # Get the data we need from the correct location (V2 and V3 tokens
            # differ in structure, Try V3 first, fall back to V2 second)
            body = token.get('token', token.get('access'))
            expires_at = body.get('expires_at', body.get('expires'))
            if not expires_at:
                expires_at = body['token']['expires']
            expiry = timeutils.normalize_time(
                timeutils.parse_isotime(expires_at))
        except Exception:
            LOG.exception(_LE('Unexpected error or malformed token '
                              'determining token expiry: %s'), token)
            raise exception.TokenNotFound(_('Failed to validate token'))

        if now >= expiry:
            raise exception.TokenNotFound(_('Failed to validate token'))

        self.check_revocation(token)
        # Token has not expired and has not been revoked.
        return None
Esempio n. 8
0
    def clean_up_existing_files(self):
        """Delete (when rebuilding) or collect pre-existing SSL files."""
        candidates = [self.ssl_dictionary['ca_private_key'],
                      self.ssl_dictionary['ca_cert'],
                      self.ssl_dictionary['signing_key'],
                      self.ssl_dictionary['signing_cert']]

        leftovers = []

        for path in candidates:
            if not file_exists(path):
                continue
            if self.rebuild:
                # The file exists but the user wants to rebuild it, so blow
                # it away
                try:
                    os.remove(path)
                except OSError as exc:
                    LOG.error(_LE('Failed to remove file %(file_path)r: '
                                  '%(error)s'),
                              {'file_path': path,
                               'error': exc.strerror})
                    raise
            else:
                leftovers.append(path)

        return leftovers
Esempio n. 9
0
    def listen(self, key=None, backlog=128):
        """Create and start listening on socket.

        Call before forking worker processes.

        Raises Exception if this has already been called.
        """

        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix.
        # Please refer below link
        # (https://bitbucket.org/eventlet/eventlet/
        # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/
        # greendns.py?at=0.12#cl-163)
        family, _socktype, _proto, _canonname, sockaddr = socket.getaddrinfo(
            self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]

        try:
            self.socket = eventlet.listen(sockaddr, family=family,
                                          backlog=backlog)
        except EnvironmentError:
            LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
                      {'host': self.host, 'port': self.port})
            raise

        LOG.info(_LI('Starting %(arg0)s on %(host)s:%(port)s'),
                 {'arg0': sys.argv[0],
                  'host': self.host,
                  'port': self.port})
Esempio n. 10
0
    def validate_v2_token(self, token_ref):
        """Return cached token data in V2 format, rejecting trust scope."""
        try:
            self._assert_is_not_federation_token(token_ref)
            self._assert_default_domain(token_ref)
            # FIXME(gyee): performance or correctness? Should we return the
            # cached token or reconstruct it? Obviously if we are going with
            # the cached token, any role, project, or domain name changes
            # will not be reflected. One may argue that with PKI tokens,
            # we are essentially doing cached token validation anyway.
            # Lets go with the cached token strategy. Since token
            # management layer is now pluggable, one can always provide
            # their own implementation to suit their needs.
            token_data = token_ref.get('token_data')
            if self.get_token_version(token_data) != token.provider.V2:
                # Validate the V3 token as V2
                token_data = self.v2_token_data_helper.v3_to_v2_token(
                    token_data)

            if token_data['access'].get('trust', {}).get('id'):
                msg = ('Unable to validate trust-scoped tokens using version '
                       'v2.0 API.')
                raise exception.Unauthorized(msg)

            return token_data
        except exception.ValidationError:
            LOG.exception(_LE('Failed to validate token'))
            token_id = token_ref['token_data']['access']['token']['id']
            raise exception.TokenNotFound(token_id=token_id)
Esempio n. 11
0
    def _add_to_revocation_list(self, data, lock):
        """Record a revoked token in the backend revocation list.

        Expired entries are pruned while the list is rewritten, so the
        stored list stays as small as possible.

        :param data: token dict; the 'expires' and 'id' fields are read
        :param lock: lock passed through to the key-store write
        """
        filtered_list = []
        revoked_token_data = {}

        current_time = self._get_current_time()
        expires = data['expires']

        # 'expires' may arrive as an ISO8601 string or as a datetime.
        if isinstance(expires, six.string_types):
            expires = timeutils.parse_isotime(expires)

        expires = timeutils.normalize_time(expires)

        # A token that is already expired needs no revocation entry.
        if expires < current_time:
            LOG.warning(
                _LW('Token `%s` is expired, not adding to the '
                    'revocation list.'), data['id'])
            return

        revoked_token_data['expires'] = timeutils.isotime(expires,
                                                          subsecond=True)
        revoked_token_data['id'] = data['id']

        token_list = self._get_key_or_default(self.revocation_key, default=[])
        if not isinstance(token_list, list):
            # NOTE(morganfainberg): In the case that the revocation list is not
            # in a format we understand, reinitialize it. This is an attempt to
            # not allow the revocation list to be completely broken if
            # somehow the key is changed outside of keystone (e.g. memcache
            # that is shared by multiple applications). Logging occurs at error
            # level so that the cloud administrators have some awareness that
            # the revocation_list needed to be cleared out. In all, this should
            # be recoverable. Keystone cannot control external applications
            # from changing a key in some backends, however, it is possible to
            # gracefully handle and notify of this event.
            LOG.error(
                _LE('Reinitializing revocation list due to error '
                    'in loading revocation list from backend.  '
                    'Expected `list` type got `%(type)s`. Old '
                    'revocation list data: %(list)r'), {
                        'type': type(token_list),
                        'list': token_list
                    })
            token_list = []

        # NOTE(morganfainberg): on revocation, cleanup the expired entries, try
        # to keep the list of tokens revoked at the minimum.
        for token_data in token_list:
            try:
                expires_at = timeutils.normalize_time(
                    timeutils.parse_isotime(token_data['expires']))
            except ValueError:
                # Unparseable expiry means the entry is unusable; drop it.
                LOG.warning(
                    _LW('Removing `%s` from revocation list due to '
                        'invalid expires data in revocation list.'),
                    token_data.get('id', 'INVALID_TOKEN_DATA'))
                continue
            if expires_at > current_time:
                filtered_list.append(token_data)
        filtered_list.append(revoked_token_data)
        # Persist the pruned list plus the new entry.
        self._set_key(self.revocation_key, filtered_list, lock)
Esempio n. 12
0
def _send_audit_notification(action, initiator, outcome, target, event_type,
                             **kwargs):
    """Send CADF notification to inform observers about the affected resource.

    This method logs an exception when sending the notification fails.

    :param action: CADF action being audited (e.g., 'authenticate')
    :param initiator: CADF resource representing the initiator
    :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING,
        taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE)
    :param target: CADF resource representing the target
    :param event_type: An OpenStack-ism, typically this is the meter name that
        Ceilometer uses to poll events.
    :param kwargs: Any additional arguments passed in will be added as
        key-value pairs to the CADF event.

    """
    # Honor operator opt-outs for this event type/outcome combination.
    if _check_notification_opt_out(event_type, outcome):
        return

    # Lazily create the catalog helper once and reuse it on later calls.
    global _CATALOG_HELPER_OBJ
    if _CATALOG_HELPER_OBJ is None:
        _CATALOG_HELPER_OBJ = _CatalogHelperObj()
    service_list = _CATALOG_HELPER_OBJ.catalog_api.list_services()
    service_id = None

    # Find this service's own catalog entry so the observer can be
    # identified by service ID below.
    for i in service_list:
        if i['type'] == SERVICE:
            service_id = i['id']
            break

    event = eventfactory.EventFactory().new_event(
        eventType=cadftype.EVENTTYPE_ACTIVITY,
        outcome=outcome,
        action=action,
        initiator=initiator,
        target=target,
        observer=resource.Resource(typeURI=taxonomy.SERVICE_SECURITY))

    if service_id is not None:
        event.observer.id = service_id

    # Attach any caller-supplied extras as attributes on the CADF event.
    for key, value in kwargs.items():
        setattr(event, key, value)

    context = {}
    payload = event.as_dict()
    notifier = _get_notifier()

    if notifier:
        try:
            notifier.info(context, event_type, payload)
        except Exception:
            # diaper defense: any exception that occurs while emitting the
            # notification should not interfere with the API request
            LOG.exception(
                _LE('Failed to send %(action)s %(event_type)s notification'), {
                    'action': action,
                    'event_type': event_type
                })
Esempio n. 13
0
    def list_project_parents(self, project_id):
        """List a project's ancestors.

        The current manager expects the ancestor tree to end with the project
        acting as the domain (since that's now the top of the tree), but a
        legacy driver will not have that top project in their projects table,
        since it's still in the domain table. Hence we lift the algorithm for
        traversing up the tree from the driver to here, so that our version of
        get_project() is called, which will fetch the "project" from the right
        table.

        """
        current = self.get_project(project_id)
        parents = []
        seen = set()
        while current.get('parent_id') is not None:
            current_id = current['id']
            if current_id in seen:
                msg = _LE('Circular reference or a repeated '
                          'entry found in projects hierarchy - '
                          '%(project_id)s.')
                LOG.error(msg, {'project_id': current_id})
                return

            seen.add(current_id)
            current = self.get_project(current['parent_id'])
            parents.append(current)
        return parents
Esempio n. 14
0
 def exec_command(self, command):
     """Interpolate ssl_dictionary into *command* and run it.

     :param command: iterable of %-format template strings forming the
         command line; each is filled from self.ssl_dictionary
     :raises environment.subprocess.CalledProcessError: if the command
         exits non-zero
     """
     to_exec = []
     for cmd_part in command:
         # Substitute configured values (paths, names, ...) into each
         # command fragment.
         to_exec.append(cmd_part % self.ssl_dictionary)
     LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
     # NOTE(Jeffrey4l): Redirect both stdout and stderr to pipe, so the
     # output can be captured.
     # NOTE(Jeffrey4l): check_output is not compatible with Python 2.6.
     # So use Popen instead.
     process = environment.subprocess.Popen(
         to_exec,
         stdout=environment.subprocess.PIPE,
         stderr=environment.subprocess.STDOUT)
     output = process.communicate()[0]
     retcode = process.poll()
     if retcode:
         # NOTE(review): the message renders as '...exited with <code>- ...'
         # (missing space before the dash); confirm before changing the
         # translated string.
         LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s'
                       '- %(output)s'),
                   {'to_exec': to_exec,
                    'retcode': retcode,
                    'output': output})
         e = environment.subprocess.CalledProcessError(retcode, to_exec[0])
         # NOTE(Jeffrey4l): Python 2.6 compatibility:
         # CalledProcessError did not have output keyword argument
         e.output = output
         raise e
Esempio n. 15
0
    def validate_key_repository(self, requires_write=False):
        """Validate permissions on the key repository directory."""
        # NOTE(lbragstad): We shouldn't need to check if the directory was
        # passed in as None because we don't set allow_no_values to True.

        # ensure current user has sufficient access to the key repository
        required_modes = [os.R_OK, os.X_OK]
        if requires_write:
            required_modes.append(os.W_OK)
        is_valid = all(os.access(self.key_repository, m) for m in required_modes)

        if not is_valid:
            LOG.error(
                _LE(
                    "Either [fernet_tokens] key_repository does not exist or "
                    "Keystone does not have sufficient permission to access "
                    "it: %s"
                ),
                self.key_repository,
            )
        else:
            # ensure the key repository isn't world-readable
            mode = os.stat(self.key_repository).st_mode
            if mode & (stat.S_IROTH | stat.S_IXOTH):
                LOG.warning(_LW("key_repository is world readable: %s"), self.key_repository)

        return is_valid
Esempio n. 16
0
    def validate_v2_token(self, token_ref):
        """Return cached token data in V2 format, validating any trust."""
        try:
            self._assert_is_not_federation_token(token_ref)
            self._assert_default_domain(token_ref)
            # FIXME(gyee): performance or correctness? Should we return the
            # cached token or reconstruct it? Obviously if we are going with
            # the cached token, any role, project, or domain name changes
            # will not be reflected. One may argue that with PKI tokens,
            # we are essentially doing cached token validation anyway.
            # Lets go with the cached token strategy. Since token
            # management layer is now pluggable, one can always provide
            # their own implementation to suit their needs.
            token_data = token_ref.get('token_data')
            if self.get_token_version(token_data) != token.provider.V2:
                # Validate the V3 token as V2
                token_data = self.v2_token_data_helper.v3_to_v2_token(
                    token_data)

            trust_id = token_data['access'].get('trust', {}).get('id')
            if trust_id:
                # token trust validation
                self.trust_api.get_trust(trust_id)
        except exception.ValidationError as e:
            LOG.exception(_LE('Failed to validate token'))
            raise exception.TokenNotFound(e)
        return token_data
Esempio n. 17
0
    def create_key_directory(self, keystone_user_id=None, keystone_group_id=None):
        """Attempt to create the key directory if it doesn't exist.

        :param keystone_user_id: numeric uid that should own the directory;
            applied only when the group ID is also given
        :param keystone_group_id: numeric gid that should own the directory;
            applied only when the user ID is also given
        """
        if not os.access(self.key_repository, os.F_OK):
            LOG.info(_LI("key_repository does not appear to exist; attempting to " "create it"))

            try:
                os.makedirs(self.key_repository, 0o700)
            except OSError:
                # Best effort: the chown below may still fail if the
                # directory could not be created.
                LOG.error(
                    _LE(
                        "Failed to create key_repository: either it already "
                        "exists or you don't have sufficient permissions to "
                        "create it"
                    )
                )

            if keystone_user_id and keystone_group_id:
                os.chown(self.key_repository, keystone_user_id, keystone_group_id)
            elif keystone_user_id or keystone_group_id:
                # Pass the argument lazily instead of eagerly applying '%'
                # to the translated string, per oslo logging guidelines.
                LOG.warning(
                    _LW(
                        "Unable to change the ownership of key_repository without "
                        "a keystone user ID and keystone group ID both being "
                        "provided: %s"
                    ),
                    self.key_repository,
                )
Esempio n. 18
0
    def validate_key_repository(self, requires_write=False):
        """Validate permissions on the key repository directory."""
        # NOTE(lbragstad): We shouldn't need to check if the directory was
        # passed in as None because we don't set allow_no_values to True.

        repo = self.key_repository

        # ensure current user has sufficient access to the key repository
        is_valid = os.access(repo, os.R_OK) and os.access(repo, os.X_OK)
        if requires_write:
            is_valid = is_valid and os.access(repo, os.W_OK)

        if is_valid:
            # ensure the key repository isn't world-readable
            stat_info = os.stat(repo)
            if stat_info.st_mode & (stat.S_IROTH | stat.S_IXOTH):
                LOG.warning(_LW('key_repository is world readable: %s'),
                            repo)
        else:
            LOG.error(
                _LE('Either [fernet_tokens] key_repository does not exist or '
                    'Keystone does not have sufficient permission to access '
                    'it: %s'), repo)

        return is_valid
Esempio n. 19
0
    def create_key_directory(self,
                             keystone_user_id=None,
                             keystone_group_id=None):
        """Attempt to create the key directory if it doesn't exist.

        :param keystone_user_id: numeric uid that should own the directory;
            applied only when the group ID is also given
        :param keystone_group_id: numeric gid that should own the directory;
            applied only when the user ID is also given
        """
        if not os.access(self.key_repository, os.F_OK):
            LOG.info(
                _LI('key_repository does not appear to exist; attempting to '
                    'create it'))

            try:
                os.makedirs(self.key_repository, 0o700)
            except OSError:
                # Best effort: the chown below may still fail if the
                # directory could not be created.
                LOG.error(
                    _LE('Failed to create key_repository: either it already '
                        'exists or you don\'t have sufficient permissions to '
                        'create it'))

            if keystone_user_id and keystone_group_id:
                os.chown(self.key_repository, keystone_user_id,
                         keystone_group_id)
            elif keystone_user_id or keystone_group_id:
                # Pass the argument lazily instead of eagerly applying '%'
                # to the translated string, per oslo logging guidelines.
                LOG.warning(
                    _LW('Unable to change the ownership of key_repository without '
                        'a keystone user ID and keystone group ID both being '
                        'provided: %s'), self.key_repository)
Esempio n. 20
0
    def _is_valid_token(self, token):
        """Verify the token is valid format and has not expired."""
        current_time = timeutils.normalize_time(timeutils.utcnow())

        try:
            # Get the data we need from the correct location (V2 and V3 tokens
            # differ in structure, Try V3 first, fall back to V2 second)
            token_data = token.get('token', token.get('access'))
            expires_at = token_data.get('expires_at',
                                        token_data.get('expires'))
            expiry = timeutils.normalize_time(
                timeutils.parse_isotime(
                    expires_at if expires_at
                    else token_data['token']['expires']))
        except Exception:
            LOG.exception(_LE('Unexpected error or malformed token '
                              'determining token expiry: %s'), token)
            raise exception.TokenNotFound(_('Failed to validate token'))

        if current_time >= expiry:
            raise exception.TokenNotFound(_('Failed to validate token'))

        self.check_revocation(token)
        # Token has not expired and has not been revoked.
        return None
Esempio n. 21
0
    def list_project_parents(self, project_id):
        """List a project's ancestors.

        The current manager expects the ancestor tree to end with the project
        acting as the domain (since that's now the top of the tree), but a
        legacy driver will not have that top project in their projects table,
        since it's still in the domain table. Hence we lift the algorithm for
        traversing up the tree from the driver to here, so that our version of
        get_project() is called, which will fetch the "project" from the right
        table.

        """
        project = self.get_project(project_id)
        parents = []
        visited = set()
        while project.get('parent_id') is not None:
            if project['id'] in visited:
                # Abort (returning None) instead of looping forever on a
                # cyclic hierarchy.
                LOG.error(_LE('Circular reference or a repeated '
                              'entry found in projects hierarchy - '
                              '%(project_id)s.'),
                          {'project_id': project['id']})
                return
            visited.add(project['id'])
            parent = self.get_project(project['parent_id'])
            parents.append(parent)
            project = parent
        return parents
Esempio n. 22
0
def _send_notification(operation, resource_type, resource_id, public=True):
    """Send notification to inform observers about the affected resource.

    This method doesn't raise an exception when sending the notification fails.

    :param operation: operation being performed (created, updated, or deleted)
    :param resource_type: type of resource being operated on
    :param resource_id: ID of resource being operated on
    :param public:  if True (default), the event will be sent
                    to the notifier API.
                    if False, the event will only be sent via
                    notify_event_callbacks to in process listeners.
    """
    payload = {'resource_info': resource_id}

    notify_event_callbacks(SERVICE, resource_type, operation, payload)

    # Only send this notification if the 'basic' format is used, otherwise
    # let the CADF functions handle sending the notification. But we check
    # here so as to not disrupt the notify_event_callbacks function.
    if not (public and CONF.notification_format == 'basic'):
        return

    notifier = _get_notifier()
    if not notifier:
        return

    event_type = '%(service)s.%(resource_type)s.%(operation)s' % {
        'service': SERVICE,
        'resource_type': resource_type,
        'operation': operation}
    try:
        notifier.info({}, event_type, payload)
    except Exception:
        # Delivery is best-effort; never let a notifier failure propagate.
        LOG.exception(_LE(
            'Failed to send %(res_id)s %(event_type)s notification'),
            {'res_id': resource_id, 'event_type': event_type})
Esempio n. 23
0
def build_cache_config():
    """Build the cache region dictionary configuration.

    Translates the ``[cache]`` options from CONF into the flat key/value
    dictionary format expected by dogpile.cache region configuration.

    :returns: dict of cache configuration keys
    """
    prefix = CONF.cache.config_prefix
    conf_dict = {}
    conf_dict['%s.backend' % prefix] = CONF.cache.backend
    conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
    for argument in CONF.cache.backend_argument:
        try:
            (argname, argvalue) = argument.split(':', 1)
        except ValueError:
            msg = _LE('Unable to build cache config-key. Expected format '
                      '"<argname>:<value>". Skipping unknown format: %s')
            LOG.error(msg, argument)
            continue

        arg_key = '.'.join([prefix, 'arguments', argname])
        conf_dict[arg_key] = argvalue

    # NOTE(yorik-sar): these arguments will be used for memcache-related
    # backends. Use setdefault for url to support old-style setting through
    # backend_argument=url:127.0.0.1:11211
    conf_dict.setdefault('%s.arguments.url' % prefix,
                         CONF.cache.memcache_servers)
    for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
                'pool_unused_timeout', 'pool_connection_get_timeout'):
        value = getattr(CONF.cache, 'memcache_' + arg)
        conf_dict['%s.arguments.%s' % (prefix, arg)] = value

    # Log the fully-built configuration exactly once, instead of logging a
    # partial dict on every backend_argument iteration as before.
    LOG.debug('Keystone Cache Config: %s', conf_dict)
    return conf_dict
Esempio n. 24
0
def _send_notification(operation, resource_type, resource_id, public=True):
    """Notify observers that *resource_id* was affected by *operation*.

    Sending failures are logged and swallowed, never raised.

    :param operation: operation being performed (created, updated, or deleted)
    :param resource_type: type of resource being operated on
    :param resource_id: ID of resource being operated on
    :param public: if True (default), also emit through the notifier API;
                   if False, only in-process listeners registered via
                   notify_event_callbacks are informed.
    """
    service = 'identity'
    payload = {'resource_info': resource_id}

    notify_event_callbacks(service, resource_type, operation, payload)

    if not public:
        return

    notifier = _get_notifier()
    if not notifier:
        return

    event_type = '%(service)s.%(resource_type)s.%(operation)s' % {
        'service': service,
        'resource_type': resource_type,
        'operation': operation}
    try:
        notifier.info({}, event_type, payload)
    except Exception:
        LOG.exception(_LE(
            'Failed to send %(res_id)s %(event_type)s notification'),
            {'res_id': resource_id, 'event_type': event_type})
Esempio n. 25
0
    def listen(self, key=None, backlog=128):
        """Create and start listening on socket.

        Call before forking worker processes.

        Raises Exception if this has already been called.
        """
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix.
        # Please refer below link
        # (https://bitbucket.org/eventlet/eventlet/
        # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/
        # greendns.py?at=0.12#cl-163)
        addr_info = socket.getaddrinfo(self.host,
                                       self.port,
                                       socket.AF_UNSPEC,
                                       socket.SOCK_STREAM)[0]
        family = addr_info[0]
        sockaddr = addr_info[-1]

        try:
            self.socket = eventlet.listen(sockaddr, family=family,
                                          backlog=backlog)
        except EnvironmentError:
            LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
                      {'host': self.host, 'port': self.port})
            raise

        LOG.info(_LI('Starting %(arg0)s on %(host)s:%(port)s'),
                 {'arg0': sys.argv[0],
                  'host': self.host,
                  'port': self.port})
Esempio n. 26
0
 def exec_command(self, command):
     """Run *command*, %-formatting each part against self.ssl_dictionary.

     stdout and stderr are captured together; a non-zero exit status is
     logged and re-raised as CalledProcessError with the combined output
     attached on ``e.output``.
     """
     to_exec = []
     for cmd_part in command:
         # Each command element may contain %-style placeholders that are
         # filled from the instance's ssl_dictionary.
         to_exec.append(cmd_part % self.ssl_dictionary)
     LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
     # NOTE(Jeffrey4l): Redirect both stdout and stderr to pipe, so the
     # output can be captured.
     # NOTE(Jeffrey4l): check_output is not compatible with Python 2.6.
     # So use Popen instead.
     process = environment.subprocess.Popen(
         to_exec,
         stdout=environment.subprocess.PIPE,
         stderr=environment.subprocess.STDOUT)
     output = process.communicate()[0]
     retcode = process.poll()
     if retcode:
         LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s'
                       '- %(output)s'),
                   {'to_exec': to_exec,
                    'retcode': retcode,
                    'output': output})
         e = environment.subprocess.CalledProcessError(retcode, to_exec[0])
         # NOTE(Jeffrey4l): Python 2.6 compatibility:
         # CalledProcessError did not have output keyword argument
         e.output = output
         raise e
Esempio n. 27
0
def create_key_directory(keystone_user_id=None, keystone_group_id=None):
    """If the configured key directory does not exist, attempt to create it.

    :param keystone_user_id: numeric user ID to own the repository, or None
    :param keystone_group_id: numeric group ID to own the repository, or None
    """
    if not os.access(CONF.fernet_tokens.key_repository, os.F_OK):
        LOG.info(_LI("[fernet_tokens] key_repository does not appear to "
                     "exist; attempting to create it"))

        try:
            # 0o700: keys must not be readable by group or others.
            os.makedirs(CONF.fernet_tokens.key_repository, 0o700)
        except OSError:
            LOG.error(_LE(
                "Failed to create [fernet_tokens] key_repository: either it "
                "already exists or you don't have sufficient permissions to "
                "create it"))

        if keystone_user_id and keystone_group_id:
            os.chown(CONF.fernet_tokens.key_repository, keystone_user_id,
                     keystone_group_id)
        elif keystone_user_id or keystone_group_id:
            # Pass the path lazily as a log argument (instead of eagerly
            # %-formatting the message) so interpolation only happens when
            # the warning is actually emitted — consistent with the other
            # log calls in this module.
            LOG.warning(_LW(
                "Unable to change the ownership of [fernet_tokens] "
                "key_repository without a keystone user ID and keystone "
                "group ID both being provided: %s"),
                CONF.fernet_tokens.key_repository)
Esempio n. 28
0
def build_cache_config():
    """Build the cache region dictionary configuration.

    Translates the ``[cache]`` options from CONF into the flat key/value
    dictionary format expected by dogpile.cache region configuration.

    :returns: dict of cache configuration keys
    """
    prefix = CONF.cache.config_prefix
    conf_dict = {}
    conf_dict['%s.backend' % prefix] = CONF.cache.backend
    conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
    for argument in CONF.cache.backend_argument:
        try:
            (argname, argvalue) = argument.split(':', 1)
        except ValueError:
            msg = _LE('Unable to build cache config-key. Expected format '
                      '"<argname>:<value>". Skipping unknown format: %s')
            LOG.error(msg, argument)
            continue

        arg_key = '.'.join([prefix, 'arguments', argname])
        conf_dict[arg_key] = argvalue

    # NOTE(yorik-sar): these arguments will be used for memcache-related
    # backends. Use setdefault for url to support old-style setting through
    # backend_argument=url:127.0.0.1:11211
    conf_dict.setdefault('%s.arguments.url' % prefix,
                         CONF.cache.memcache_servers)
    for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
                'pool_unused_timeout', 'pool_connection_get_timeout'):
        value = getattr(CONF.cache, 'memcache_' + arg)
        conf_dict['%s.arguments.%s' % (prefix, arg)] = value

    # Log the fully-built configuration exactly once, instead of logging a
    # partial dict on every backend_argument iteration as before.
    LOG.debug('Keystone Cache Config: %s', conf_dict)
    return conf_dict
Esempio n. 29
0
        def _look_for_policy_for_region_and_service(endpoint):
            """Look in the region and its parents for a policy.

            Examine the region of the endpoint for a policy appropriate for
            the service of the endpoint. If there isn't a match, then chase up
            the region tree to find one.

            :param endpoint: endpoint dict with 'region_id' and 'service_id'
            :returns: the matching policy ID, or None (implicitly) when no
                association exists anywhere up the region tree
            """
            region_id = endpoint['region_id']
            regions_examined = []
            while region_id is not None:
                try:
                    ref = self.driver.get_policy_association(
                        service_id=endpoint['service_id'],
                        region_id=region_id)
                    return ref['policy_id']
                except exception.PolicyAssociationNotFound:  # nosec
                    # There wasn't one for that region & service, handle below.
                    pass

                # There wasn't one for that region & service, let's
                # chase up the region tree
                regions_examined.append(region_id)
                region = self.catalog_api.get_region(region_id)
                region_id = None
                if region.get('parent_region_id') is not None:
                    region_id = region['parent_region_id']
                    # Guard against a cycle in the region hierarchy, which
                    # would otherwise loop forever.
                    if region_id in regions_examined:
                        msg = _LE('Circular reference or a repeated entry '
                                  'found in region tree - %(region_id)s.')
                        LOG.error(msg, {'region_id': region_id})
                        break
Esempio n. 30
0
def validate_key_repository():
    """Validate permissions on the key repository directory."""
    # NOTE(lbragstad): We shouldn't need to check if the directory was passed
    # in as None because we don't set allow_no_values to True.
    key_repo = CONF.fernet_tokens.key_repository

    # The current process needs read, write and search (execute) access to
    # manage keys in the repository.
    has_full_access = all(
        os.access(key_repo, mode) for mode in (os.R_OK, os.W_OK, os.X_OK))
    if not has_full_access:
        LOG.error(
            _LE("Either [fernet_tokens] key_repository does not exist or "
                "Keystone does not have sufficient permission to access it: "
                "%s"),
            key_repo)
        return False

    # Warn (but do not fail) when other users can read or traverse the
    # repository.
    mode = os.stat(key_repo).st_mode
    if mode & (stat.S_IROTH | stat.S_IXOTH):
        LOG.warning(_LW("[fernet_tokens] key_repository is world readable: "
                        "%s"), key_repo)

    return True
Esempio n. 31
0
File: idp.py Progetto: ging/keystone
def _sign_assertion(assertion):
    """Sign a SAML assertion.

    This method utilizes ``xmlsec1`` binary and signs SAML assertions in a
    separate process. ``xmlsec1`` cannot read input data from stdin so the
    prepared assertion needs to be serialized and stored in a temporary
    file. This file will be deleted immediately after ``xmlsec1`` returns.
    The signed assertion is redirected to a standard output and read using
    subprocess.PIPE redirection. A ``saml.Assertion`` class is created
    from the signed string again and returned.

    Parameters that are required in the CONF::
    * xmlsec_binary
    * private key file path
    * public key file path

    :param assertion: the assertion object to serialize and sign
    :return: XML <Assertion> object
    :raises exception.SAMLSigningError: if xmlsec1 exits with a
        non-zero status

    """
    xmlsec_binary = CONF.saml.xmlsec1_binary
    idp_private_key = CONF.saml.keyfile
    idp_public_key = CONF.saml.certfile

    # xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID <tag> <file>
    certificates = '%(idp_private_key)s,%(idp_public_key)s' % {
        'idp_public_key': idp_public_key,
        'idp_private_key': idp_private_key
    }

    command_list = [xmlsec_binary, '--sign', '--privkey-pem', certificates,
                    '--id-attr:ID', 'Assertion']

    # Initialize before the try block: if serialization or tempfile creation
    # raises, the finally clause would otherwise hit a NameError on
    # file_path and mask the original exception.
    file_path = None
    try:
        # NOTE(gyee): need to make the namespace prefixes explicit so
        # they won't get reassigned when we wrap the assertion into
        # SAML2 response
        file_path = fileutils.write_to_tempfile(assertion.to_string(
            nspair={'saml': saml2.NAMESPACE,
                    'xmldsig': xmldsig.NAMESPACE}))
        command_list.append(file_path)
        process = subprocess.Popen(command_list,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   close_fds=True)
        stdout, stderr = process.communicate()
        retcode = process.poll()
        if retcode:
            msg = _LE('Error when signing assertion, reason: %(reason)s')
            msg = msg % {'reason': stderr}
            LOG.error(msg)
            raise exception.SAMLSigningError(reason=stderr)
    finally:
        # Best-effort cleanup of the temporary assertion file.
        if file_path:
            try:
                os.remove(file_path)
            except OSError:  # nosec
                pass

    return saml2.create_class_from_xml_string(saml.Assertion, stdout)
Esempio n. 32
0
def _send_audit_notification(action, initiator, outcome, target,
                             event_type, **kwargs):
    """Send CADF notification to inform observers about the affected resource.

    This method logs an exception when sending the notification fails.

    :param action: CADF action being audited (e.g., 'authenticate')
    :param initiator: CADF resource representing the initiator
    :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING,
        taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE)
    :param target: CADF resource representing the target
    :param event_type: An OpenStack-ism, typically this is the meter name that
        Ceilometer uses to poll events.
    :param kwargs: Any additional arguments passed in will be added as
        key-value pairs to the CADF event.

    """
    # Honor per-event opt-out configuration before doing any work.
    if _check_notification_opt_out(event_type, outcome):
        return

    # Lazily construct the module-level catalog helper; the None check means
    # it is only instantiated once per process.
    global _CATALOG_HELPER_OBJ
    if _CATALOG_HELPER_OBJ is None:
        _CATALOG_HELPER_OBJ = _CatalogHelperObj()
    service_list = _CATALOG_HELPER_OBJ.catalog_api.list_services()
    service_id = None

    # Find this service's catalog entry so it can be recorded as the
    # observer on the event below.
    for i in service_list:
        if i['type'] == SERVICE:
            service_id = i['id']
            break

    event = eventfactory.EventFactory().new_event(
        eventType=cadftype.EVENTTYPE_ACTIVITY,
        outcome=outcome,
        action=action,
        initiator=initiator,
        target=target,
        observer=resource.Resource(typeURI=taxonomy.SERVICE_SECURITY))

    if service_id is not None:
        event.observer.id = service_id

    # Attach any extra caller-supplied attributes directly onto the event.
    for key, value in kwargs.items():
        setattr(event, key, value)

    context = {}
    payload = event.as_dict()
    notifier = _get_notifier()

    if notifier:
        try:
            notifier.info(context, event_type, payload)
        except Exception:
            # diaper defense: any exception that occurs while emitting the
            # notification should not interfere with the API request
            LOG.exception(_LE(
                'Failed to send %(action)s %(event_type)s notification'),
                {'action': action, 'event_type': event_type})
Esempio n. 33
0
def format_url(url, substitutions, silent_keyerror_failures=None):
    """Format a user-defined URL with the given substitutions.

    :param string url: the URL to be formatted
    :param dict substitutions: the dictionary used for substitution
    :param list silent_keyerror_failures: keys for which we should be silent
        if there is a KeyError exception on substitution attempt
    :returns: a formatted URL, or None when a silenced key was missing

    """
    # Only whitelisted keys may be referenced by the endpoint template.
    substitutions = utils.WhiteListedItemFilter(WHITELISTED_PROPERTIES,
                                                substitutions)
    allow_keyerror = silent_keyerror_failures or []
    try:
        return url.replace('$(', '%(') % substitutions
    except AttributeError:
        LOG.error(_LE('Malformed endpoint - %(url)r is not a string'),
                  {"url": url})
        raise exception.MalformedEndpoint(endpoint=url)
    except KeyError as exc:
        if exc.args and exc.args[0] in allow_keyerror:
            # The caller explicitly tolerates this missing key.
            return None
        LOG.error(_LE("Malformed endpoint %(url)s - unknown key "
                      "%(keyerror)s"),
                  {"url": url, "keyerror": exc})
        raise exception.MalformedEndpoint(endpoint=url)
    except TypeError as exc:
        LOG.error(_LE("Malformed endpoint '%(url)s'. The following type error "
                      "occurred during string substitution: %(typeerror)s"),
                  {"url": url, "typeerror": exc})
        raise exception.MalformedEndpoint(endpoint=url)
    except ValueError:
        LOG.error(_LE("Malformed endpoint %s - incomplete format "
                      "(are you missing a type notifier ?)"), url)
        raise exception.MalformedEndpoint(endpoint=url)
Esempio n. 34
0
def _sign_assertion(assertion):
    """Sign a SAML assertion.

    This method utilizes ``xmlsec1`` binary and signs SAML assertions in a
    separate process. ``xmlsec1`` cannot read input data from stdin so the
    prepared assertion needs to be serialized and stored in a temporary
    file. This file will be deleted immediately after ``xmlsec1`` returns.
    The signed assertion is redirected to a standard output and read using
    subprocess.PIPE redirection. A ``saml.Assertion`` class is created
    from the signed string again and returned.

    Parameters that are required in the CONF::
    * xmlsec_binary
    * private key file path
    * public key file path

    :param assertion: the assertion object to serialize and sign
    :return: XML <Assertion> object
    :raises exception.SAMLSigningError: if xmlsec1 fails for any reason

    """
    xmlsec_binary = CONF.saml.xmlsec1_binary
    idp_private_key = CONF.saml.keyfile
    idp_public_key = CONF.saml.certfile

    # xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID <tag> <file>
    certificates = '%(idp_private_key)s,%(idp_public_key)s' % {
        'idp_public_key': idp_public_key,
        'idp_private_key': idp_private_key
    }

    command_list = [
        xmlsec_binary, '--sign', '--privkey-pem', certificates, '--id-attr:ID',
        'Assertion'
    ]

    # Initialize before the try block: if serialization or tempfile creation
    # raises, the finally clause would otherwise hit a NameError on
    # file_path and mask the original exception.
    file_path = None
    try:
        # NOTE(gyee): need to make the namespace prefixes explicit so
        # they won't get reassigned when we wrap the assertion into
        # SAML2 response
        file_path = fileutils.write_to_tempfile(
            assertion.to_string(nspair={
                'saml': saml2.NAMESPACE,
                'xmldsig': xmldsig.NAMESPACE
            }))
        command_list.append(file_path)
        stdout = subprocess.check_output(command_list)
    except Exception as e:
        msg = _LE('Error when signing assertion, reason: %(reason)s')
        msg = msg % {'reason': e}
        LOG.error(msg)
        raise exception.SAMLSigningError(reason=e)
    finally:
        # Best-effort cleanup of the temporary assertion file.
        if file_path:
            try:
                os.remove(file_path)
            except OSError:
                pass

    return saml2.create_class_from_xml_string(saml.Assertion, stdout)
Esempio n. 35
0
    def authenticate(self, request, auth_info, auth_context):
        """Authenticate user.

        Attempts the 'external' (REMOTE_USER-based) method first when the
        request carries a remote user, then runs every method named in
        *auth_info*, accumulating plugin results into *auth_context*.

        :param request: the incoming request object (provides remote_user)
        :param auth_info: holds the requested method names and per-method data
        :param auth_context: an AuthContext instance that plugins populate
        :raises exception.AdditionalAuthRequired: when any method returned a
            response, indicating multi-step authentication must continue
        :raises exception.Unauthorized: when auth_context is not an
            AuthContext, or no plugin established a user identity
        """
        # NOTE(notmorgan): This is not super pythonic, but we lean on the
        # __setitem__ method in auth_context to handle edge cases and security
        # of the attributes set by the plugins. This check to ensure
        # `auth_context` is an instance of AuthContext is extra insurance and
        # will prevent regressions.
        if not isinstance(auth_context, AuthContext):
            LOG.error(
                _LE('`auth_context` passed to the Auth controller '
                    '`authenticate` method is not of type '
                    '`keystone.auth.controllers.AuthContext`. For security '
                    'purposes this is required. This is likely a programming '
                    'error. Received object of type `%s`'), type(auth_context))
            raise exception.Unauthorized(
                _('Cannot Authenticate due to internal error.'))
        # The 'external' method allows any 'REMOTE_USER' based authentication
        # In some cases the server can set REMOTE_USER as '' instead of
        # dropping it, so this must be filtered out
        if request.remote_user:
            try:
                external = get_auth_method('external')
                external.authenticate(request, auth_info, auth_context)
            except exception.AuthMethodNotSupported:
                # This will happen there is no 'external' plugin registered
                # and the container is performing authentication.
                # The 'kerberos'  and 'saml' methods will be used this way.
                # In those cases, it is correct to not register an
                # 'external' plugin;  if there is both an 'external' and a
                # 'kerberos' plugin, it would run the check on identity twice.
                LOG.debug("No 'external' plugin is registered.")
            except exception.Unauthorized:
                # If external fails then continue and attempt to determine
                # user identity using remaining auth methods
                LOG.debug("Authorization failed for 'external' auth method.")

        # need to aggregate the results in case two or more methods
        # are specified
        auth_response = {'methods': []}
        for method_name in auth_info.get_method_names():
            method = get_auth_method(method_name)
            resp = method.authenticate(request,
                                       auth_info.get_method_data(method_name),
                                       auth_context)
            if resp:
                auth_response['methods'].append(method_name)
                auth_response[method_name] = resp

        if auth_response["methods"]:
            # authentication continuation required
            raise exception.AdditionalAuthRequired(auth_response)

        if 'user_id' not in auth_context:
            msg = _('User not found by auth plugin; authentication failed')
            LOG.warning(msg)
            raise exception.Unauthorized(msg)
Esempio n. 36
0
    def _upload_config_to_database(self, file_name, domain_name):
        """Upload a single config file to the database.

        Skips the file (returning False) when the domain does not exist,
        when the domain already has a configuration, or when the file
        cannot be parsed or stored.

        :param file_name: the file containing the config options
        :param domain_name: the domain name
        :returns: a boolean indicating if the upload succeeded

        """
        try:
            domain_ref = (
                self.resource_manager.get_domain_by_name(domain_name))
        except exception.DomainNotFound:
            print(_('Invalid domain name: %(domain)s found in config file '
                    'name: %(file)s - ignoring this file.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False

        # Never overwrite an existing configuration for the domain.
        if self.domain_config_manager.get_config_with_sensitive_info(
                domain_ref['id']):
            print(_('Domain: %(domain)s already has a configuration '
                    'defined - ignoring file: %(file)s.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False

        sections = {}
        try:
            parser = cfg.ConfigParser(file_name, sections)
            parser.parse()
        except Exception:
            # We explicitly don't try and differentiate the error cases, in
            # order to keep the code in this tool more robust as oslo.config
            # changes.
            print(_('Error parsing configuration file for domain: %(domain)s, '
                    'file: %(file)s.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False

        try:
            # The parser stores each option's value as a list; keep only the
            # first element so the stored config maps option -> scalar value.
            for group in sections:
                for option in sections[group]:
                        sections[group][option] = sections[group][option][0]
            self.domain_config_manager.create_config(domain_ref['id'],
                                                     sections)
            return True
        except Exception as e:
            msg = _LE('Error processing config file for domain: '
                      '%(domain_name)s, file: %(filename)s, error: %(error)s')
            LOG.error(msg,
                      {'domain_name': domain_name,
                       'filename': file_name,
                       'error': e},
                      exc_info=True)
            return False
Esempio n. 37
0
    def _add_to_revocation_list(self, data, lock):
        """Add a revoked token entry to the persisted revocation list.

        Already-expired tokens are not added, and stale entries are pruned
        from the stored list before the new entry is appended.

        :param data: token dict providing at least 'id' and 'expires'
        :param lock: lock handle passed through to _set_key when storing
        """
        filtered_list = []
        revoked_token_data = {}

        current_time = self._get_current_time()
        expires = data['expires']

        # 'expires' may arrive as an ISO-format string; normalize to a
        # timezone-normalized datetime before comparing.
        if isinstance(expires, six.string_types):
            expires = timeutils.parse_isotime(expires)

        expires = timeutils.normalize_time(expires)

        if expires < current_time:
            LOG.warning(_LW('Token `%s` is expired, not adding to the '
                            'revocation list.'), data['id'])
            return

        revoked_token_data['expires'] = utils.isotime(expires,
                                                      subsecond=True)
        revoked_token_data['id'] = data['id']

        token_list = self._get_key_or_default(self.revocation_key, default=[])
        if not isinstance(token_list, list):
            # NOTE(morganfainberg): In the case that the revocation list is not
            # in a format we understand, reinitialize it. This is an attempt to
            # not allow the revocation list to be completely broken if
            # somehow the key is changed outside of keystone (e.g. memcache
            # that is shared by multiple applications). Logging occurs at error
            # level so that the cloud administrators have some awareness that
            # the revocation_list needed to be cleared out. In all, this should
            # be recoverable. Keystone cannot control external applications
            # from changing a key in some backends, however, it is possible to
            # gracefully handle and notify of this event.
            LOG.error(_LE('Reinitializing revocation list due to error '
                          'in loading revocation list from backend.  '
                          'Expected `list` type got `%(type)s`. Old '
                          'revocation list data: %(list)r'),
                      {'type': type(token_list), 'list': token_list})
            token_list = []

        # NOTE(morganfainberg): on revocation, cleanup the expired entries, try
        # to keep the list of tokens revoked at the minimum.
        for token_data in token_list:
            try:
                expires_at = timeutils.normalize_time(
                    timeutils.parse_isotime(token_data['expires']))
            except ValueError:
                # Entries with unparseable expiry are dropped rather than
                # kept forever in the list.
                LOG.warning(_LW('Removing `%s` from revocation list due to '
                                'invalid expires data in revocation list.'),
                            token_data.get('id', 'INVALID_TOKEN_DATA'))
                continue
            if expires_at > current_time:
                filtered_list.append(token_data)
        filtered_list.append(revoked_token_data)
        self._set_key(self.revocation_key, filtered_list, lock)
Esempio n. 38
0
 def _get_token_id(self, token_data):
     """Serialize token data to JSON and CMS-sign it to form the token ID.

     :param token_data: dict of token data to serialize and sign
     :returns: str token ID (the CMS signature of the token JSON)
     :raises exception.UnexpectedError: if the signing subprocess fails
     """
     try:
         # force conversion to a string as the keystone client cms code
         # produces unicode.  This can be removed if the client returns
         # str()
         # TODO(ayoung): Make to a byte_str for Python3
         token_json = jsonutils.dumps(token_data, cls=utils.PKIEncoder)
         token_id = str(cms.cms_sign_token(token_json, CONF.signing.certfile, CONF.signing.keyfile))
         return token_id
     except environment.subprocess.CalledProcessError:
         LOG.exception(_LE("Unable to sign token"))
         raise exception.UnexpectedError(_("Unable to sign token."))
Esempio n. 39
0
 def _run(self, application, socket):
     """Start a WSGI server with a new green thread pool."""
     logger = log.getLogger('eventlet.wsgi.server')
     try:
         eventlet.wsgi.server(socket, application, custom_pool=self.pool,
                              log=EventletFilteringLogger(logger),
                              debug=False)
     except greenlet.GreenletExit:
         # Wait until all servers have completed running
         # NOTE(review): GreenletExit is treated as a normal shutdown
         # signal here, so it is deliberately swallowed.
         pass
     except Exception:
         # Anything else is unexpected: log with traceback and propagate.
         LOG.exception(_LE('Server error'))
         raise
Esempio n. 40
0
def format_url(url, substitutions):
    """Format a user-defined URL with the given substitutions.

    :param string url: the URL to be formatted
    :param dict substitutions: the dictionary used for substitution
    :returns: a formatted URL

    """
    WHITELISTED_PROPERTIES = [
        'tenant_id', 'user_id', 'public_bind_host', 'admin_bind_host',
        'compute_host', 'compute_port', 'admin_port', 'public_port',
        'public_endpoint', 'admin_endpoint', ]

    # Only whitelisted keys may be referenced by the endpoint template.
    substitutions = utils.WhiteListedItemFilter(WHITELISTED_PROPERTIES,
                                                substitutions)
    try:
        return url.replace('$(', '%(') % substitutions
    except AttributeError:
        LOG.error(_LE('Malformed endpoint - %(url)r is not a string'),
                  {"url": url})
        raise exception.MalformedEndpoint(endpoint=url)
    except KeyError as exc:
        LOG.error(_LE("Malformed endpoint %(url)s - unknown key %(keyerror)s"),
                  {"url": url, "keyerror": exc})
        raise exception.MalformedEndpoint(endpoint=url)
    except TypeError as exc:
        LOG.error(_LE("Malformed endpoint '%(url)s'. The following type error "
                      "occurred during string substitution: %(typeerror)s"),
                  {"url": url, "typeerror": exc})
        raise exception.MalformedEndpoint(endpoint=url)
    except ValueError:
        LOG.error(_LE("Malformed endpoint %s - incomplete format "
                      "(are you missing a type notifier ?)"), url)
        raise exception.MalformedEndpoint(endpoint=url)
Esempio n. 41
0
def _send_notification(operation, resource_type, resource_id,
                       actor_dict=None, public=True):
    """Notify observers that *resource_id* was affected by *operation*.

    Failures while emitting the notification are logged, never raised.

    :param operation: operation being performed (created, updated, or deleted)
    :param resource_type: type of resource being operated on
    :param resource_id: ID of resource being operated on
    :param actor_dict: a dictionary containing the actor's ID and type
    :param public: if True (default), the event is also sent to the notifier
                   API; if False, only in-process listeners registered via
                   notify_event_callbacks receive it.
    """
    payload = {'resource_info': resource_id}

    if actor_dict:
        payload['actor_id'] = actor_dict['id']
        payload['actor_type'] = actor_dict['type']
        payload['actor_operation'] = actor_dict['actor_operation']

    # In-process listeners always hear about the event.
    notify_event_callbacks(SERVICE, resource_type, operation, payload)

    # CADF-formatted notifications are emitted by the CADF helpers instead;
    # only the 'basic' format is sent from here.
    if not (public and CONF.notification_format == 'basic'):
        return

    notifier = _get_notifier()
    if not notifier:
        return

    event_type = '%(service)s.%(resource_type)s.%(operation)s' % {
        'service': SERVICE,
        'resource_type': resource_type,
        'operation': operation
    }
    if _check_notification_opt_out(event_type, outcome=None):
        return
    try:
        notifier.info({}, event_type, payload)
    except Exception:
        LOG.exception(
            _LE('Failed to send %(res_id)s %(event_type)s notification'),
            {'res_id': resource_id, 'event_type': event_type})
Esempio n. 42
0
def _convert_to_integers(id_value):
    """Cast user and group system identifiers to integers."""
    # NOTE(lbragstad) os.chown() will raise a TypeError here if
    # keystone_user_id and keystone_group_id are not integers. Let's
    # cast them to integers if we can because it's possible to pass non-integer
    # values into the fernet_setup utility.
    try:
        id_int = int(id_value)
    except ValueError as e:
        msg = _LE('Unable to convert Keystone user or group ID. Error: %s')
        LOG.error(msg, e)
        raise

    return id_int
Esempio n. 43
0
 def __init__(self, application):
     """Wrap the WSGI application and log the deprecation error once.

     :param application: the WSGI application being wrapped

     """
     super(AdminTokenAuthMiddleware, self).__init__(application)
     # NOTE(notmorgan): This is deprecated and emits a significant error
     # message to make sure deployers update their deployments so in the
     # future release upgrade the deployment does not break.
     LOG.error(
         _LE('The admin_token_auth middleware functionality has been '
             'merged into the main auth middleware '
             '(keystone.middleware.auth.AuthContextMiddleware). '
             '`admin_token_auth` must be removed from the '
             '[pipeline:api_v3], [pipeline:admin_api], and '
             '[pipeline:public_api] sections of your paste ini '
             'file. The [filter:admin_token_auth] block will also '
             'need to be removed from your paste ini file. '))
Esempio n. 44
0
def _convert_to_integers(id_value):
    """Cast user and group system identifiers to integers.

    :param id_value: a user or group system ID, possibly a string
    :returns: ``id_value`` converted to an int
    :raises ValueError: if ``id_value`` cannot be converted to an int

    """
    # NOTE(lbragstad) os.chown() will raise a TypeError here if
    # keystone_user_id and keystone_group_id are not integers. Let's
    # cast them to integers if we can because it's possible to pass non-integer
    # values into the fernet_setup utility.
    try:
        id_int = int(id_value)
    except ValueError as e:
        msg = _LE("Unable to convert Keystone user or group ID. Error: %s")
        LOG.error(msg, e)
        raise

    return id_int
Esempio n. 45
0
 def _get_token_id(self, token_data):
     """Serialize and CMS-sign the token data, returning the token ID."""
     try:
         # The keystone client cms code produces unicode; force a str()
         # conversion here so a plain string is returned.  This can be
         # removed if the client returns str().
         # TODO(ayoung): Make to a byte_str for Python3
         serialized = jsonutils.dumps(token_data, cls=utils.PKIEncoder)
         signed = cms.cms_sign_token(serialized,
                                     CONF.signing.certfile,
                                     CONF.signing.keyfile)
         return str(signed)
     except environment.subprocess.CalledProcessError:
         LOG.exception(_LE('Unable to sign token'))
         raise exception.UnexpectedError(_('Unable to sign token.'))
Esempio n. 46
0
def format_url(url, substitutions):
    """Format a user-defined URL with the given substitutions.

    :param string url: the URL to be formatted
    :param dict substitutions: the dictionary used for substitution
    :returns: a formatted URL
    :raises keystone.exception.MalformedEndpoint: if the URL is not a
        string or the substitution fails

    """
    # Restrict substitution keys to the configured whitelist so arbitrary
    # values cannot be interpolated into endpoint URLs.
    substitutions = utils.WhiteListedItemFilter(
        CONF.catalog.endpoint_substitution_whitelist, substitutions)
    try:
        # Endpoint templates use $(var)s placeholders; rewrite them to
        # %(var)s so standard Python string interpolation applies.
        result = url.replace('$(', '%(') % substitutions
    except AttributeError:
        # Use the _LE marker for consistency with the other handlers below.
        LOG.error(_LE('Malformed endpoint - %(url)r is not a string'),
                  {"url": url})
        raise exception.MalformedEndpoint(endpoint=url)
    except KeyError as e:
        LOG.error(_LE("Malformed endpoint %(url)s - unknown key %(keyerror)s"),
                  {"url": url,
                   "keyerror": e})
        raise exception.MalformedEndpoint(endpoint=url)
    except TypeError as e:
        LOG.error(
            _LE("Malformed endpoint '%(url)s'. The following type error "
                "occurred during string substitution: %(typeerror)s"),
            {"url": url,
             "typeerror": e})
        raise exception.MalformedEndpoint(endpoint=url)
    except ValueError:
        # ValueError means an incomplete format specifier, e.g. a missing
        # trailing type notifier such as the 's' in %(var)s.
        LOG.error(
            _LE("Malformed endpoint %s - incomplete format "
                "(are you missing a type notifier ?)"), url)
        raise exception.MalformedEndpoint(endpoint=url)
    return result
Esempio n. 47
0
 def _run(self, application, socket):
     """Start a WSGI server with a new green thread pool."""
     wsgi_logger = log.getLogger('eventlet.wsgi.server')
     # A configured value of 0 means "no timeout" for eventlet.
     timeout = CONF.eventlet_server.client_socket_timeout or None
     try:
         eventlet.wsgi.server(
             socket,
             application,
             log=EventletFilteringLogger(wsgi_logger),
             debug=False,
             keepalive=CONF.eventlet_server.wsgi_keep_alive,
             socket_timeout=timeout)
     except greenlet.GreenletExit:
         # All servers have completed running; exit quietly.
         pass
     except Exception:
         LOG.exception(_LE('Server error'))
         raise
Esempio n. 48
0
    def validate_v2_token(self, token_ref):
        """Validate a v2 token reference and return its token data.

        Uses cached ``token_data`` when present and already in v2 format;
        otherwise rebuilds the token data from the persisted metadata.

        :param token_ref: persisted token reference dict
        :returns: v2 token data dict
        :raises keystone.exception.TokenNotFound: if validation fails

        """
        try:
            self._assert_is_not_federation_token(token_ref)
            self._assert_default_domain(token_ref)
            # FIXME(gyee): performance or correctness? Should we return the
            # cached token or reconstruct it? Obviously if we are going with
            # the cached token, any role, project, or domain name changes
            # will not be reflected. One may argue that with PKI tokens,
            # we are essentially doing cached token validation anyway.
            # Lets go with the cached token strategy. Since token
            # management layer is now pluggable, one can always provide
            # their own implementation to suit their needs.
            token_data = token_ref.get('token_data')
            if (not token_data or
                    self.get_token_version(token_data) !=
                    token.provider.V2):
                # token is created by old v2 logic
                metadata_ref = token_ref['metadata']
                roles_ref = []
                for role_id in metadata_ref.get('roles', []):
                    roles_ref.append(self.role_api.get_role(role_id))

                # Get a service catalog if possible
                # This is needed for on-behalf-of requests
                catalog_ref = None
                if token_ref.get('tenant'):
                    catalog_ref = self.catalog_api.get_catalog(
                        token_ref['user']['id'],
                        token_ref['tenant']['id'],
                        metadata_ref)

                trust_ref = None
                if CONF.trust.enabled and 'trust_id' in metadata_ref:
                    trust_ref = self.trust_api.get_trust(
                        metadata_ref['trust_id'])

                token_data = self.v2_token_data_helper.format_token(
                    token_ref, roles_ref, catalog_ref, trust_ref)

            trust_id = token_data['access'].get('trust', {}).get('id')
            if trust_id:
                # token trust validation
                self.trust_api.get_trust(trust_id)

            return token_data
        except exception.ValidationError as e:
            LOG.exception(_LE('Failed to validate token'))
            raise exception.TokenNotFound(e)
Esempio n. 49
0
 def exec_command(self, command):
     """Run an SSL-related command, logging its output on failure.

     Each element of ``command`` is %-interpolated against
     ``self.ssl_dictionary`` before execution.

     :param command: list of command parts containing %-style placeholders
     :raises environment.subprocess.CalledProcessError: if the command
         exits with a non-zero status

     """
     to_exec = [part % self.ssl_dictionary for part in command]
     LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
     try:
         # NOTE(shaleh): use check_output instead of the simpler
         # `check_call()` in order to log any output from an error.
         environment.subprocess.check_output(
             to_exec,
             stderr=environment.subprocess.STDOUT)
     except environment.subprocess.CalledProcessError as e:
         LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s '
                       '- %(output)s'),
                   {'to_exec': to_exec,
                    'retcode': e.returncode,
                    'output': e.output})
         # Bare raise preserves the original traceback (raise e does not
         # on Python 2).
         raise
Esempio n. 50
0
def _sign_assertion(assertion):
    """Sign a SAML assertion.

    This method utilizes ``xmlsec1`` binary and signs SAML assertions in a
    separate process. ``xmlsec1`` cannot read input data from stdin so the
    prepared assertion needs to be serialized and stored in a temporary
    file. This file will be deleted immediately after ``xmlsec1`` returns.
    The signed assertion is redirected to a standard output and read using
    subprocess.PIPE redirection. A ``saml.Assertion`` class is created
    from the signed string again and returned.

    Parameters that are required in the CONF::
    * xmlsec_binary
    * private key file path
    * public key file path
    :return: XML <Assertion> object
    :raises keystone.exception.SAMLSigningError: if signing fails

    """
    xmlsec_binary = CONF.saml.xmlsec1_binary
    idp_private_key = CONF.saml.keyfile
    idp_public_key = CONF.saml.certfile

    # xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID <tag> <file>
    certificates = '%(idp_private_key)s,%(idp_public_key)s' % {
        'idp_public_key': idp_public_key,
        'idp_private_key': idp_private_key
    }

    command_list = [xmlsec_binary, '--sign', '--privkey-pem', certificates,
                    '--id-attr:ID', 'Assertion']

    # Initialize before the try block so the finally clause cannot hit an
    # unbound name if write_to_tempfile itself raises.
    file_path = None
    try:
        file_path = fileutils.write_to_tempfile(assertion.to_string())
        command_list.append(file_path)
        stdout = subprocess.check_output(command_list)
    except Exception as e:
        msg = _LE('Error when signing assertion, reason: %(reason)s')
        msg = msg % {'reason': e}
        LOG.error(msg)
        raise exception.SAMLSigningError(reason=e)
    finally:
        # Remove the temp file; if it was never created there is nothing
        # to clean up.
        if file_path:
            try:
                os.remove(file_path)
            except OSError:
                pass

    return saml2.create_class_from_xml_string(saml.Assertion, stdout)
Esempio n. 51
0
    def validate_v2_token(self, token_ref):
        """Validate a v2 token reference, returning the token data.

        Cached ``token_data`` is returned when it is already in v2 format;
        otherwise the data is reconstructed from the stored metadata.

        :param token_ref: persisted token reference dict
        :returns: v2 token data dict
        :raises keystone.exception.TokenNotFound: if validation fails

        """
        try:
            self._assert_is_not_federation_token(token_ref)
            self._assert_default_domain(token_ref)
            # FIXME(gyee): performance or correctness? Should we return the
            # cached token or reconstruct it? Obviously if we are going with
            # the cached token, any role, project, or domain name changes
            # will not be reflected. One may argue that with PKI tokens,
            # we are essentially doing cached token validation anyway.
            # Lets go with the cached token strategy. Since token
            # management layer is now pluggable, one can always provide
            # their own implementation to suit their needs.
            token_data = token_ref.get('token_data')
            if (not token_data or
                    self.get_token_version(token_data) !=
                    token.provider.V2):
                # token is created by old v2 logic
                metadata_ref = token_ref['metadata']
                roles_ref = []
                for role_id in metadata_ref.get('roles', []):
                    roles_ref.append(self.role_api.get_role(role_id))

                # Get a service catalog if possible
                # This is needed for on-behalf-of requests
                catalog_ref = None
                if token_ref.get('tenant'):
                    catalog_ref = self.catalog_api.get_catalog(
                        token_ref['user']['id'],
                        token_ref['tenant']['id'])

                trust_ref = None
                if CONF.trust.enabled and 'trust_id' in metadata_ref:
                    trust_ref = self.trust_api.get_trust(
                        metadata_ref['trust_id'])

                token_data = self.v2_token_data_helper.format_token(
                    token_ref, roles_ref, catalog_ref, trust_ref)

            trust_id = token_data['access'].get('trust', {}).get('id')
            if trust_id:
                # token trust validation
                self.trust_api.get_trust(trust_id)

            return token_data
        except exception.ValidationError as e:
            LOG.exception(_LE('Failed to validate token'))
            raise exception.TokenNotFound(e)
Esempio n. 52
0
 def _run(self, application, socket):
     """Start a WSGI server with a new green thread pool.

     :param application: the WSGI application to serve
     :param socket: the listening socket the server accepts on

     """
     logger = log.getLogger('eventlet.wsgi.server')
     # A configured value of 0 is normalized to None ("no timeout").
     socket_timeout = CONF.eventlet_server.client_socket_timeout or None
     try:
         eventlet.wsgi.server(
             socket,
             application,
             log=EventletFilteringLogger(logger),
             debug=False,
             keepalive=CONF.eventlet_server.wsgi_keep_alive,
             socket_timeout=socket_timeout)
     except greenlet.GreenletExit:
         # Wait until all servers have completed running
         pass
     except Exception:
         LOG.exception(_LE('Server error'))
         raise
Esempio n. 53
0
 def exec_command(self, command):
     """Run an SSL-related command, logging its output on failure.

     Each element of ``command`` is %-interpolated against
     ``self.ssl_dictionary`` before execution.

     :param command: list of command parts containing %-style placeholders
     :raises subprocess.CalledProcessError: if the command exits with a
         non-zero status

     """
     to_exec = [part % self.ssl_dictionary for part in command]
     LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
     try:
         # NOTE(shaleh): use check_output instead of the simpler
         # `check_call()` in order to log any output from an error.
         subprocess.check_output(  # nosec : the arguments being passed
             # in are defined in this file and trusted to build CAs, keys
             # and certs
             to_exec,
             stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
         LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s '
                       '- %(output)s'),
                   {'to_exec': to_exec,
                    'retcode': e.returncode,
                    'output': e.output})
         # Bare raise preserves the original traceback (raise e does not
         # on Python 2).
         raise
Esempio n. 54
0
    def _create_tmp_new_key(self, keystone_user_id, keystone_group_id):
        """Securely create a new tmp encryption key.

        This created key is not effective until _become_valid_new_key().

        :param keystone_user_id: user ID to own the key file, or falsy
        :param keystone_group_id: group ID to own the key file, or falsy
        :raises IOError: if the temporary key file cannot be written
        """
        key = fernet.Fernet.generate_key()  # key is bytes

        # This ensures the key created is not world-readable
        old_umask = os.umask(0o177)
        if keystone_user_id and keystone_group_id:
            old_egid = os.getegid()
            old_euid = os.geteuid()
            os.setegid(keystone_group_id)
            os.seteuid(keystone_user_id)
        elif keystone_user_id or keystone_group_id:
            # Pass the argument lazily so the logger interpolates only when
            # the message is actually emitted.
            LOG.warning(
                _LW('Unable to change the ownership of the new key without a '
                    'keystone user ID and keystone group ID both being provided: '
                    '%s'), self.key_repository)
        # Determine the file name of the new key
        key_file = os.path.join(self.key_repository, '0.tmp')
        create_success = False
        try:
            with open(key_file, 'w') as f:
                # convert key to str for the file.
                f.write(key.decode('utf-8'))
                f.flush()
                create_success = True
        except IOError:
            LOG.error(_LE('Failed to create new temporary key: %s'), key_file)
            raise
        finally:
            # After writing the key, set the umask back to its original value.
            # Do the same with group and user identifiers if a Keystone group
            # or user was supplied.
            os.umask(old_umask)
            if keystone_user_id and keystone_group_id:
                os.seteuid(old_euid)
                os.setegid(old_egid)
            # Deal with the tmp key file
            if not create_success and os.access(key_file, os.F_OK):
                os.remove(key_file)

        LOG.info(_LI('Created a new temporary key: %s'), key_file)
Esempio n. 55
0
def _send_audit_notification(action, initiator, outcome, **kwargs):
    """Send CADF notification to inform observers about the affected resource.

    This method logs an exception when sending the notification fails.

    :param action: CADF action being audited (e.g., 'authenticate')
    :param initiator: CADF resource representing the initiator
    :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING,
        taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE)

    """

    event = eventfactory.EventFactory().new_event(
        eventType=cadftype.EVENTTYPE_ACTIVITY,
        outcome=outcome,
        action=action,
        initiator=initiator,
        target=resource.Resource(typeURI=taxonomy.ACCOUNT_USER),
        observer=resource.Resource(typeURI=taxonomy.SERVICE_SECURITY))

    for key, value in kwargs.items():
        setattr(event, key, value)

    context = {}
    payload = event.as_dict()
    service = 'identity'
    event_type = '%(service)s.%(action)s' % {
        'service': service,
        'action': action
    }

    notifier = _get_notifier()

    if notifier:
        try:
            notifier.info(context, event_type, payload)
        except Exception:
            # diaper defense: any exception that occurs while emitting the
            # notification should not interfere with the API request
            LOG.exception(
                _LE('Failed to send %(action)s %(event_type)s notification'), {
                    'action': action,
                    'event_type': event_type
                })