Example #1
    def _get_facet_terms(self, fields, request_context,
                         all_projects, limit_terms,
                         exclude_options=False):
        # Fields can be empty if there are no facet terms desired,
        # but we will run a size=0 search to get the doc count. If the
        # user does not want the options associated with the facet fields,
        # do not aggregate. This is controlled with a parameter to the
        # API call.
        body = {}
        term_aggregations = {}

        if fields:
            mapping = self.get_mapping()['properties']
            # Nested fields will be all the mapped fields with type=='nested'.
            # Not tremendously keen on the way this is structured, but the
            # nested fields are a bit of a special case.
            nested_fields = [name
                             for name, properties in six.iteritems(mapping)
                             if properties['type'] == 'nested']
            if not exclude_options:
                term_aggregations = utils.get_facets_query(fields,
                                                           nested_fields,
                                                           limit_terms)
            if term_aggregations:
                body['aggs'] = term_aggregations

        role_filter = request_context.user_role_filter
        plugin_filters = [{
            "term": {ROLE_USER_FIELD: role_filter}
        }]
        if not (request_context.is_admin and all_projects):
            plugin_filters.extend(
                self._get_rbac_field_filters(request_context))

        body['query'] = {
            "filtered": {
                "filter": {
                    "and": plugin_filters
                }}}

        results = self.engine.search(
            index=self.alias_name_search,
            doc_type=self.get_document_type(),
            body=body,
            ignore_unavailable=True,
            size=0)

        agg_results = results.get('aggregations', {})
        doc_count = results['hits']['total']

        facet_terms = utils.transform_facets_results(
            agg_results,
            self.get_document_type())

        if term_aggregations and not agg_results:
            LOG.warning(_LW(
                "No aggregations found for %(resource_type)s. There may "
                "be a mapping problem.") %
                {'resource_type': self.get_document_type()})
        return facet_terms, doc_count
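For reference, a minimal sketch of the request body this method assembles for a non-admin caller faceting on a single field, using the ES 1.x filtered/and syntax seen above. The field name, role field, and filter values are illustrative stand-ins, not the plugin's real ones:

# Illustrative only: the body _get_facet_terms would send to Elasticsearch.
# "<ROLE_USER_FIELD>" and the RBAC term filter are placeholders for
# whatever the plugin actually supplies.
body = {
    "aggs": {
        "status": {"terms": {"field": "status"}},
    },
    "query": {
        "filtered": {
            "filter": {
                "and": [
                    {"term": {"<ROLE_USER_FIELD>": "user"}},
                    {"term": {"tenant_id": "<project-id>"}},
                ]
            }
        }
    },
}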
Example #2
    def get_docs_by_nested_field(self, path, field, value, version=False):
        """Query ElasticSearch based on a nested field. The caller will
           need to specify the path of the nested field as well as the
           field itself. We will include the 'version' field if commanded
           as such by the caller.
        """
        # Set up query for accessing a nested field.
        nested_field = path + "." + field
        body = {
            "query": {
                "nested": {
                    "path": path,
                    "query": {
                        "term": {
                            nested_field: value
                        }
                    }
                }
            }
        }
        if version:
            body['version'] = True
        try:
            return self.engine.search(index=self.alias_name,
                                      doc_type=self.document_type,
                                      body=body,
                                      ignore_unavailable=True)
        except Exception as exc:
            LOG.warning(
                _LW('Error querying %(p)s %(f)s. Error %(exc)s') % {
                    'p': path,
                    'f': field,
                    'exc': exc
                })
            raise
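As a usage sketch, a hypothetical call such as get_docs_by_nested_field('members', 'member_id', 'abc123') would send this query body (with 'version' added only when version=True):

# Query body for the hypothetical call above; the path, field, and value
# are made-up examples.
body = {
    "query": {
        "nested": {
            "path": "members",
            "query": {"term": {"members.member_id": "abc123"}}
        }
    }
}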
Example #3
    def _remove_children(self, pid):
        if pid in self.children:
            self.children.remove(pid)
            LOG.info(_LI('Removed dead child %s'), pid)
        elif pid in self.stale_children:
            self.stale_children.remove(pid)
            LOG.info(_LI('Removed stale child %s'), pid)
        else:
            LOG.warning(_LW('Unrecognised child %s') % pid)
Example #4
    def _remove_children(self, pid):
        if pid in self.children:
            self.children.remove(pid)
            LOG.info(_LI('Removed dead child %s') % pid)
        elif pid in self.stale_children:
            self.stale_children.remove(pid)
            LOG.info(_LI('Removed stale child %s') % pid)
        else:
            LOG.warning(_LW('Unrecognised child %s') % pid)
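Examples #3 and #4 differ only in how the pid is interpolated into the message. A minimal sketch of the two styles, without the _LI/_LW translation markers:

import logging

LOG = logging.getLogger(__name__)
pid = 1234

# Example #3 style: the logger interpolates lazily, only if the record
# is actually emitted at the current log level.
LOG.info('Removed dead child %s', pid)

# Example #4 style: the string is %-formatted eagerly, before the logger
# decides whether to emit it.
LOG.info('Removed dead child %s' % pid)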
Example #5
def validate_key_cert(key_file, cert_file):
    try:
        error_key_name = "private key"
        error_filename = key_file
        with open(key_file, 'r') as keyfile:
            key_str = keyfile.read()
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)

        error_key_name = "certificate"
        error_filename = cert_file
        with open(cert_file, 'r') as certfile:
            cert_str = certfile.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except IOError as ioe:
        raise RuntimeError(
            _("There is a problem with your %(error_key_name)s "
              "%(error_filename)s.  Please verify it."
              "  Error: %(ioe)s") % {
                  'error_key_name': error_key_name,
                  'error_filename': error_filename,
                  'ioe': ioe
              })
    except crypto.Error as ce:
        raise RuntimeError(
            _("There is a problem with your %(error_key_name)s "
              "%(error_filename)s.  Please verify it. OpenSSL"
              " error: %(ce)s") % {
                  'error_key_name': error_key_name,
                  'error_filename': error_filename,
                  'ce': ce
              })

    try:
        data = uuidutils.generate_uuid()
        digest = CONF.digest_algorithm
        if digest == 'sha1':
            LOG.warning(
                _LW('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
                    ' state that the SHA-1 is not suitable for'
                    ' general-purpose digital signature applications (as'
                    ' specified in FIPS 186-3) that require 112 bits of'
                    ' security. The default value is sha1 in Kilo for a'
                    ' smooth upgrade process, and it will be updated'
                    ' with sha256 in next release(L).'))
        out = crypto.sign(key, data, digest)
        crypto.verify(cert, out, data, digest)
    except crypto.Error as ce:
        raise RuntimeError(
            _("There is a problem with your key pair.  "
              "Please verify that cert %(cert_file)s and "
              "key %(key_file)s belong together.  OpenSSL "
              "error %(ce)s") % {
                  'cert_file': cert_file,
                  'key_file': key_file,
                  'ce': ce
              })
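A usage sketch for the function above; the paths are placeholders:

# Hypothetical invocation: both files must be PEM-encoded, and the key
# must be able to sign data that the certificate can verify.
try:
    validate_key_cert('/etc/ssl/private/server.key',
                      '/etc/ssl/certs/server.crt')
except RuntimeError as e:
    print('Validation failed: %s' % e)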
Example #6
    def create_or_update(self, payload, timestamp):
        snapshot_id = payload['snapshot_id']
        LOG.debug("Updating cinder snapshot information for %s", snapshot_id)
        try:
            payload = serialize_cinder_snapshot(snapshot_id)
            version = self.get_version(payload, timestamp)
            self.index_helper.save_document(payload, version=version)
        except cinderclient.exceptions.NotFound:
            LOG.warning(_LW("Snapshot %s not found; deleting") % snapshot_id)
            self.delete(payload, timestamp)
Example #8
    def delete_document_unknown_parent(self, doc_id, version=None):
        """Deletes a document that requires routing but where the routing is
        not known. Since this involves a query to find it, there is a potential
        race condition in between storing and indexing the document. A search
        is run to find the routing parent, and the document is then deleted.
        """
        search_doc_id = doc_id
        if self.plugin.requires_role_separation:
            # Just look for the admin document though it doesn't matter which.
            # This has to match whatever we strip later with strip_role_suffix
            search_doc_id += ADMIN_ID_SUFFIX

        # Necessary to request 'fields' for e-s 1.x, not for 2.x
        query = {'filter': {'term': {'_id': search_doc_id}},
                 'fields': ['_parent', '_routing']}
        search_results = self.engine.search(index=self.alias_name,
                                            doc_type=self.document_type,
                                            body=query)
        total_hits = search_results['hits']['total']
        if not total_hits:
            ctx = {'doc_type': self.document_type, 'id': doc_id}
            LOG.warning(_LW(
                "No results found for %(doc_type)s id %(id)s; can't find "
                "routing to delete") % ctx)
            return

        # There are results. Check that there's only one unique result (may be
        # two in different indices from the same alias). ES 1.x and 2.x differ
        # slightly; metafields are returned by default and not in 'fields' in
        # 2.x whether you like it or not
        distinct_results = set((r['_id'], get_metafield(r, '_parent'))
                               for r in search_results['hits']['hits'])
        if len(distinct_results) != 1:
            ctx = {'count': len(distinct_results),
                   'results': ", ".join(distinct_results),
                   'doc_type': self.document_type,
                   'id': doc_id}
            LOG.error(_LE("%(count)d distinct results (%(results)s) found for "
                          "get_document_by_query for %(doc_type)s id %(id)s") %
                      ctx)

        first_hit = search_results['hits']['hits'][0]
        routing = get_metafield(first_hit, '_routing')
        parent = get_metafield(first_hit, '_parent')
        parent = strip_role_suffix(parent, ADMIN_ID_SUFFIX)

        LOG.debug("Deleting %s id %s with parent %s routing %s",
                  self.document_type, doc_id, parent, routing)
        delete_info = {'_id': doc_id, '_parent': parent}
        if routing:
            delete_info['_routing'] = routing
        if version:
            delete_info['_version'] = version
        self.delete_document(delete_info)
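For reference, the routing lookup issued above has this shape, sketched with a hypothetical document id (the role suffix is appended only when the plugin requires role separation):

# ES 1.x-style lookup for the parent/routing metafields of one document.
query = {
    'filter': {'term': {'_id': 'abc123' + ADMIN_ID_SUFFIX}},
    'fields': ['_parent', '_routing'],
}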
Example #9
def _get_flavor_access(flavor):
    if flavor.is_public:
        return None
    try:
        n_client = openstack_clients.get_novaclient()
        return [access.tenant_id for access in
                n_client.flavor_access.list(flavor=flavor)] or None
    except novaclient.exceptions.Unauthorized:
        LOG.warning(_LW("Could not return tenant for %s; forbidden") %
                    flavor)
        return None
Example #10
def _get_flavor_access(flavor):
    if flavor.is_public:
        return None
    try:
        n_client = openstack_clients.get_novaclient()
        return [
            access.tenant_id
            for access in n_client.flavor_access.list(flavor=flavor)
        ] or None
    except novaclient.exceptions.Unauthorized:
        LOG.warning(_LW("Could not return tenant for %s; forbidden") % flavor)
        return None
Example #11
def _get_image_members(image):
    if image['visibility'] == 'public':
        return []

    try:
        g_client = openstack_clients.get_glanceclient()
        members = g_client.image_members.list(image['id'])
        return members
    except glanceclient.exc.Unauthorized:
        LOG.warning(_LW("Could not list image members for %s; forbidden") %
                    image['id'])
        return []
Example #12
    def create_or_update(self, event_type, payload, timestamp):
        image_id = payload['id']
        try:
            image_payload = self.serialize_notification(payload)
            self.index_helper.save_document(image_payload,
                                            version=self.get_version(
                                                image_payload, timestamp))
            return pipeline.IndexItem(self.index_helper.plugin, event_type,
                                      payload, image_payload)
        except glanceclient.exc.NotFound:
            LOG.warning(_LW("Image %s not found; deleting") % image_id)
            return self.delete(event_type, payload, timestamp)
Example #13
def _get_image_members(image):
    if image['visibility'] == 'public':
        return []

    try:
        g_client = openstack_clients.get_glanceclient()
        members = g_client.image_members.list(image['id'])
        return members
    except glanceclient.exc.Unauthorized:
        LOG.warning(
            _LW("Could not list image members for %s; forbidden") %
            image['id'])
        return []
Example #14
    def save_documents(self, documents, versions=None, index=None):
        """Send list of serialized documents into search engine.

           Warning: Index vs Alias usage.
           Listeners [plugins/*/notification_handlers.py]:
           When the plugin listeners are indexing documents, we will want
           to use the normal ES alias for their resource group. In this case
           the index parameter will not be set. Listeners are by far the most
           common usage case.

           Re-Indexing [plugins/base.py::index_initial_data()]:
           When we are re-indexing we will want to use the new ES index.
           Bypassing the alias means we will not send duplicate documents
           to the old index. In this case the index will be set. Re-indexing
           is an event that will rarely happen.
        """
        if not index:
            use_index = self.alias_name
        else:
            use_index = index

        for document in documents:
            # NOTE: In Elasticsearch 2.0 field names cannot contain '.', change
            # '.' to '__'.
            utils.replace_dots_in_field_names(document)

        try:
            result = helpers.bulk(
                client=self.engine,
                index=use_index,
                doc_type=self.document_type,
                chunk_size=self.index_chunk_size,
                actions=self._prepare_actions(documents, versions))
        except helpers.BulkIndexError as e:
            err_msg = []
            for err in e.errors:
                if "VersionConflict" not in err['index']['error']:
                    raise
                err_msg.append("id %(_id)s: %(error)s" % err['index'])
            LOG.warning(_LW('Version conflict %s') % ';'.join(err_msg))
            result = 0
        except es_exc.RequestError as e:
            if _is_multiple_alias_exception(e):
                LOG.error(_LE("Alias [%(a)s] with multiple indexes error") %
                          {'a': self.alias_name})
                self._index_alias_multiple_indexes_bulk(documents=documents,
                                                        versions=versions)

            result = 0
        LOG.debug("Indexing result: %s", result)
Example #15
def validate_key_cert(key_file, cert_file):
    try:
        error_key_name = "private key"
        error_filename = key_file
        with open(key_file, 'r') as keyfile:
            key_str = keyfile.read()
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)

        error_key_name = "certificate"
        error_filename = cert_file
        with open(cert_file, 'r') as certfile:
            cert_str = certfile.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except IOError as ioe:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s.  Please verify it."
                             "  Error: %(ioe)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ioe': ioe})
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s.  Please verify it. OpenSSL"
                             " error: %(ce)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ce': ce})

    try:
        data = str(uuid.uuid4())
        digest = CONF.digest_algorithm
        if digest == 'sha1':
            LOG.warning(
                _LW('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
                    ' state that the SHA-1 is not suitable for'
                    ' general-purpose digital signature applications (as'
                    ' specified in FIPS 186-3) that require 112 bits of'
                    ' security. The default value is sha1 in Kilo for a'
                    ' smooth upgrade process, and it will be updated'
                    ' with sha256 in next release(L).'))
        out = crypto.sign(key, data, digest)
        crypto.verify(cert, out, data, digest)
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your key pair.  "
                             "Please verify that cert %(cert_file)s and "
                             "key %(key_file)s belong together.  OpenSSL "
                             "error %(ce)s") % {'cert_file': cert_file,
                                                'key_file': key_file,
                                                'ce': ce})
Example #16
    def create_or_update(self, payload, timestamp):
        image_id = payload['id']
        try:
            payload = self.serialize_notification(payload)
            self.index_helper.save_document(
                payload,
                version=self.get_version(payload, timestamp))
        except glanceclient.exc.NotFound:
            LOG.warning(_LW("Image %s not found; deleting") % image_id)
            try:
                self.index_helper.delete_document({'_id': image_id})
            except Exception as exc:
                LOG.error(_LE(
                    'Error deleting image %(image_id)s from index: %(exc)s') %
                    {'image_id': image_id, 'exc': exc})
Example #17
    def process(self, ctxt, publisher_id, event_type, payload, metadata):
        if (event_type == 'dns.zone.update' and
                payload.get('status') == 'DELETED'):
            LOG.debug("Ignoring update notification for Domain with DELETED "
                      "status; event will be processed on delete event")
            return None

        items = super(ZoneHandler, self).process(
            ctxt, publisher_id, event_type, payload, metadata)
        try:
            # NOTE: So if this is an initial zone we need to index the SOA / NS
            # records it will have. Let's do this when receiving the create
            # event.
            if event_type == 'dns.zone.create':
                if not items:
                    LOG.warning(_LW("Not writing initial recordsets; exception"
                                    "occurred during zone indexing"))
                    return None

                recordsets = designate._get_recordsets(payload['id'])
                serialized_recordsets = []
                recordset_versions = []
                for rs in recordsets:
                    rs = designate._serialize_recordset(rs)

                    # So project ID isn't provided in the recordset api.
                    rs['project_id'] = payload['project_id']

                    serialized_recordsets.append(rs)

                    # Use the timestamp from *this* notification but the
                    # updated_at from each recordset (which empirically appears
                    # to be the same for all initial recordsets)
                    recordset_versions.append(
                        self.get_version(rs, metadata['timestamp']))
                    items.append(
                        pipeline.IndexItem(self.recordset_helper.plugin,
                                           event_type,
                                           payload,
                                           rs
                                           )
                    )
                self.recordset_helper.save_documents(
                    serialized_recordsets, versions=recordset_versions)
                return items
        except Exception as e:
            LOG.exception(e)
Example #18
    def create_or_update(self, event_type, payload, timestamp):
        volume_id = payload['volume_id']
        LOG.debug("Updating cinder volume information for %s", volume_id)

        try:
            volume_payload = serialize_cinder_volume(volume_id)
            version = self.get_version(volume_payload, timestamp)
            self.index_helper.save_document(volume_payload, version=version)
            return pipeline.IndexItem(
                self.index_helper.plugin,
                event_type,
                payload,
                volume_payload
            )
        except cinderclient.exceptions.NotFound:
            LOG.warning(_LW("Volume %s not found; deleting") % volume_id)
            self.delete(payload, timestamp)
Example #19
    def process_response(self, resp):
        try:
            request_id = resp.request.context.request_id
        except AttributeError:
            LOG.warning(_LW('Unable to retrieve request id from context'))
        else:
            # For python 3 compatibility need to use bytes type
            prefix = 'req-'
            if isinstance(request_id, bytes):
                prefix = b'req-'

            if not request_id.startswith(prefix):
                request_id = prefix + request_id

            resp.headers['x-openstack-request-id'] = request_id

        return resp
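A quick sketch of why the prefix type matters under Python 3 (values are illustrative):

# Concatenating bytes and str raises TypeError on Python 3, so the
# prefix must match the request id's type before prepending it.
request_id = b'12345'
prefix = b'req-' if isinstance(request_id, bytes) else 'req-'
assert prefix + request_id == b'req-12345'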
Example #20
    def process_response(self, resp):
        try:
            request_id = resp.request.context.request_id
        except AttributeError:
            LOG.warning(_LW('Unable to retrieve request id from context'))
        else:
            # For python 3 compatibility need to use bytes type
            prefix = 'req-'
            if isinstance(request_id, bytes):
                prefix = b'req-'

            if not request_id.startswith(prefix):
                request_id = prefix + request_id

            resp.headers['x-openstack-request-id'] = request_id

        return resp
Example #21
    def save_documents(self, documents, versions=None, index=None):
        """Send list of serialized documents into search engine.

           Warning: Index vs Alias usage.
           Listeners [plugins/*/notification_handlers.py]:
           When the plugin listeners are indexing documents, we will want
           to use the normal ES alias for their resource group. In this case
           the index parameter will not be set. Listeners are by far the most
           common usage case.

           Re-Indexing [plugins/base.py::index_initial_data()]:
           When we are re-indexing we will want to use the new ES index.
           Bypassing the alias means we will not send duplicate documents
           to the old index. In this case the index will be set. Re-indexing
           is an event that will rarely happen.
        """
        if not index:
            use_index = self.alias_name
        else:
            use_index = index

        try:
            result = helpers.bulk(
                client=self.engine,
                index=use_index,
                doc_type=self.document_type,
                chunk_size=self.index_chunk_size,
                actions=self._prepare_actions(documents, versions))
        except helpers.BulkIndexError as e:
            err_msg = []
            for err in e.errors:
                if "VersionConflict" not in err['index']['error']:
                    raise
                err_msg.append("id %(_id)s: %(error)s" % err['index'])
            LOG.warning(_LW('Version conflict %s') % ';'.join(err_msg))
            result = 0
        except es_exc.RequestError as e:
            if _is_multiple_alias_exception(e):
                LOG.error(_LE("Alias [%(a)s] with multiple indexes error") %
                          {'a': self.alias_name})
                self._index_alias_multiple_indexes_bulk(documents=documents,
                                                        versions=versions)

            result = 0
        LOG.debug("Indexing result: %s", result)
Example #22
    def index_from_api(self, payload, timestamp):
        """Index from the nova API"""
        instance_id = payload['instance_id']
        LOG.debug("Updating nova server information for %s", instance_id)
        try:
            serialized_payload = serialize_nova_server(instance_id)
            self.index_helper.save_document(serialized_payload,
                                            version=self.get_version(
                                                serialized_payload, timestamp))
        except novaclient.exceptions.NotFound:
            LOG.warning(_LW("Instance %s not found; deleting") % instance_id)

            # Where a notification represents an in-progress delete, we will
            # also receive an 'instance.delete' notification shortly
            deleted = (payload.get('state_description') == 'deleting'
                       or payload.get('state') == 'deleted')
            if not deleted:
                self.delete(payload, timestamp)
Example #23
    def index_from_api(self, payload, timestamp):
        """Index from the nova API"""
        instance_id = payload['instance_id']
        LOG.debug("Updating nova server information for %s", instance_id)
        try:
            serialized_payload = serialize_nova_server(instance_id)
            self.index_helper.save_document(
                serialized_payload,
                version=self.get_version(serialized_payload, timestamp))
        except novaclient.exceptions.NotFound:
            LOG.warning(_LW("Instance %s not found; deleting") % instance_id)

            # Where a notification represents an in-progress delete, we will
            # also receive an 'instance.delete' notification shortly
            deleted = (payload.get('state_description') == 'deleting' or
                       payload.get('state') == 'deleted')
            if not deleted:
                self.delete(payload, timestamp)
Example #24
def transform_facets_results(result_aggregations, resource_type):
    """This effectively reverses the logic from `get_facets_query`,
    and produces output that looks the same regardless of whether
    a faceted field happens to be nested.

    The input should be the value of the `aggregations` key in the
    Elasticsearch response. Inputs can be of two forms:
      {"not_nested": {"buckets": [{"key": "something", "doc_count": 10}..]},
       "nested": {"nested": {"buckets": [
          {"key": 4, "doc_count": 2, "nested__unique_docs": {"doc_count": 1}},
          {"key": 6, "doc_count": 3, "nested__unique_docs": {"doc_count": 2}}
       ]}}}

    Output is normalized (using the __unique_docs count) to a mapping of
    field name to bucket list:
      {"not_nested": [{"key": "something", "doc_count": 10}..],
       "nested": [
          {"key": 4, "doc_count": 1},
          {"key": 6, "doc_count": 2}
       ]}
    """
    facet_terms = {}
    for term, aggregation in six.iteritems(result_aggregations):
        if term in aggregation:
            # Again, deeper nesting question
            term_buckets = aggregation[term]['buckets']
            for bucket in term_buckets:
                reversed_agg = bucket.pop(term + "__unique_docs")
                bucket["doc_count"] = reversed_agg["doc_count"]
            facet_terms[term] = term_buckets
        elif 'buckets' in aggregation:
            facet_terms[term] = aggregation['buckets']
        else:
            # This can happen when there's no mapping defined at all..
            format_msg = {
                'field': term,
                'resource_type': resource_type
            }
            LOG.warning(_LW(
                "Unexpected aggregation structure for field "
                "'%(field)s' in %(resource_type)s. Is the mapping "
                "defined correctly?") % format_msg)
            facet_terms[term] = []
    return facet_terms
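A worked sketch of the transformation, using the shapes from the docstring (the resource type string is illustrative):

# Input mirroring the docstring; the nested facet is unwrapped and its
# doc_count replaced by the __unique_docs count.
aggs = {
    "not_nested": {"buckets": [{"key": "something", "doc_count": 10}]},
    "nested": {"nested": {"buckets": [
        {"key": 4, "doc_count": 2, "nested__unique_docs": {"doc_count": 1}},
        {"key": 6, "doc_count": 3, "nested__unique_docs": {"doc_count": 2}},
    ]}},
}
facet_terms = transform_facets_results(aggs, "OS::Nova::Server")
# facet_terms == {"not_nested": [{"key": "something", "doc_count": 10}],
#                 "nested": [{"key": 4, "doc_count": 1},
#                            {"key": 6, "doc_count": 2}]}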
Example #25
    def process(self, ctxt, publisher_id, event_type, payload, metadata):
        if (event_type == 'dns.zone.update' and
                payload.get('status') == 'DELETED'):
            LOG.debug("Ignoring update notification for Domain with DELETED "
                      "status; event will be processed on delete event")
            return oslo_messaging.NotificationResult.HANDLED

        handled = super(ZoneHandler, self).process(
            ctxt, publisher_id, event_type, payload, metadata)
        try:
            # NOTE: So if this is an initial zone we need to index the SOA / NS
            # records it will have. Let's do this when receiving the create
            # event.
            if event_type == 'dns.zone.create':
                if handled != oslo_messaging.NotificationResult.HANDLED:
                    LOG.warning(_LW("Not writing initial recordsets; exception"
                                    "occurred during zone indexing"))
                    return None

                recordsets = designate._get_recordsets(payload['id'])
                serialized_recordsets = []
                recordset_versions = []
                for rs in recordsets:
                    rs = designate._serialize_recordset(rs)

                    # So project ID isn't provided in the recordset api.
                    rs['project_id'] = payload['project_id']

                    serialized_recordsets.append(rs)

                    # Use the timestamp from *this* notification but the
                    # updated_at from each recordset (which empirically appears
                    # to be the same for all initial recordsets)
                    recordset_versions.append(
                        self.get_version(rs, metadata['timestamp']))

                self.recordset_helper.save_documents(
                    serialized_recordsets, versions=recordset_versions)

            return oslo_messaging.NotificationResult.HANDLED
        except Exception as e:
            LOG.exception(e)
Example #26
    def get_docs_by_nested_field(self, path, field, value, version=False):
        """Query ElasticSearch based on a nested field. The caller will
           need to specify the path of the nested field as well as the
           field itself. We will include the 'version' field if commanded
           as such by the caller.
        """
        # Set up query for accessing a nested field.
        nested_field = path + "." + field
        body = {"query": {"nested": {
                "path": path, "query": {"term": {nested_field: value}}}}}
        if version:
            body['version'] = True
        try:
            return self.engine.search(index=self.alias_name,
                                      doc_type=self.document_type,
                                      body=body, ignore_unavailable=True)
        except Exception as exc:
            LOG.warning(_LW(
                'Error querying %(p)s %(f)s. Error %(exc)s') %
                {'p': path, 'f': field, 'exc': exc})
            raise
Example #27
    def process_request(self, req):
        """Try to find a version first in the accept header, then the URL"""
        msg = _("Determining version of request: %(method)s %(path)s"
                " Accept: %(accept)s")
        args = {'method': req.method, 'path': req.path, 'accept': req.accept}
        LOG.debug(msg % args)

        LOG.debug("Using url versioning")
        # Remove version in url so it doesn't conflict later
        req_version = self._pop_path_info(req)

        try:
            version = self._match_version_string(req_version)
        except ValueError:
            LOG.warning(_LW("Unknown version. Returning version choices."))
            return self.versions_app

        req.environ['api.version'] = version
        req.path_info = ''.join(('/v', str(version), req.path_info))
        LOG.debug("Matched version: v%d", version)
        LOG.debug('new path %s', req.path_info)
        return None
Example #28
    def process_request(self, req):
        """Try to find a version first in the accept header, then the URL"""
        msg = _("Determining version of request: %(method)s %(path)s"
                " Accept: %(accept)s")
        args = {'method': req.method, 'path': req.path, 'accept': req.accept}
        LOG.debug(msg % args)

        LOG.debug("Using url versioning")
        # Remove version in url so it doesn't conflict later
        req_version = self._pop_path_info(req)

        try:
            version = self._match_version_string(req_version)
        except ValueError:
            LOG.warning(_LW("Unknown version. Returning version choices."))
            return self.versions_app

        req.environ['api.version'] = version
        req.path_info = ''.join(('/v', str(version), req.path_info))
        LOG.debug("Matched version: v%d", version)
        LOG.debug('new path %s', req.path_info)
        return None
Example #29
    def _plugin_api(self, plugin_obj, index_names):
        """Helper to re-index using the plugin API within a thread, allowing
           all plugins to re-index simultaneously. We may need to cleanup.
           See sig_handler() for more info.
        """
        gname = plugin_obj.resource_group_name
        index_name = index_names[gname]
        dtype = plugin_obj.document_type

        LOG.info(
            _LI("API Reindex start %(type)s into %(index_name)s") % {
                'type': dtype,
                'index_name': index_name
            })

        try:
            plugin_obj.index_initial_data(index_name=index_name)
            es_utils.refresh_index(index_name)

            LOG.info(
                _LI("API Reindex end %(type)s into %(index_name)s") % {
                    'type': dtype,
                    'index_name': index_name
                })
        except exceptions.EndpointNotFound:
            # Display a warning, do not propagate.
            doc = plugin_obj.get_document_type()
            LOG.warning(
                _LW("Service is not available for plugin: "
                    "%(doc)s") % {"doc": doc})
        except Exception as e:
            LOG.exception(
                _LE("Failed to setup index extension "
                    "%(ex)s: %(e)s") % {
                        'ex': index_name,
                        'e': e
                    })
            raise
Example #30
        def sig_handler(signum, frame):
            """When rudely interrupted by the user, we will want to clean up
               after ourselves. We have potentially three pieces of unfinished
               business.
                   1. We have running threads. Cancel them.
                   2. Wait for all threads to finish.
                   3. We created new indices in Elasticsearch. Remove them.
            """
            # Cancel any and all threads.
            for future in futures:
                future.cancel()

            # Politely wait for the current threads to finish.
            LOG.warning(
                _LW("Interrupt received, waiting for threads to finish"
                    " before cleaning up"))
            wait_for_threads()

            # Rudely remove any newly created Elasticsearch indices.
            if index_names:
                es_utils.alias_error_cleanup(index_names)

            sys.exit(0)
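sig_handler closes over futures and index_names from the enclosing scope; wiring it up would look something like this (the registration itself is an assumption, not part of the code above):

import signal

# Assumed registration: run the cleanup handler when the re-index is
# interrupted from the terminal.
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)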
Example #31
    @property
    def resource_group_name(self):
        if not getattr(self, '_group_name', None):
            if self.parent_plugin:
                self._group_name = self.parent_plugin.resource_group_name
                if self.options.resource_group_name is not None and \
                        self.options.resource_group_name != self._group_name:
                    LOG.warning(_LW(
                        "Overriding resource_group for %(plugin)s because it "
                        "differs from parent plugin %(parent)s resource_group "
                        "%(resource_group)s") %
                        {"plugin": self.document_type,
                         "parent": self.parent_plugin.document_type,
                         "resource_group": self._group_name}
                    )
            else:
                if self.options.resource_group_name is not None:
                    self._group_name = self.options.resource_group_name
                elif cfg.CONF.resource_plugin.resource_group_name is not None:
                    self._group_name = \
                        cfg.CONF.resource_plugin.resource_group_name
                else:
                    self._group_name = "searchlight"
        return self._group_name
Example #32
    def _load_rules(self):
        try:
            conf_file = CONF.find_file(CONF.property_protection_file)
            CONFIG.read(conf_file)
        except Exception as e:
            msg = (_LE("Couldn't find property protection file %(file)s: "
                       "%(error)s.") % {
                           'file': CONF.property_protection_file,
                           'error': e
                       })
            LOG.error(msg)
            raise InvalidPropProtectConf()

        if self.prop_prot_rule_format not in ['policies', 'roles']:
            msg = _LE("Invalid value '%s' for "
                      "'property_protection_rule_format'. "
                      "The permitted values are "
                      "'roles' and 'policies'") % self.prop_prot_rule_format
            LOG.error(msg)
            raise InvalidPropProtectConf()

        operations = ['create', 'read', 'update', 'delete']
        properties = CONFIG.sections()
        for property_exp in properties:
            property_dict = {}
            compiled_rule = self._compile_rule(property_exp)

            for operation in operations:
                permissions = CONFIG.get(property_exp, operation)
                if permissions:
                    if self.prop_prot_rule_format == 'policies':
                        if ',' in permissions:
                            LOG.error(
                                _LE("Multiple policies '%s' not allowed "
                                    "for a given operation. Policies can be "
                                    "combined in the policy file"),
                                permissions)
                            raise InvalidPropProtectConf()
                        self.prop_exp_mapping[compiled_rule] = property_exp
                        self._add_policy_rules(property_exp, operation,
                                               permissions)
                        permissions = [permissions]
                    else:
                        permissions = [
                            permission.strip()
                            for permission in permissions.split(',')
                        ]
                        if '@' in permissions and '!' in permissions:
                            msg = (
                                _LE("Malformed property protection rule in "
                                    "[%(prop)s] %(op)s=%(perm)s: '@' and '!' "
                                    "are mutually exclusive") % {
                                        'prop': property_exp,
                                        'op': operation,
                                        'perm': permissions
                                    })
                            LOG.error(msg)
                            raise InvalidPropProtectConf()
                    property_dict[operation] = permissions
                else:
                    property_dict[operation] = []
                    LOG.warning(
                        _LW('Property protection on operation %(operation)s'
                            ' for rule %(rule)s is not found. No role will be'
                            ' allowed to perform this operation.') % {
                                'operation': operation,
                                'rule': property_exp
                            })

            self.rules.append((compiled_rule, property_dict))
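For context, a sketch of the property-protection file this parser reads when property_protection_rule_format is 'roles'. The section name is a property regex and each operation lists permitted roles; the rule below is made up:

# Illustrative property protection rules (INI text, shown as a Python
# string). '@' would permit all roles and '!' none, but the code above
# rejects mixing the two within one operation.
SAMPLE_RULES = """
[^x_owner_.*]
create = admin,member
read = admin,member
update = admin
delete = admin
"""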
Example #33
    def delete_documents(self, metadocs, override_role_separation=False):
        """Each metadoc should be a dict with at an _id, and if
         applicable, a _parent. override_role_separation will treat the _ids
         and _parents in the documents as their actual indexed values
         rather than determining role separation
         """
        def _get_delete_action(doc, id_suffix=''):
            action = {'_op_type': 'delete', '_id': doc['_id'] + id_suffix}

            if doc.get('_version'):
                action['_version'] = doc['_version']
                action['_version_type'] = 'external'

            parent_entity_id = doc.get('_parent')
            if parent_entity_id:
                if (not override_role_separation and
                        self.plugin.parent_plugin.requires_role_separation):
                    # Default to _USER; defaulting to _ADMIN causes a
                    # security issue because of potential fishing queries
                    parent_entity_id += (id_suffix or USER_ID_SUFFIX)
                action['_parent'] = parent_entity_id
            if '_routing' in doc:
                action['_routing'] = doc['_routing']
            return action

        actions = []
        for metadoc in metadocs:
            if (not override_role_separation and
                    self.plugin.requires_role_separation):
                actions.extend([
                    _get_delete_action(metadoc, ADMIN_ID_SUFFIX),
                    _get_delete_action(metadoc, USER_ID_SUFFIX)])
            else:
                actions.append(_get_delete_action(metadoc))

        try:
            helpers.bulk(
                client=self.plugin.engine,
                index=self.alias_name,
                doc_type=self.document_type,
                actions=actions
            )
        except helpers.BulkIndexError as exc:
            exc_payload = exc.errors
            doc_ids = ', '.join(e['delete']['_id'] for e in exc_payload)

            if all(e['delete']['status'] == 404 for e in exc_payload):
                LOG.warning(
                    _LW("Error deleting %(doc_type)s %(ids)s; "
                        "already deleted") %
                    {"doc_type": self.plugin.document_type, "ids": doc_ids})

            elif all(e['delete']['status'] == 409 for e in exc_payload):
                # This *should* never happen. If it does, something has gone
                # wrong but leaving this here for now
                LOG.warning(
                    _LW("Error deleting %(doc_type)s %(ids)s; newer versions "
                        "of some documents have been indexed") %
                    {"doc_type": self.plugin.document_type, "ids": doc_ids})
            else:
                raise
        except es_exc.RequestError as e:
            if _is_multiple_alias_exception(e):
                LOG.error(_LE("Alias [%(a)s] with multiple indexes error") %
                          {'a': self.alias_name})
                self._index_alias_multiple_indexes_bulk(actions=actions)
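For reference, a sketch of a metadoc and the delete action derived from it when role separation applies (the ids are made up):

# Hypothetical metadoc and one of the two role-separated actions
# _get_delete_action would build from it (the _USER variant is analogous).
metadoc = {'_id': 'abc123', '_version': 1466000000, '_parent': 'img456'}
action = {
    '_op_type': 'delete',
    '_id': 'abc123' + ADMIN_ID_SUFFIX,
    '_version': 1466000000,
    '_version_type': 'external',
    '_parent': 'img456' + ADMIN_ID_SUFFIX,
}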
Example #34
    def delete_documents(self, metadocs, override_role_separation=False):
        """Each metadoc should be a dict with at an _id, and if
         applicable, a _parent. override_role_separation will treat the _ids
         and _parents in the documents as their actual indexed values
         rather than determining role separation
         """
        def _get_delete_action(doc, id_suffix=''):
            action = {'_op_type': 'delete', '_id': doc['_id'] + id_suffix}

            if doc.get('_version'):
                action['_version'] = doc['_version']
                action['_version_type'] = 'external'

            parent_entity_id = doc.get('_parent')
            if parent_entity_id:
                if (not override_role_separation and
                        self.plugin.parent_plugin.requires_role_separation):
                    # Default to _USER; defaulting to _ADMIN causes a
                    # security issue because of potential fishing queries
                    parent_entity_id += (id_suffix or USER_ID_SUFFIX)
                action['_parent'] = parent_entity_id
            if '_routing' in doc:
                action['_routing'] = doc['_routing']
            return action

        actions = []
        for metadoc in metadocs:
            if (not override_role_separation
                    and self.plugin.requires_role_separation):
                actions.extend([
                    _get_delete_action(metadoc, ADMIN_ID_SUFFIX),
                    _get_delete_action(metadoc, USER_ID_SUFFIX)
                ])
            else:
                actions.append(_get_delete_action(metadoc))

        try:
            helpers.bulk(client=self.plugin.engine,
                         index=self.alias_name,
                         doc_type=self.document_type,
                         actions=actions)
        except helpers.BulkIndexError as exc:
            exc_payload = exc.errors
            doc_ids = ', '.join(e['delete']['_id'] for e in exc_payload)

            if all(e['delete']['status'] == 404 for e in exc_payload):
                LOG.warning(
                    _LW("Error deleting %(doc_type)s %(ids)s; "
                        "already deleted") % {
                            "doc_type": self.plugin.document_type,
                            "ids": doc_ids
                        })

            elif all(e['delete']['status'] == 409 for e in exc_payload):
                # This *should* never happen. If it does, something has gone
                # wrong but leaving this here for now
                LOG.warning(
                    _LW("Error deleting %(doc_type)s %(ids)s; newer versions "
                        "of some documents have been indexed") % {
                            "doc_type": self.plugin.document_type,
                            "ids": doc_ids
                        })
            else:
                raise
        except es_exc.RequestError as e:
            if _is_multiple_alias_exception(e):
                LOG.error(
                    _LE("Alias [%(a)s] with multiple indexes error") %
                    {'a': self.alias_name})
                self._index_alias_multiple_indexes_bulk(actions=actions)