Example #1
    def delete(self, payload, timestamp):
        # notification payload doesn't have any date/time fields
        # so temporarily use metadata timestamp value as
        # updated_at field to retrieve version
        # Remove it when notification starts sending datetime field(s)
        payload['updated_at'] = timestamp
        version = self.get_version(payload,
                                   timestamp,
                                   preferred_date_field='deleted_at')
        del payload['updated_at']

        id = payload['account'] + swift.ID_SEP + payload['container']
        try:
            self.object_helper.delete_documents_with_parent(id,
                                                            version=version)
        except Exception as exc:
            LOG.error(
                _LE('Error deleting objects in container %(id)s '
                    'from index. Error: %(exc)s') % {
                        'id': id,
                        'exc': exc
                    })
        try:
            self.index_helper.delete_document({
                '_id': id,
                '_version': version,
                '_routing': payload['account']
            })
        except Exception as exc:
            LOG.error(
                _LE('Error deleting container %(id)s '
                    'from index. Error: %(exc)s') % {
                        'id': id,
                        'exc': exc
                    })
Example #2
    def delete(self, payload, timestamp):
        # notification payload doesn't have any date/time fields
        # so temporarily use metadata timestamp value as
        # updated_at field to retrieve version
        # Remove it when notification starts sending datetime field(s)
        payload['updated_at'] = timestamp
        version = self.get_version(payload, timestamp,
                                   preferred_date_field='deleted_at')
        del payload['updated_at']

        id = payload['account'] + swift.ID_SEP + payload['container']
        try:
            self.object_helper.delete_documents_with_parent(
                id, version=version)
        except Exception as exc:
            LOG.error(_LE('Error deleting objects in container %(id)s '
                          'from index. Error: %(exc)s') %
                      {'id': id, 'exc': exc})
        try:
            self.index_helper.delete_document(
                {'_id': id, '_version': version,
                 '_routing': payload['account']})
        except Exception as exc:
            LOG.error(_LE('Error deleting container %(id)s '
                          'from index. Error: %(exc)s') %
                      {'id': id, 'exc': exc})
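For context, a hedged sketch of the inputs this handler receives; the payload
keys ('account', 'container') come from the code above, while the values, the
timestamp format, and the ID_SEP value are assumptions:

    # Hypothetical Swift container-delete notification.
    payload = {'account': 'AUTH_abc123', 'container': 'images'}
    timestamp = '2016-01-01 12:00:00.000000'  # assumed format
    # Assuming swift.ID_SEP == '/', the document id built above would
    # be 'AUTH_abc123/images'.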
Example #3
 def _index_alias_multiple_indexes_get(self, doc_id, routing=None):
     """Getting a document from an alias with multiple indexes will fail.
        We need to retrieve it from one of the indexes. We will choose
        the latest index. Since the indexes are named with a timestamp,
        a reverse sort will bring the latest to the front.
     """
     indexes = self.engine.indices.get_alias(index=self.alias_name)
     # dict views have no .sort(); sorted() works on py2 and py3
     index_list = sorted(indexes, reverse=True)
     try:
         if routing:
             return self.engine.get(
                 index=index_list[0],
                 doc_type=self.document_type,
                 id=doc_id,
                 routing=routing
             )
         else:
             return self.engine.get(
                 index=index_list[0],
                 doc_type=self.document_type,
                 id=doc_id
             )
     except Exception as e:
         format_msg = {
             'doc': self.document_type,
             'msg': str(e)
         }
         LOG.error(_LE("Failed Indexing %(doc)s: %(msg)s") % format_msg)
Example #4
 def _index_alias_multiple_indexes_bulk(self, documents=None, actions=None,
                                        versions=None):
     """A bulk operation failed by trying to access an alias that has
        multiple indexes. To remedy this we will need to iterate on all
        indexes within the alias and retry the bulk operation individually.
     """
     indexes = self.engine.indices.get_alias(index=self.alias_name)
     for index_name in indexes:
         try:
             if documents:
                 result = helpers.bulk(
                     client=self.engine,
                     index=index_name,
                     doc_type=self.document_type,
                     chunk_size=self.index_chunk_size,
                     actions=self._prepare_actions(documents,
                                                   versions))
             if actions:
                 result = helpers.bulk(
                     client=self.engine,
                     index=index_name,
                     doc_type=self.document_type,
                     chunk_size=self.index_chunk_size,
                     actions=actions)
             LOG.debug("Indexing result: %s", result)
         except Exception as e:
             # Log the error and continue to the next index.
             format_msg = {
                 'doc': self.document_type,
                 'msg': str(e)
             }
             LOG.error(_LE("Failed Indexing %(doc)s: %(msg)s") % format_msg)
Example #5
def _get_enforcers():
    global _ENFORCERS
    if not _ENFORCERS:
        _ENFORCERS = {}
        pol_files = cfg.CONF.service_policies.service_policy_files
        for service, pol_file in six.iteritems(pol_files):
            base_path = str(cfg.CONF.service_policies.service_policy_path)
            service_policy_path = os.path.join(base_path,
                                               pol_file)
            enforcer = policy.Enforcer(cfg.CONF, service_policy_path)
            missing_config_file = False

            # oslo.policy's approach to locating these files seems to be
            # changing; current master doesn't raise an exception
            try:
                enforcer.load_rules()
                if not enforcer.policy_path:
                    missing_config_file = True
            except cfg.ConfigFilesNotFoundError:
                missing_config_file = True

            if missing_config_file:
                LOG.error(_LE("Policy file for service %(service)s not found"
                              " in %(policy_file)s (base path %(base)s)") %
                          {"service": service, "policy_file": pol_file,
                           "base": service_policy_path})
                raise MissingPolicyFile(
                    "Could not find policy file %(pol_file)s for service "
                    "type %(service)s" % {'pol_file': pol_file,
                                          'service': service})

            LOG.debug("Adding policy enforcer for %s" % service)
            _ENFORCERS[service] = enforcer

    return _ENFORCERS
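A hedged usage sketch of the enforcer cache built above; the service key and
rule name are invented, but enforce(rule, target, creds) is the standard
oslo.policy entry point:

    # Hypothetical caller looking up a per-service policy enforcer.
    enforcers = _get_enforcers()
    if 'nova' in enforcers:
        allowed = enforcers['nova'].enforce(
            'os_compute_api:servers:index',   # rule name (illustrative)
            {},                               # target object
            {'roles': ['member'], 'project_id': 'tenant-1'})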
Example #6
    def get_document(self, doc_id, for_admin=False, routing=None):
        if self.plugin.requires_role_separation:
            doc_id += (ADMIN_ID_SUFFIX if for_admin else USER_ID_SUFFIX)

        try:
            if routing:
                return self.engine.get(
                    index=self.alias_name,
                    doc_type=self.document_type,
                    id=doc_id,
                    routing=routing
                )
            else:
                return self.engine.get(
                    index=self.alias_name,
                    doc_type=self.document_type,
                    id=doc_id
                )
        except es_exc.RequestError:
            # TODO(ricka) Verify this is the IllegalArgument exception.
            LOG.error(_LE("Alias [%(alias)s] with multiple indexes error") %
                      {'alias': self.alias_name})
            #
            return self._index_alias_multiple_indexes_get(
                doc_id=doc_id, routing=routing)
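When role separation is enabled, each logical document is stored twice with a
role suffix appended to its id. A hedged illustration, assuming the suffix
values are '_ADMIN' and '_USER' (an assumption; only the suffixing behavior
appears in the code above):

    helper.get_document('abc', for_admin=True)   # fetches id 'abc_ADMIN'
    helper.get_document('abc')                   # fetches id 'abc_USER'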
Example #7
 def _index_alias_multiple_indexes_bulk(self,
                                        documents=None,
                                        actions=None,
                                        versions=None):
     """A bulk operation failed by trying to access an alias that has
        multiple indexes. To remedy this we will need to iterate on all
        indexes within the alias and retry the bulk operation individually.
     """
     indexes = self.engine.indices.get_alias(index=self.alias_name)
     for index_name in indexes:
         try:
             if documents:
                 result = helpers.bulk(client=self.engine,
                                       index=index_name,
                                       doc_type=self.document_type,
                                       chunk_size=self.index_chunk_size,
                                       actions=self._prepare_actions(
                                           documents, versions))
             if actions:
                 result = helpers.bulk(client=self.engine,
                                       index=index_name,
                                       doc_type=self.document_type,
                                       chunk_size=self.index_chunk_size,
                                       actions=actions)
             LOG.debug("Indexing result: %s", result)
         except Exception as e:
             # Log the error and continue to the next index.
             format_msg = {'doc': self.document_type, 'msg': str(e)}
             LOG.error(_LE("Failed Indexing %(doc)s: %(msg)s") % format_msg)
Example #8
 def _index_alias_multiple_indexes_get(self, doc_id, routing=None):
     """Getting a document from an alias with multiple indexes will fail.
        We need to retrieve it from one of the indexes. We will choose
        the latest index. Since the indexes are named with a timestamp,
        a reverse sort will bring the latest to the front.
     """
     indexes = self.engine.indices.get_alias(index=self.alias_name)
     # dict views have no .sort(); sorted() works on py2 and py3
     index_list = sorted(indexes, reverse=True)
     try:
         if routing:
             return self.engine.get(
                 index=index_list[0],
                 doc_type=self.document_type,
                 id=doc_id,
                 routing=routing
             )
         else:
             return self.engine.get(
                 index=index_list[0],
                 doc_type=self.document_type,
                 id=doc_id
             )
     except Exception as e:
         format_msg = {
             'doc': self.document_type,
             'msg': str(e)
         }
         LOG.error(_LE("Failed Indexing %(doc)s: %(msg)s") % format_msg)
Example #9
    def get_document(self, doc_id, for_admin=False, routing=None):
        if self.plugin.requires_role_separation:
            doc_id += (ADMIN_ID_SUFFIX if for_admin else USER_ID_SUFFIX)

        try:
            if routing:
                return self.engine.get(
                    index=self.alias_name,
                    doc_type=self.document_type,
                    id=doc_id,
                    routing=routing
                )
            else:
                return self.engine.get(
                    index=self.alias_name,
                    doc_type=self.document_type,
                    id=doc_id
                )
        except es_exc.RequestError:
            # TODO(ricka) Verify this is the IllegalArgument exception.
            LOG.error(_LE("Alias [%(alias)s] with multiple indexes error") %
                      {'alias': self.alias_name})
            #
            return self._index_alias_multiple_indexes_get(
                doc_id=doc_id, routing=routing)
Example #10
 def delete_group(self, payload, timestamp):
     sec_id = payload['security_group_id']
     LOG.debug("Deleting security group information for %s", sec_id)
     try:
         self.index_helper.delete_document({'_id': sec_id})
     except Exception as exc:
         LOG.error(_LE(
             'Error deleting security_group %(sec_id)s. Error: %(exc)s') %
             {'sec_id': sec_id, 'exc': exc})
Example #11
    def _get_es_query(self,
                      context,
                      query,
                      resource_types,
                      all_projects=False):
        is_admin = context.is_admin
        ignore_rbac = is_admin and all_projects

        type_and_rbac_filters = []
        for resource_type in resource_types:
            plugin = self.plugins[resource_type].obj

            try:
                plugin_filter = plugin.get_query_filters(
                    context, ignore_rbac=ignore_rbac)

                type_and_rbac_filters.append(plugin_filter)
            except Exception as e:
                msg = _("Error processing %s RBAC filter") % resource_type
                LOG.error(
                    _LE("Failed to retrieve RBAC filters "
                        "from search plugin "
                        "%(ext)s: %(e)s") % {
                            'ext': plugin.name,
                            'e': e
                        })
                raise webob.exc.HTTPInternalServerError(explanation=msg)

        role_filter = {
            'term': {
                searchlight.elasticsearch.ROLE_USER_FIELD:
                context.user_role_filter
            }
        }

        # Create a filter query for the role filter; RBAC filters are added
        # in the next step
        es_query = {
            'bool': {
                'filter': {
                    'bool': {
                        'must': role_filter
                    }
                },
                'must': query
            }
        }

        if type_and_rbac_filters:
            # minimum_should_match: 1 is assumed in filter context,
            # but I'm including it explicitly so nobody spends an
            # hour scouring the documentation to check
            es_query['bool']['filter']['bool'].update({
                'should': type_and_rbac_filters,
                'minimum_should_match': 1
            })
        return {'query': es_query}
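For a single resource type, the body returned above would be shaped roughly as
follows (field name symbolic, filters and the caller's query invented):

    # {'query': {'bool': {
    #      'filter': {'bool': {
    #          'must': {'term': {ROLE_USER_FIELD: 'user'}},
    #          'should': [<per-type RBAC filters>],
    #          'minimum_should_match': 1}},
    #      'must': <the caller's query>}}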
Example #12
 def delete(self, payload, timestamp):
     fip_id = payload['floatingip_id']
     LOG.debug("Deleting floatingip information for %s", fip_id)
     try:
         self.index_helper.delete_document({'_id': fip_id})
     except Exception as exc:
         LOG.error(_LE(
             'Error deleting floating ip %(fip)s '
             'from index: %(exc)s') %
             {'fip': fip_id, 'exc': exc})
Example #13
    def topics_and_exchanges(self):
        topics_exchanges = set()
        for plugin_type, plugin in six.iteritems(self.plugins):
            try:
                handler = plugin.obj.get_notification_handler()
                topic_exchanges = handler.get_notification_topics_exchanges()
                for plugin_topic in topic_exchanges:
                    if isinstance(plugin_topic, six.string_types):
                        raise Exception(
                            _LE("Plugin %s should return a list of topic"
                                "exchange pairs") % plugin.__class__.__name__)
                    topics_exchanges.add(plugin_topic)
            except Exception as e:
                LOG.error(_LE("Failed to retrieve notification topic(s)"
                              " and exchanges from search plugin "
                              "%(ext)s: %(e)s") %
                          {'ext': plugin.name, 'e': e})

        return topics_exchanges
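Given the isinstance check above, handlers are expected to return
(topic, exchange) pairs rather than bare strings. A hedged sketch of a
conforming handler method; the topic and exchange names are invented:

    def get_notification_topics_exchanges(self):
        # Return an iterable of (topic, exchange) tuples, never strings.
        return [('notifications', 'nova'), ('notifications', 'neutron')]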
Example #14
 def create_or_update(self, payload, timestamp):
     payload = serialize_swift_container_notification(payload)
     try:
         self.index_helper.save_document(
             payload,
             version=self.get_version(payload, timestamp))
     except Exception as exc:
         LOG.error(_LE('Error saving container %(id)s '
                       'in index. Error: %(exc)s') %
                   {'id': payload['id'], 'exc': exc})
Example #15
 def delete(self, payload, timestamp):
     port_id = payload['port_id']
     LOG.debug("Deleting port information for %s; finding routing", port_id)
     try:
         self.index_helper.delete_document_unknown_parent(port_id)
     except Exception as exc:
         LOG.error(_LE(
             'Error deleting port %(port_id)s '
             'from index. Error: %(exc)s') %
             {'port_id': port_id, 'exc': exc})
Example #16
 def delete(self, payload, timestamp):
     router_id = payload['router_id']
     LOG.debug("Deleting router information for %s", router_id)
     try:
         self.index_helper.delete_document({'_id': router_id})
     except Exception as exc:
         LOG.error(_LE(
             'Error deleting router %(router)s '
             'from index: %(exc)s') %
             {'router': router_id, 'exc': exc})
Example #17
 def _compile_rule(self, rule):
     try:
         return re.compile(rule)
     except Exception as e:
         msg = (_LE("Encountered a malformed property protection rule"
                    " %(rule)s: %(error)s.") % {
                        'rule': rule,
                        'error': e
                    })
         LOG.error(msg)
         raise InvalidPropProtectConf()
Example #18
 def delete(self, payload, timestamp):
     image_id = payload['id']
     try:
         version = self.get_version(payload, timestamp,
                                    preferred_date_field='deleted_at')
         self.index_helper.delete_document(
             {'_id': image_id, '_version': version})
     except Exception as exc:
         LOG.error(_LE(
             'Error deleting image %(image_id)s from index: %(exc)s') %
             {'image_id': image_id, 'exc': exc})
Example #19
    def delete_document_unknown_parent(self, doc_id, version=None):
        """Deletes a document that requires routing but where the routing is
        not known. Since this involves a query to find it, there is a potential
        race condition in between storing and indexing the document. A search
        is run to find the routing parent, and the document is then deleted.
        """
        search_doc_id = doc_id
        if self.plugin.requires_role_separation:
            # Just look for the admin document though it doesn't matter which.
            # This has to match whatever we strip later with strip_role_suffix
            search_doc_id += ADMIN_ID_SUFFIX

        # Necessary to request 'fields' for e-s 1.x, not for 2.x
        query = {'filter': {'term': {'_id': search_doc_id}},
                 'fields': ['_parent', '_routing']}
        search_results = self.engine.search(index=self.alias_name,
                                            doc_type=self.document_type,
                                            body=query)
        total_hits = search_results['hits']['total']
        if not total_hits:
            ctx = {'doc_type': self.document_type, 'id': doc_id}
            LOG.warning(_LW(
                "No results found for %(doc_type)s id %(id)s; can't find "
                "routing to delete") % ctx)
            return

        # There are results. Check that there's only one unique result (may be
        # two in different indices from the same alias). ES 1.x and 2.x differ
        # slightly; metafields are returned by default and not in 'fields' in
        # 2.x whether you like it or not
        distinct_results = set((r['_id'], get_metafield(r, '_parent'))
                               for r in search_results['hits']['hits'])
        if len(distinct_results) != 1:
            ctx = {'count': len(distinct_results),
                   'results': ", ".join(str(r) for r in distinct_results),
                   'doc_type': self.document_type,
                   'id': doc_id}
            LOG.error(_LE("%(count)d distinct results (%(results)s) found for "
                          "get_document_by_query for %(doc_type)s id %(id)s") %
                      ctx)

        first_hit = search_results['hits']['hits'][0]
        routing = get_metafield(first_hit, '_routing')
        parent = get_metafield(first_hit, '_parent')
        parent = strip_role_suffix(parent, ADMIN_ID_SUFFIX)

        LOG.debug("Deleting %s id %s with parent %s routing %s",
                  self.document_type, doc_id, parent, routing)
        delete_info = {'_id': doc_id, '_parent': parent}
        if routing:
            delete_info['_routing'] = routing
        if version:
            delete_info['_version'] = version
        self.delete_document(delete_info)
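get_metafield is not shown in these examples; a minimal sketch consistent with
the comments above (ES 2.x returns metafields at the top level of each hit,
ES 1.x under 'fields') might be:

    def get_metafield(hit, field):
        # ES 2.x: '_parent'/'_routing' appear at the hit's top level.
        # ES 1.x: they come back under 'fields' when requested.
        if field in hit:
            return hit[field]
        return hit.get('fields', {}).get(field)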
Example #20
def _get_storage_url_prefix():
    # Extracts swift proxy url after removing the default account id
    # from the service account. Later storage_url's will be constructed
    # for each account by appending the keystone tenant id.
    try:
        storage_url = openstack_clients.get_swiftclient().get_auth()[0]
        return storage_url[:storage_url.index(AUTH_PREFIX)] + AUTH_PREFIX
    except ValueError:
        LOG.error(_LE("reseller_prefix %s not found in keystone endpoint ")
                  % AUTH_PREFIX)
        raise
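A hedged worked example of the slicing above, assuming AUTH_PREFIX is 'AUTH_'
(the URL itself is invented):

    # storage_url = 'http://swift:8080/v1/AUTH_abc123'
    # storage_url[:storage_url.index('AUTH_')] + 'AUTH_'
    #   -> 'http://swift:8080/v1/AUTH_'
    # Per-account storage URLs are then built by appending each keystone
    # tenant id.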
Example #21
    def delete_document_unknown_parent(self, doc_id, version=None):
        """Deletes a document that requires routing but where the routing is
        not known. Since this involves a query to find it, there is a potential
        race condition in between storing and indexing the document. A search
        is run to find the routing parent, and the document is then deleted.
        """
        search_doc_id = doc_id
        if self.plugin.requires_role_separation:
            # Just look for the admin document though it doesn't matter which.
            # This has to match whatever we strip later with strip_role_suffix
            search_doc_id += ADMIN_ID_SUFFIX

        # Necessary to request 'fields' for e-s 1.x, not for 2.x
        query = {'filter': {'term': {'_id': search_doc_id}},
                 'fields': ['_parent', '_routing']}
        search_results = self.engine.search(index=self.alias_name,
                                            doc_type=self.document_type,
                                            body=query)
        total_hits = search_results['hits']['total']
        if not total_hits:
            ctx = {'doc_type': self.document_type, 'id': doc_id}
            LOG.warning(_LW(
                "No results found for %(doc_type)s id %(id)s; can't find "
                "routing to delete") % ctx)
            return

        # There are results. Check that there's only one unique result (may be
        # two in different indices from the same alias). ES 1.x and 2.x differ
        # slightly; metafields are returned by default and not in 'fields' in
        # 2.x whether you like it or not
        distinct_results = set((r['_id'], get_metafield(r, '_parent'))
                               for r in search_results['hits']['hits'])
        if len(distinct_results) != 1:
            ctx = {'count': len(distinct_results),
                   'results': ", ".join(str(r) for r in distinct_results),
                   'doc_type': self.document_type,
                   'id': doc_id}
            LOG.error(_LE("%(count)d distinct results (%(results)s) found for "
                          "get_document_by_query for %(doc_type)s id %(id)s") %
                      ctx)

        first_hit = search_results['hits']['hits'][0]
        routing = get_metafield(first_hit, '_routing')
        parent = get_metafield(first_hit, '_parent')
        parent = strip_role_suffix(parent, ADMIN_ID_SUFFIX)

        LOG.debug("Deleting %s id %s with parent %s routing %s",
                  self.document_type, doc_id, parent, routing)
        delete_info = {'_id': doc_id, '_parent': parent}
        if routing:
            delete_info['_routing'] = routing
        if version:
            delete_info['_version'] = version
        self.delete_document(delete_info)
Example #22
 def delete(self, payload, timestamp):
     version = self.get_version(payload, timestamp,
                                preferred_date_field='deleted_at')
     id = payload['account']
     try:
         self.index_helper.delete_document(
             {'_id': id, '_version': version,
              '_routing': payload['account']})
     except Exception as exc:
         LOG.error(_LE('Error deleting account %(id)s '
                       'from index. Error: %(exc)s') %
                   {'id': id, 'exc': exc})
Example #23
 def delete(self, payload, timestamp):
     network_id = payload['network_id']
     LOG.debug("Deleting network information for %s", network_id)
     try:
         # Note that it's not necessary to delete ports; neutron will not
         # allow deletion of a network that has ports assigned on it
         self.index_helper.delete_document({'_id': network_id})
     except Exception as exc:
         LOG.error(_LE(
             'Error deleting network %(network_id)s '
             'from index. Error: %(exc)s') %
             {'network_id': network_id, 'exc': exc})
Example #24
def _get_storage_url_prefix():
    # Extracts swift proxy url after removing the default account id
    # from the service account. Later storage_url's will be constructed
    # for each account by appending the keystone tenant id.
    try:
        storage_url = openstack_clients.get_swiftclient().get_auth()[0]
        return storage_url[:storage_url.index(AUTH_PREFIX)] + AUTH_PREFIX
    except ValueError:
        LOG.error(
            _LE("reseller_prefix %s not found in keystone endpoint ") %
            AUTH_PREFIX)
        raise
Example #25
 def delete(self, event_type, payload, port_id):
     LOG.debug("Deleting port information for %s; finding routing", port_id)
     try:
         self.index_helper.delete_document_unknown_parent(port_id)
         return pipeline.DeleteItem(self.index_helper.plugin, event_type,
                                    payload, port_id)
     except Exception as exc:
         LOG.error(
             _LE('Error deleting port %(port_id)s '
                 'from index. Error: %(exc)s') % {
                     'port_id': port_id,
                     'exc': exc
                 })
Example #26
    def delete(self, payload, timestamp):
        volume_id = payload['volume_id']
        LOG.debug("Deleting cinder volume information for %s", volume_id)
        if not volume_id:
            return

        try:
            self.index_helper.delete_document({'_id': volume_id})
        except Exception as exc:
            LOG.error(_LE(
                'Error deleting volume %(volume_id)s '
                'from index. Error: %(exc)s') %
                {'volume_id': volume_id, 'exc': exc})
Example #27
 def _verify_and_respawn_children(self, pid, status):
     if len(self.stale_children) == 0:
         LOG.debug('No stale children')
     if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
         LOG.error(
             _LE('Not respawning child %d, cannot '
                 'recover from termination') % pid)
         if not self.children and not self.stale_children:
             LOG.info(_LI('All workers have terminated. Exiting'))
             self.running = False
     else:
         if len(self.children) < self.workers:
             self.run_child()
Example #28
 def _verify_and_respawn_children(self, pid, status):
     if len(self.stale_children) == 0:
         LOG.debug('No stale children')
     if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
         LOG.error(_LE('Not respawning child %d, cannot '
                       'recover from termination') % pid)
         if not self.children and not self.stale_children:
             LOG.info(
                 _LI('All workers have terminated. Exiting'))
             self.running = False
     else:
         if len(self.children) < self.workers:
             self.run_child()
Example #29
 def create_or_update(self, payload, timestamp):
     payload = serialize_swift_account_notification(payload)
     try:
         self.index_helper.save_document(payload,
                                         version=self.get_version(
                                             payload, timestamp))
     except Exception as exc:
         LOG.error(
             _LE('Error saving account %(id)s '
                 'in index. Error: %(exc)s') % {
                     'id': payload['id'],
                     'exc': exc
                 })
Example #30
    def save_documents(self, documents, versions=None, index=None):
        """Send list of serialized documents into search engine.

           Warning: Index vs Alias usage.
           Listeners [plugins/*/notification_handlers.py]:
           When the plugin listeners are indexing documents, we will want
           to use the normal ES alias for their resource group. In this case
           the index parameter will not be set. Listeners are by far the most
           common usage case.

           Re-Indexing [plugins/base.py::index_initial_data()]:
           When we are re-indexing we will want to use the new ES index.
           Bypassing the alias means we will not send duplicate documents
           to the old index. In this case the index will be set. Re-indexing
           is an event that will rarely happen.
        """
        if not index:
            use_index = self.alias_name
        else:
            use_index = index

        for document in documents:
            # NOTE: In Elasticsearch 2.0 field names cannot contain '.', change
            # '.' to '__'.
            utils.replace_dots_in_field_names(document)

        try:
            result = helpers.bulk(
                client=self.engine,
                index=use_index,
                doc_type=self.document_type,
                chunk_size=self.index_chunk_size,
                actions=self._prepare_actions(documents, versions))
        except helpers.BulkIndexError as e:
            err_msg = []
            for err in e.errors:
                if "VersionConflict" not in err['index']['error']:
                    raise
                err_msg.append("id %(_id)s: %(error)s" % err['index'])
            LOG.warning(_LW('Version conflict %s') % ';'.join(err_msg))
            result = 0
        except es_exc.RequestError as e:
            if _is_multiple_alias_exception(e):
                LOG.error(_LE("Alias [%(a)s] with multiple indexes error") %
                          {'a': self.alias_name})
                self._index_alias_multiple_indexes_bulk(documents=documents,
                                                        versions=versions)

            result = 0
        LOG.debug("Indexing result: %s", result)
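utils.replace_dots_in_field_names is referenced but not shown; a hedged sketch
of what it might do, following the ES 2.0 note above (the real helper may also
recurse into lists):

    def replace_dots_in_field_names(document):
        # Elasticsearch 2.0 rejects field names containing '.'; rename
        # offending keys in place with '__', recursing into nested dicts.
        for key in list(document.keys()):
            value = document[key]
            if isinstance(value, dict):
                replace_dots_in_field_names(value)
            if '.' in key:
                document[key.replace('.', '__')] = document.pop(key)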
Example #31
 def chassis_delete(self, event_type, payload, timestamp):
     chassis_id = payload['uuid']
     LOG.debug("Deleting chassis %s", chassis_id)
     try:
         self.index_helper.delete_document({'_id': chassis_id})
         return pipeline.DeleteItem(self.index_helper.plugin, event_type,
                                    payload, chassis_id)
     except Exception as exc:
         LOG.error(
             _LE('Error deleting chassis %(chassis_id)s '
                 'from index. Error: %(exc)s') % {
                     'chassis_id': chassis_id,
                     'exc': exc
                 })
Example #32
 def delete(self, event_type, payload, timestamp):
     router_id = payload['router_id']
     LOG.debug("Deleting router information for %s", router_id)
     try:
         self.index_helper.delete_document({'_id': router_id})
         return pipeline.DeleteItem(self.index_helper.plugin, event_type,
                                    payload, router_id)
     except Exception as exc:
         LOG.error(
             _LE('Error deleting router %(router)s '
                 'from index: %(exc)s') % {
                     'router': router_id,
                     'exc': exc
                 })
Example #33
 def delete_group(self, event_type, payload, timestamp):
     sec_id = payload['security_group_id']
     LOG.debug("Deleting security group information for %s", sec_id)
     try:
         self.index_helper.delete_document({'_id': sec_id})
         return pipeline.DeleteItem(self.index_helper.plugin, event_type,
                                    payload, sec_id)
     except Exception as exc:
         LOG.error(
             _LE('Error deleting security_group %(sec_id)s. Error: %(exc)s')
             % {
                 'sec_id': sec_id,
                 'exc': exc
             })
Example #34
 def delete(self, event_type, payload, timestamp):
     fip_id = payload['floatingip_id']
     LOG.debug("Deleting floatingip information for %s", fip_id)
     try:
         self.index_helper.delete_document({'_id': fip_id})
         return pipeline.DeleteItem(self.index_helper.plugin, event_type,
                                    payload, fip_id)
     except Exception as exc:
         LOG.error(
             _LE('Error deleting floating ip %(fip)s '
                 'from index: %(exc)s') % {
                     'fip': fip_id,
                     'exc': exc
                 })
Example #35
 def port_delete(self, event_type, payload, timestamp):
     port_id = payload['uuid']
     LOG.debug("Deleting port %s", port_id)
     try:
         self.index_helper.delete_document_unknown_parent(port_id)
         return pipeline.DeleteItem(self.index_helper.plugin, event_type,
                                    payload, port_id)
     except Exception as exc:
         LOG.error(
             _LE('Error deleting port %(port_id)s '
                 'from index. Error: %(exc)s') % {
                     'port_id': port_id,
                     'exc': exc
                 })
Example #36
    def delete(self, payload, timestamp):
        snapshot_id = payload['snapshot_id']
        volume_id = payload['volume_id']
        LOG.debug("Deleting cinder snapshot information for %s", snapshot_id)
        if not snapshot_id:
            return

        try:
            self.index_helper.delete_document({'_id': snapshot_id,
                                               '_parent': volume_id})
        except Exception as exc:
            LOG.error(_LE(
                'Error deleting snapshot %(snapshot_id)s '
                'from index. Error: %(exc)s') %
                {'snapshot_id': snapshot_id, 'exc': exc})
Example #37
 def create_or_update(self, payload, timestamp):
     image_id = payload['id']
     try:
         payload = self.serialize_notification(payload)
         self.index_helper.save_document(
             payload,
             version=self.get_version(payload, timestamp))
     except glanceclient.exc.NotFound:
         LOG.warning(_LW("Image %s not found; deleting") % image_id)
         try:
             self.index_helper.delete_document({'_id': image_id})
         except Exception as exc:
             LOG.error(_LE(
                 'Error deleting image %(image_id)s from index: %(exc)s') %
                 {'image_id': image_id, 'exc': exc})
Example #38
 def delete(self, event_type, payload, timestamp):
     subnet_id = payload['subnet_id']
     LOG.debug("Deleting subnet information for %s; finding routing",
               subnet_id)
     try:
         self.index_helper.delete_document_unknown_parent(subnet_id)
         return pipeline.DeleteItem(self.index_helper.plugin, event_type,
                                    payload, subnet_id)
     except Exception as exc:
         LOG.error(
             _LE('Error deleting subnet %(subnet_id)s '
                 'from index: %(exc)s') % {
                     'subnet_id': subnet_id,
                     'exc': exc
                 })
Example #39
    def delete(self, payload, timestamp):
        volume_id = payload['volume_id']
        LOG.debug("Deleting cinder volume information for %s", volume_id)
        if not volume_id:
            return

        try:
            self.index_helper.delete_document({'_id': volume_id})
        except Exception as exc:
            LOG.error(
                _LE('Error deleting volume %(volume_id)s '
                    'from index. Error: %(exc)s') % {
                        'volume_id': volume_id,
                        'exc': exc
                    })
Example #40
    def aliases(self):
        # Grab a list of aliases used by Searchlight.
        aliases = []
        for res_type, ext in six.iteritems(utils.get_search_plugins()):
            aliases.append(ext.obj.alias_name_listener)
            aliases.append(ext.obj.alias_name_search)

        # Grab the indices associated with the aliases. The end result is
        # a dictionary where the key is the index and the value is a list
        # of aliases associated with that index.
        indices = {}
        for alias in set(aliases):
            try:
                response = es_utils.get_indices(alias)
            except es_exc.NotFoundError:
                # Ignore and continue.
                response = {}
            except Exception as e:
                # Probably an ES connection issue. Alert the user.
                LOG.error(
                    _LE("Failed retrieving indices from Elasticsearch "
                        "%(a)s %(e)s") % {
                            'a': alias,
                            'e': e
                        })
                sys.exit(3)

            for index in response.keys():
                if index not in indices:
                    indices[index] = [alias]
                else:
                    indices[index].append(alias)

        if not indices:
            print("\nNo Elasticsearch indices for Searchlight exist.")
        else:
            print("\nList of Elasticsearch indices (and their associated"
                  " aliases) used by Searchlight.\n")
            print("The indices are based on the config file.")
            print("To view indices used by other Searchlight config "
                  "files, use the --config-file option.\n")
            print("Indices are denoted with a '*'")
            print("Aliases are denoted with a '+'\n")
            for index in indices:
                print("    * " + index)
                for alias in indices[index]:
                    print("        + " + alias)
        print("\n")
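For illustration, the printed tree might look like this (index and alias names
invented):

    * searchlight-2016_01_01_12_00_00
        + searchlight-search
        + searchlight-listener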
Example #41
    def update_document(self,
                        document,
                        doc_id,
                        update_as_script,
                        expected_version=None):
        """Updates are a little simpler than inserts because the documents
        already exist. Note that scripted updates are not filtered in the same
        way as partial document updates. Script updates should be passed as
        a dict {"script": .., "parameters": ..}. Partial document updates
        should be the raw document.
        """
        def _get_update_action(source, id_suffix=''):
            action = {'_id': doc_id + id_suffix, '_op_type': 'update'}
            if expected_version:
                action['_version'] = expected_version
            if update_as_script:
                action.update(source)
            else:
                action['doc'] = source

            routing_field = self.plugin.routing_field
            if routing_field:
                action['_routing'] = source[routing_field]

            return action

        if self.plugin.requires_role_separation:
            user_doc = (self._remove_admin_fields(document)
                        if update_as_script else document)
            actions = [
                _get_update_action(document, ADMIN_ID_SUFFIX),
                _get_update_action(user_doc, USER_ID_SUFFIX)
            ]
        else:
            actions = [_get_update_action(document)]
        try:
            result = helpers.bulk(client=self.engine,
                                  index=self.alias_name,
                                  doc_type=self.document_type,
                                  chunk_size=self.index_chunk_size,
                                  actions=actions)
            LOG.debug("Update result: %s", result)
        except es_exc.RequestError as e:
            if _is_multiple_alias_exception(e):
                LOG.error(
                    _LE("Alias [%(a)s] with multiple indexes error") %
                    {'a': self.alias_name})
                self._index_alias_multiple_indexes_bulk(actions=actions)
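A hedged sketch of the two calling conventions the docstring describes,
following its {"script": .., "parameters": ..} shape; the helper instance and
field values are invented:

    # Scripted update: the dict is merged into the bulk action as-is.
    helper.update_document({'script': 'ctx._source.status = st',
                            'parameters': {'st': 'ACTIVE'}},
                           doc_id='abc', update_as_script=True)
    # Partial document update: the raw document is nested under 'doc'.
    helper.update_document({'status': 'ACTIVE'}, doc_id='abc',
                           update_as_script=False)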
Example #42
 def delete(self, event_type, payload, timestamp):
     network_id = payload['network_id']
     LOG.debug("Deleting network information for %s", network_id)
     try:
         # Note that it's not necessary to delete ports; neutron will not
         # allow deletion of a network that has ports assigned on it
         self.index_helper.delete_document({'_id': network_id})
         return pipeline.DeleteItem(self.index_helper.plugin, event_type,
                                    payload, network_id)
     except Exception as exc:
         LOG.error(
             _LE('Error deleting network %(network_id)s '
                 'from index. Error: %(exc)s') % {
                     'network_id': network_id,
                     'exc': exc
                 })
Example #43
    def delete(self, payload, timestamp):
        instance_id = payload['instance_id']
        LOG.debug("Deleting nova instance information for %s", instance_id)
        if not instance_id:
            return

        try:
            version = self.get_version(payload, timestamp,
                                       preferred_date_field='deleted_at')
            self.index_helper.delete_document(
                {'_id': instance_id, '_version': version})
        except Exception as exc:
            LOG.error(_LE(
                'Error deleting instance %(instance_id)s '
                'from index: %(exc)s') %
                {'instance_id': instance_id, 'exc': exc})
Example #44
def alias_error_cleanup(indexes):
    """While trying to re-index, we ran into some error. In this case, the
       new index creation/alias updating is incorrect. We will need to clean
       up by rolling back all of the changes. ElasticSearch must stay
       uncluttered. We will delete the indexes explicitly here. ElasticSearch
       will implicitly take care of removing deleted indexes from the aliases.
    """

    es_engine = searchlight.elasticsearch.get_api()

    for index in indexes.values():
        try:
            es_engine.indices.delete(index=index, ignore=404)
        except Exception as e:
            msg = {'index': index}
            LOG.error(_LE("Index [%(index)s] clean-up failed.") % msg)
            LOG.error(encodeutils.exception_to_unicode(e))
Example #45
    def delete_rule(self, event_type, payload, timestamp):
        # See comment for create_or_update_rule() for details.
        rule_id = payload['security_group_rule_id']
        LOG.debug("Updating security group rule information for %s", rule_id)

        field = 'security_group_rules'

        # Read, modify, write of an existing security group.
        # To avoid a race condition, we are searching for the document
        # in a round-about way. Outside of the retry loop, we will
        # search for the document and save the document ID. This way we
        # do not need to search inside the loop. We will access the document
        # directly by the ID which will always return the latest version.
        orig_doc = self.index_helper.get_docs_by_nested_field(
            "security_group_rules", "id", rule_id, version=True)
        if not orig_doc:
            return
        doc_id = orig_doc['hits']['hits'][0]['_id']
        doc = orig_doc['hits']['hits'][0]
        for attempts in range(SECGROUP_RETRIES):
            body = doc['_source']
            if not body or field not in body:
                return

            body[field] = \
                list(filter(lambda r: r['id'] != rule_id, body[field]))

            version = doc['_version']
            try:
                version += 1
                self.index_helper.save_document(body, version=version)
                return pipeline.IndexItem(self.index_helper.plugin, event_type,
                                          payload, body)
            except helpers.BulkIndexError as e:
                if e.errors[0]['index']['status'] == 409:
                    # Conflict. Retry with new version.
                    doc = self.index_helper.get_document(doc_id)
                    if not doc:
                        return
                else:
                    raise

        if attempts == (SECGROUP_RETRIES - 1):
            LOG.error(
                _LE('Error deleting security group rule %(id)s:'
                    ' Too many retries') % {'id': rule_id})
Example #46
    def save_documents(self, documents, versions=None, index=None):
        """Send list of serialized documents into search engine.

           Warning: Index vs Alias usage.
           Listeners [plugins/*/notification_handlers.py]:
           When the plugin listeners are indexing documents, we will want
           to use the normal ES alias for their resource group. In this case
           the index parameter will not be set. Listeners are by far the most
           common usage case.

           Re-Indexing [plugins/base.py::index_initial_data()]:
           When we are re-indexing we will want to use the new ES index.
           Bypassing the alias means we will not send duplicate documents
           to the old index. In this case the index will be set. Re-indexing
           is an event that will rarely happen.
        """
        if not index:
            use_index = self.alias_name
        else:
            use_index = index

        try:
            result = helpers.bulk(
                client=self.engine,
                index=use_index,
                doc_type=self.document_type,
                chunk_size=self.index_chunk_size,
                actions=self._prepare_actions(documents, versions))
        except helpers.BulkIndexError as e:
            err_msg = []
            for err in e.errors:
                if "VersionConflict" not in err['index']['error']:
                    raise
                err_msg.append("id %(_id)s: %(error)s" % err['index'])
            LOG.warning(_LW('Version conflict %s') % ';'.join(err_msg))
            result = 0
        except es_exc.RequestError as e:
            if _is_multiple_alias_exception(e):
                LOG.error(_LE("Alias [%(a)s] with multiple indexes error") %
                          {'a': self.alias_name})
                self._index_alias_multiple_indexes_bulk(documents=documents,
                                                        versions=versions)

            result = 0
        LOG.debug("Indexing result: %s", result)
Example #47
    def create_or_update_rule(self, event_type, payload, timestamp):
        # The issue here is that the notification is not complete.
        # We have only a single rule that needs to be added to an
        # existing group. A major issue is that we may be updating
        # the ES document while other workers are modifying the rules
        # in the same ES document. This requires an aggressive retry policy,
        # using the "version" field. Since the ES document will have been
        # modified after a conflict, we will need to grab the latest version
        # of the document before continuing. After "retries" number of times,
        # we will admit failure and not try the update anymore.
        # NB: Most of the looping logic is the same as in "delete_rule".
        #     The read/modify of the ES document is different. If the logic
        #     changes, please make the changes there.
        group_id = payload['security_group_rule']['security_group_id']
        LOG.debug("Updating security group rule information for %s", group_id)

        for attempts in range(SECGROUP_RETRIES):
            # Read, modify, write of an existing security group.
            doc = self.index_helper.get_document(group_id)

            if not doc:
                return
            body = doc['_source']
            if not body or 'security_group_rules' not in body:
                return

            body['security_group_rules'].append(payload['security_group_rule'])

            version = doc['_version']
            try:
                version += 1
                self.index_helper.save_document(body, version=version)
                return pipeline.IndexItem(self.index_helper.plugin, event_type,
                                          payload, body)
            except helpers.BulkIndexError as e:
                if e.errors[0]['index']['status'] == 409:
                    # Conflict error, retry with new version of doc.
                    pass
                else:
                    raise

        if attempts == (SECGROUP_RETRIES - 1):
            LOG.error(
                _LE('Error adding security group rule %(id)s:'
                    ' Too many retries') % {'id': group_id})
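The loop above is optimistic concurrency control built on Elasticsearch's
external versioning: saving with version N+1 succeeds only while the stored
document is still at version N, and a 409 conflict means another worker won
the race. A compressed sketch of the pattern (modify() is an invented
placeholder; the helper calls come from the code above):

    for attempts in range(SECGROUP_RETRIES):
        doc = self.index_helper.get_document(group_id)    # read
        body = modify(doc['_source'])                     # modify
        try:
            self.index_helper.save_document(
                body, version=doc['_version'] + 1)        # conditional write
            break
        except helpers.BulkIndexError as e:
            if e.errors[0]['index']['status'] != 409:
                raise                                     # not a conflict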
Example #48
 def delete(self, payload, timestamp):
     version = self.get_version(payload,
                                timestamp,
                                preferred_date_field='deleted_at')
     id = payload['account']
     try:
         self.index_helper.delete_document({
             '_id': id,
             '_version': version,
             '_routing': payload['account']
         })
     except Exception as exc:
         LOG.error(
             _LE('Error deleting account %(id)s '
                 'from index. Error: %(exc)s') % {
                     'id': id,
                     'exc': exc
                 })
Example #49
    def delete_rule(self, payload, timestamp):
        # See comment for create_or_update_rule() for details.
        rule_id = payload['security_group_rule_id']
        LOG.debug("Updating security group rule information for %s", rule_id)

        field = 'security_group_rules'

        # Read, modify, write of an existing security group.
        # To avoid a race condition, we are searching for the document
        # in a round-about way. Outside of the retry loop, we will
        # search for the document and save the document ID. This way we
        # do not need to search inside the loop. We will access the document
        # directly by the ID which will always return the latest version.
        orig_doc = self.index_helper.get_docs_by_nested_field(
            "security_group_rules", "id", rule_id, version=True)
        if not orig_doc:
            return
        doc_id = orig_doc['hits']['hits'][0]['_id']
        doc = orig_doc['hits']['hits'][0]
        for attempts in range(SECGROUP_RETRIES):
            body = doc['_source']
            if not body or field not in body:
                return

            body[field] = \
                list(filter(lambda r: r['id'] != rule_id, body[field]))

            version = doc['_version']
            try:
                version += 1
                self.index_helper.save_document(body, version=version)
                break
            except helpers.BulkIndexError as e:
                if e.errors[0]['index']['status'] == 409:
                    # Conflict. Retry with new version.
                    doc = self.index_helper.get_document(doc_id)
                    if not doc:
                        return
                else:
                    raise

        else:
            # for/else: runs only when every attempt hit a conflict
            LOG.error(_LE('Error deleting security group rule %(id)s:'
                          ' Too many retries') % {'id': rule_id})
Example #50
    def create_or_update_rule(self, payload, timestamp):
        # The issue here is that the notification is not complete.
        # We have only a single rule that needs to be added to an
        # existing group. A major issue is that we may be updating
        # the ES document while other workers are modifying the rules
        # in the same ES document. This requires an aggressive retry policy,
        # using the "version" field. Since the ES document will have been
        # modified after a conflict, we will need to grab the latest version
        # of the document before continuing. After "retries" number of times,
        # we will admit failure and not try the update anymore.
        # NB: Most of the looping logic is the same as in "delete_rule".
        #     The read/modify of the ES document is different. If the logic
        #     changes, please make the changes there.
        group_id = payload['security_group_rule']['security_group_id']
        LOG.debug("Updating security group rule information for %s", group_id)

        for attempts in range(SECGROUP_RETRIES):
            # Read, modify, write of an existing security group.
            doc = self.index_helper.get_document(group_id)

            if not doc:
                return
            body = doc['_source']
            if not body or 'security_group_rules' not in body:
                return

            body['security_group_rules'].append(payload['security_group_rule'])

            version = doc['_version']
            try:
                version += 1
                self.index_helper.save_document(body, version=version)
                break
            except helpers.BulkIndexError as e:
                if e.errors[0]['index']['status'] == 409:
                    # Conflict error, retry with new version of doc.
                    pass
                else:
                    raise

        else:
            # for/else: runs only when every attempt hit a conflict
            LOG.error(_LE('Error adding security group rule %(id)s:'
                          ' Too many retries') % {'id': group_id})
Example #51
    def update_document(self, document, doc_id, update_as_script,
                        expected_version=None):
        """Updates are a little simpler than inserts because the documents
        already exist. Note that scripted updates are not filtered in the same
        way as partial document updates. Script updates should be passed as
        a dict {"script": .., "parameters": ..}. Partial document updates
        should be the raw document.
        """
        def _get_update_action(source, id_suffix=''):
            action = {'_id': doc_id + id_suffix, '_op_type': 'update'}
            if expected_version:
                action['_version'] = expected_version
            if update_as_script:
                action.update(source)
            else:
                action['doc'] = source

            routing_field = self.plugin.routing_field
            if routing_field:
                action['_routing'] = source[routing_field]

            return action

        if self.plugin.requires_role_separation:
            user_doc = (self._remove_admin_fields(document)
                        if update_as_script else document)
            actions = [_get_update_action(document, ADMIN_ID_SUFFIX),
                       _get_update_action(user_doc, USER_ID_SUFFIX)]
        else:
            actions = [_get_update_action(document)]
        try:
            result = helpers.bulk(
                client=self.engine,
                index=self.alias_name,
                doc_type=self.document_type,
                chunk_size=self.index_chunk_size,
                actions=actions)
            LOG.debug("Update result: %s", result)
        except es_exc.RequestError as e:
            if _is_multiple_alias_exception(e):
                LOG.error(_LE("Alias [%(a)s] with multiple indexes error") %
                          {'a': self.alias_name})
                self._index_alias_multiple_indexes_bulk(actions=actions)
Example #52
    def delete(self, payload, timestamp):
        snapshot_id = payload['snapshot_id']
        volume_id = payload['volume_id']
        LOG.debug("Deleting cinder snapshot information for %s", snapshot_id)
        if not snapshot_id:
            return

        try:
            self.index_helper.delete_document({
                '_id': snapshot_id,
                '_parent': volume_id
            })
        except Exception as exc:
            LOG.error(
                _LE('Error deleting snapshot %(snapshot_id)s '
                    'from index. Error: %(exc)s') % {
                        'snapshot_id': snapshot_id,
                        'exc': exc
                    })
Example #53
def setup_remote_pydev_debug(host, port):
    error_msg = _LE('Error setting up the debug environment. Verify that the'
                    ' option pydev_worker_debug_host is pointing to a valid '
                    'hostname or IP on which a pydev server is listening on'
                    ' the port indicated by pydev_worker_debug_port.')

    try:
        try:
            from pydev import pydevd
        except ImportError:
            import pydevd

        pydevd.settrace(host,
                        port=port,
                        stdoutToServer=True,
                        stderrToServer=True)
        return True
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(error_msg)
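A hedged usage sketch; the host and port values are invented and would
normally come from the pydev_worker_debug_host/port options named in the
error message:

    # Hypothetical: attach this worker to a listening pydev server.
    setup_remote_pydev_debug('192.0.2.10', 5678)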
Example #54
 def __init__(self, plugins):
     self.plugins = plugins
     self.notification_target_map = {}
     for plugin_type, plugin in six.iteritems(self.plugins):
         try:
             handler = plugin.obj.get_notification_handler()
             if not handler:
                 continue
             event_list = handler.get_notification_supported_events()
             for event in event_list:
                 LOG.debug("Registering event '%s' for plugin '%s'",
                           event, plugin.name)
                 # Add this plugin to the list of handlers for this event
                 # type, creating that list if necessary
                 self.notification_target_map.setdefault(
                     event.lower(), []).append(plugin.obj)
         except Exception as e:
             LOG.error(_LE("Failed to retrieve supported notification"
                           " events from search plugins "
                           "%(ext)s: %(e)s") %
                       {'ext': plugin.name, 'e': e})
Example #55
    def _validate_aggregations(self, context, aggregations):
        if aggregations:
            # Check aggregations against policy
            try:
                self.policy_enforcer.enforce(context,
                                             'search:query:aggregations',
                                             context.policy_target)
            except exception.Forbidden as e:
                raise webob.exc.HTTPForbidden(explanation=e.msg)

            # Reject any requests including the 'global' aggregation type
            # because it bypasses RBAC. 'global' aggregations can only occur
            # at the top level, so we only need to check that level
            for agg_name, agg_definition in six.iteritems(aggregations):
                if 'global' in agg_definition:
                    msg = _LE(
                        "Aggregation '%s' contains the 'global' aggregation "
                        "which is not allowed") % agg_name
                    LOG.error(msg)
                    raise webob.exc.HTTPForbidden(explanation=msg)
        return aggregations
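For illustration, an aggregations body the check above would reject, since a
top-level 'global' aggregation bypasses RBAC (aggregation names invented):

    aggregations = {
        'everything': {
            'global': {},                                   # rejected
            'aggs': {'types': {'terms': {'field': '_type'}}}
        }
    }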
Example #56
    def rbac_create(self, payload, timestamp):
        """RBAC policy is making a network visible to users in a specific
           tenant. Previously this network was not visible to users in that
           tenant. We will want to add this tenant to the members list.
           Also add the RBAC policy.
        """
        valid_types = ["network"]

        event_type = payload['rbac_policy']['object_type']
        action = payload['rbac_policy']['action']
        if action not in RBAC_VALID_ACTIONS or event_type not in valid_types:
            # I'm bored. Nothing that concerns or interests us.
            return

        network_id = payload['rbac_policy']['object_id']
        target_tenant = payload['rbac_policy']['target_tenant']
        policy_id = payload['rbac_policy']['id']
        LOG.debug("Adding RBAC policy for network %s with tenant %s",
                  network_id, target_tenant)

        # Read, modify, write an existing network document. Grab and modify
        # the admin version of the document. When saving the document it will
        # be indexed for both admin and user.
        doc = self.index_helper.get_document(network_id, for_admin=True)

        if not doc or not doc['_source']:
            LOG.error(_LE('Error adding rule to network. Network %(id)s '
                      'does not exist.') % {'id': network_id})
            return

        body = doc['_source']

        # Update network with RBAC policy.
        add_rbac(body, target_tenant, policy_id)

        # Bump version for race condition prevention. Use doc and not
        # body, since '_version' is outside of '_source'.
        version = doc['_version'] + 1
        self.index_helper.save_document(body, version=version)
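add_rbac is not shown here; a hedged sketch consistent with the docstring
(add the tenant to a members list and record the policy; all field names are
assumptions, not searchlight's actual schema):

    def add_rbac(network_body, target_tenant, policy_id):
        # Field names below are illustrative assumptions.
        members = network_body.setdefault('members', [])
        if target_tenant not in members:
            members.append(target_tenant)
        network_body.setdefault('rbac_policy', []).append(
            {'rbac_id': policy_id, 'target_tenant': target_tenant})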