Example #1
    def test_set_child_plugin_group(self):
        """Test setting child plugin's resource_group_name while loading
        plugins
        """
        mock_engine = mock.Mock()

        parent_plugin = fake_plugins.FakeSimplePlugin(es_engine=mock_engine)
        child_plugin = fake_plugins.FakeWrongGroupChildPlugin(
            es_engine=mock_engine)
        grandchild_plugin = fake_plugins.FakeWrongGroupGrandchildPlugin(
            es_engine=mock_engine)
        mock_stevedore_parent = mock.Mock()
        mock_stevedore_parent.obj = parent_plugin
        mock_stevedore_child = mock.Mock()
        mock_stevedore_child.obj = child_plugin
        mock_stevedore_grandchild = mock.Mock()
        mock_stevedore_grandchild.obj = grandchild_plugin

        with mock.patch('stevedore.extension.ExtensionManager') as mock_stev:
            manager = mock.Mock()
            manager.extensions = [
                mock_stevedore_parent, mock_stevedore_child,
                mock_stevedore_grandchild
            ]

            mock_stev.return_value = manager
            searchlight_utils.get_search_plugins()
            self.assertEqual(grandchild_plugin.resource_group_name,
                             child_plugin.resource_group_name)
            self.assertEqual(child_plugin.resource_group_name,
                             parent_plugin.resource_group_name)
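
Note: what these near-identical examples exercise (the implementation is not shown on this page) is that get_search_plugins() forces each child plugin's resource_group_name to match its parent's, all the way down the hierarchy. A minimal sketch of that propagation, assuming plugins expose child_plugins and resource_group_name as the fakes suggest:

def _propagate_resource_group(plugin):
    # Hypothetical helper; the real logic lives inside
    # get_search_plugins(). Overwrite each descendant's group so the
    # whole hierarchy shares the root plugin's resource_group_name.
    for child in plugin.child_plugins:
        child.resource_group_name = plugin.resource_group_name
        _propagate_resource_group(child)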
Example #2
    def test_set_child_plugin_group(self):
        """Test setting child plugin's resource_group_name while loading
        plugins
        """
        mock_engine = mock.Mock()

        parent_plugin = fake_plugins.FakeSimplePlugin(es_engine=mock_engine)
        child_plugin = fake_plugins.FakeWrongGroupChildPlugin(
            es_engine=mock_engine)
        grandchild_plugin = fake_plugins.FakeWrongGroupGrandchildPlugin(
            es_engine=mock_engine)
        mock_stevedore_parent = mock.Mock()
        mock_stevedore_parent.obj = parent_plugin
        mock_stevedore_child = mock.Mock()
        mock_stevedore_child.obj = child_plugin
        mock_stevedore_grandchild = mock.Mock()
        mock_stevedore_grandchild.obj = grandchild_plugin

        with mock.patch('stevedore.extension.ExtensionManager') as mock_stev:
            manager = mock.Mock()
            manager.extensions = [mock_stevedore_parent,
                                  mock_stevedore_child,
                                  mock_stevedore_grandchild]

            mock_stev.return_value = manager
            searchlight_utils.get_search_plugins()
            self.assertEqual(grandchild_plugin.resource_group_name,
                             child_plugin.resource_group_name)
            self.assertEqual(child_plugin.resource_group_name,
                             parent_plugin.resource_group_name)
Example #3
    def test_policy_all_disallowed(self):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)

        types = ['OS::Glance::Image']
        self.enforcer.add_rules(policy.policy.Rules.from_dict({
            'resource:OS::Glance::Image': '!',
            'resource:OS::Nova::Server': '!',
        }))

        expected_message = (
            "There are no resource types accessible to you to serve "
            "your request. You do not have access to the following "
            "resource types: OS::Glance::Image")
        self.assertRaisesRegex(
            webob.exc.HTTPForbidden, expected_message,
            search_deserializer._filter_types_by_policy,
            request.context, types)

        types = ['OS::Glance::Image', 'OS::Nova::Server']
        expected_message = (
            "There are no resource types accessible to you to serve "
            "your request. You do not have access to the following "
            "resource types: OS::Glance::Image, OS::Nova::Server")
        self.assertRaisesRegex(
            webob.exc.HTTPForbidden, expected_message,
            search_deserializer._filter_types_by_policy,
            request.context, types)
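
Note: the assertions above imply a filter of roughly the following shape. This is a hedged reconstruction, not the actual body of _filter_types_by_policy:

import webob.exc


def filter_types_by_policy(enforcer, context, types):
    # Keep only the types the policy enforcer admits; if nothing
    # survives, refuse the request with the message asserted above.
    allowed = [t for t in types
               if enforcer.check(context, 'resource:%s' % t,
                                 context.policy_target)]
    if not allowed:
        raise webob.exc.HTTPForbidden(
            "There are no resource types accessible to you to serve "
            "your request. You do not have access to the following "
            "resource types: %s" % ", ".join(types))
    return allowed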
Example #4
    def test_policy_all_disallowed(self):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(), policy_enforcer=self.enforcer)

        types = ['OS::Glance::Image']
        self.enforcer.add_rules(
            policy.policy.Rules.from_dict({
                'resource:OS::Glance::Image': '!',
                'resource:OS::Nova::Server': '!',
            }))

        expected_message = (
            "There are no resource types accessible to you to serve "
            "your request. You do not have access to the following "
            "resource types: OS::Glance::Image")
        self.assertRaisesRegex(webob.exc.HTTPForbidden, expected_message,
                               search_deserializer._filter_types_by_policy,
                               request.context, types)

        types = ['OS::Glance::Image', 'OS::Nova::Server']
        expected_message = (
            "There are no resource types accessible to you to serve "
            "your request. You do not have access to the following "
            "resource types: OS::Glance::Image, OS::Nova::Server")
        self.assertRaisesRegex(webob.exc.HTTPForbidden, expected_message,
                               search_deserializer._filter_types_by_policy,
                               request.context, types)
Example #5
def create_resource():
    """Search resource factory method"""
    plugins = utils.get_search_plugins()
    deserializer = RequestDeserializer(plugins)
    serializer = ResponseSerializer()
    controller = SearchController(plugins)
    return wsgi.Resource(controller, deserializer, serializer)
Example #6
    def sync(self, index=None, _type=None, force=False, clear=False):
        # Verify all indices and types have registered plugins.
        # index and _type are lists because of nargs='*'
        index = index.split(',') if index else []
        _type = _type.split(',') if _type else []

        indices_set = set(index)
        types_set = set(_type)

        plugins_to_index = []
        for resource_type, ext in six.iteritems(utils.get_search_plugins()):
            plugin_obj = ext.obj
            indices_set.discard(plugin_obj.get_index_name())
            types_set.discard(plugin_obj.get_document_type())

            skip = (index and plugin_obj.get_index_name() not in index or
                    _type and plugin_obj.get_document_type() not in _type)
            if not skip:
                plugins_to_index.append((resource_type, ext))

        if indices_set or types_set:
            print("Some index names or types do not have plugins "
                  "registered. Index names: %s. Types: %s" %
                  (",".join(indices_set) or "<None>",
                   ",".join(types_set) or "<None>"))
            print("Aborting.")
            sys.exit(1)

        if not force:
            def format_selection(selection):
                resource_type, ext = selection
                return '  %s (%s)' % (ext.obj.get_document_type(),
                                      ext.obj.get_index_name())

            print("\nResource types (and indices) matching selection:\n%s\n" %
                  '\n'.join(map(format_selection, plugins_to_index)))

            if clear:
                ans = six.moves.input(
                    "Indexing will delete existing data and mapping(s) before "
                    "reindexing.\nUse '--force' to suppress this "
                    "message.\nOK to continue? [y/n]: ")
            else:
                ans = six.moves.input(
                    "Indexing will NOT delete existing data or mapping(s). It "
                    "will reindex all resources. \nUse '--force' to suppress "
                    "this message.\nOK to continue? [y/n]: ")
            if ans.lower() != 'y':
                print("Aborting.")
                sys.exit(0)

        for resource_type, ext in plugins_to_index:
            plugin_obj = ext.obj
            try:
                plugin_obj.initial_indexing(clear=clear)
            except Exception as e:
                LOG.error(_LE("Failed to setup index extension "
                              "%(ext)s: %(e)s") % {'ext': ext.name,
                                                   'e': e})
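
Note: the 'skip' expression above leans on 'and' binding tighter than 'or'. A fully parenthesized restatement, as a self-contained sketch:

def should_skip(plugin_obj, index, _type):
    # Equivalent to the skip test in sync() above, with explicit
    # parentheses around the two and-clauses.
    return ((index and plugin_obj.get_index_name() not in index) or
            (_type and plugin_obj.get_document_type() not in _type))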
Example #7
    def test_facets_resource_policy(self):
        request = unit_test_utils.get_fake_request()
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(), policy_enforcer=self.enforcer)

        with mock.patch.object(self.enforcer, 'check') as mock_enforce:
            search_deserializer.facets(request)
            self.assertIn(
                mock.call(request.context, 'resource:OS::Nova::Server',
                          request.context.policy_target),
                mock_enforce.mock_calls)
            self.assertIn(
                mock.call(request.context, 'resource:OS::Glance::Image',
                          request.context.policy_target),
                mock_enforce.mock_calls)
            self.assertEqual(len(utils.get_search_plugins()),
                             len(mock_enforce.call_args_list))
Example #8
    def test_facets_resource_policy(self):
        request = unit_test_utils.get_fake_request()
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)

        with mock.patch.object(self.enforcer, 'check') as mock_enforce:
            search_deserializer.facets(request)
            self.assertIn(mock.call(request.context,
                                    'resource:OS::Nova::Server',
                                    request.context.policy_target),
                          mock_enforce.mock_calls)
            self.assertIn(mock.call(request.context,
                                    'resource:OS::Glance::Image',
                                    request.context.policy_target),
                          mock_enforce.mock_calls)
            self.assertEqual(len(utils.get_search_plugins()),
                             len(mock_enforce.call_args_list))
Example #9
def create_resource():
    """Search resource factory method"""
    plugins = utils.get_search_plugins()
    policy_enforcer = policy.Enforcer()
    deserializer = RequestDeserializer(plugins,
                                       policy_enforcer=policy_enforcer)
    serializer = ResponseSerializer()
    controller = SearchController(plugins, policy_enforcer=policy_enforcer)
    return wsgi.Resource(controller, deserializer, serializer)
Example #10
    def test_default_facet_options(self):
        request = unit_test_utils.get_fake_request(path='/v1/search/facets')
        output = self.deserializer.facets(request)

        output['doc_type'] = sorted(output['doc_type'])
        expected_doc_types = sorted(utils.get_search_plugins().keys())
        expected = {'index_name': None, 'doc_type': expected_doc_types,
                    'all_projects': False, 'limit_terms': 0}
        self.assertEqual(expected, output)
Example #11
    def test_default_facet_options(self):
        request = unit_test_utils.get_fake_request(path='/v1/search/facets')
        output = self.deserializer.facets(request)

        output['doc_type'] = sorted(output['doc_type'])
        expected_doc_types = sorted(utils.get_search_plugins().keys())
        expected = {'index_name': None, 'doc_type': expected_doc_types,
                    'all_projects': False, 'limit_terms': 0,
                    'include_fields': True, 'exclude_options': False}
        self.assertEqual(expected, output)
Example #12
    def test_facet_exclude_options(self):
        path = '/v1/search/facets?exclude_options=True'
        request = unit_test_utils.get_fake_request(path=path)
        output = self.deserializer.facets(request)

        output['doc_type'] = sorted(output['doc_type'])
        expected_doc_types = sorted(utils.get_search_plugins().keys())
        expected = {'index_name': None, 'doc_type': expected_doc_types,
                    'all_projects': False, 'limit_terms': 0,
                    'include_fields': True, 'exclude_options': True}
        self.assertEqual(expected, output)
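
Note: taken together, the facet tests above pin down the deserializer's option defaults. A hypothetical sketch of query-string parsing that would produce them (parameter names are inferred from the tests; the real deserializer may differ):

def parse_facet_options(params, plugins):
    # Hypothetical helper: 'params' is the request's query-string
    # dict, 'plugins' the dict returned by utils.get_search_plugins().
    def as_bool(name, default):
        return params.get(name, str(default)).lower() == 'true'

    return {
        'index_name': params.get('index_name'),
        'doc_type': params.get('type') or list(plugins.keys()),
        'all_projects': as_bool('all_projects', False),
        'limit_terms': int(params.get('limit_terms', 0)),
        'include_fields': as_bool('include_fields', True),
        'exclude_options': as_bool('exclude_options', False),
    }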
Example #13
    def test_search_resource_policy_checks(self, mock_request_body):
        request = unit_test_utils.get_fake_request()
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(), policy_enforcer=self.enforcer)

        with mock.patch.object(self.enforcer, 'check') as mock_enforce:
            mock_request_body.return_value = {'type': 'OS::Nova::Server'}
            search_deserializer.search(request)
            mock_enforce.assert_called_with(request.context,
                                            'resource:OS::Nova::Server',
                                            request.context.policy_target)
Example #14
    def test_search_resource_policy_checks(self, mock_request_body):
        request = unit_test_utils.get_fake_request()
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)

        with mock.patch.object(self.enforcer, 'check') as mock_enforce:
            mock_request_body.return_value = {'type': 'OS::Nova::Server'}
            search_deserializer.search(request)
            mock_enforce.assert_called_with(request.context,
                                            'resource:OS::Nova::Server',
                                            request.context.policy_target)
Example #15
    def test_facets_resource_policy(self):
        request = unit_test_utils.get_fake_request()
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)

        with mock.patch.object(self.enforcer, 'enforce') as mock_enforce:
            search_deserializer.facets(request)
            mock_enforce.assert_has_calls([
                mock.call(request.context,
                          'resource:OS::Nova::Server:allow', {}),
                mock.call(request.context,
                          'resource:OS::Nova::Server:facets', {})
            ])
Example #16
    def test_search_service_policies(self):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)

        types = ['OS::Glance::Image', 'OS::Nova::Server']
        glance_enforce = mock.Mock()
        nova_enforce = mock.Mock()
        glance_enforce.enforce.return_value = False
        nova_enforce.enforce.return_value = True
        service_enforcers = {
            'image': glance_enforce,
            'compute': nova_enforce
        }

        expect_creds = {
            'tenant_id': request.context.tenant,
            'project_id': request.context.tenant,
            'user_id': request.context.user,
            'roles': ['member'],
            'is_admin_project': True,
            'is_admin': False,
            'domain_id': None,
            'user_domain_id': None,
            'project_domain_id': None,
            'service_user_id': None,
            'service_user_domain_id': None,
            'service_project_id': None,
            'service_project_domain_id': None,
            'service_roles': [],
            'system_scope': None
        }

        fake_target = {
            'user_id': request.context.user,
            'project_id': request.context.tenant,
            'tenant_id': request.context.tenant
        }

        with mock.patch('searchlight.service_policies._get_enforcers',
                        return_value=service_enforcers):
            filtered_types = search_deserializer._filter_types_by_policy(
                request.context, types)
            self.assertEqual(['OS::Nova::Server'], filtered_types)
            glance_enforce.enforce.assert_called_with(
                'get_images', fake_target, expect_creds)
            nova_enforce.enforce.assert_called_with(
                'os_compute_api:servers:index', fake_target, expect_creds)
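
Note: the expect_creds dictionary above effectively documents how the request context is translated into oslo.policy credentials for each service's enforcer. A hypothetical condensation (the real translation lives in searchlight.service_policies and covers more fields):

def creds_from_context(context):
    # Hypothetical sketch mirroring expect_creds above; fields the
    # fake context does not carry default to None or empty.
    return {
        'tenant_id': context.tenant,
        'project_id': context.tenant,
        'user_id': context.user,
        'roles': context.roles,
        'is_admin': context.is_admin,
        'service_roles': [],
        'system_scope': None,
    }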
Example #17
    def test_resource_policy_allows_admin(self):
        request = unit_test_utils.get_fake_request(is_admin=True)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)
        types = ['OS::Glance::Image', 'OS::Nova::Server']

        self.enforcer.add_rules(policy.policy.Rules.from_dict({
            'resource:OS::Glance::Image': 'role:admin',
            'resource:OS::Nova::Server': 'role:admin'
        }))

        filtered_types = search_deserializer._filter_types_by_policy(
            request.context, types)
        self.assertEqual(types, filtered_types)
Example #18
    def test_aggregation_policy(self, mock_request_body):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)

        with mock.patch.object(self.enforcer, 'enforce') as mock_enforce:
            mock_request_body.return_value = {
                'query': {'match_all': {}},
                'aggregations': {'terms': {'field': 'some_field'}}
            }
            search_deserializer.search(request)
            mock_enforce.assert_called_with(request.context,
                                            'search:query:aggregations',
                                            request.context.policy_target)
Example #21
    def test_search_service_policies(self):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)

        types = ['OS::Glance::Image', 'OS::Nova::Server']
        glance_enforce = mock.Mock()
        nova_enforce = mock.Mock()
        glance_enforce.enforce.return_value = False
        nova_enforce.enforce.return_value = True
        service_enforcers = {
            'image': glance_enforce,
            'compute': nova_enforce
        }

        expect_creds = {
            'tenant_id': request.context.tenant,
            'project_id': request.context.tenant,
            'user_id': request.context.user,
            'roles': ['member'],
            'is_admin_project': True,
            'is_admin': False,
            'user_domain_id': None,
            'project_domain_id': None,
            'service_user_id': None,
            'service_user_domain_id': None,
            'service_project_id': None,
            'service_project_domain_id': None,
            'service_roles': [],
            'system_scope': None
        }

        fake_target = {
            'user_id': request.context.user,
            'project_id': request.context.tenant,
            'tenant_id': request.context.tenant
        }

        with mock.patch('searchlight.service_policies._get_enforcers',
                        return_value=service_enforcers):
            filtered_types = search_deserializer._filter_types_by_policy(
                request.context, types)
            self.assertEqual(['OS::Nova::Server'], filtered_types)
            glance_enforce.enforce.assert_called_with(
                'get_images', fake_target, expect_creds)
            nova_enforce.enforce.assert_called_with(
                'os_compute_api:servers:index', fake_target, expect_creds)
Example #22
    def aliases(self):
        # Grab a list of aliases used by Searchlight.
        aliases = []
        for res_type, ext in six.iteritems(utils.get_search_plugins()):
            aliases.append(ext.obj.alias_name_listener)
            aliases.append(ext.obj.alias_name_search)

        # Grab the indices associated with the aliases. The end result is
        # a dictionary where the key is the index and the value is a list
        # of aliases associated with that index.
        indices = {}
        for alias in set(aliases):
            try:
                response = es_utils.get_indices(alias)
            except es_exc.NotFoundError:
                # Ignore and continue.
                response = {}
            except Exception as e:
                # Probably an ES connection issue. Alert the user.
                LOG.error(
                    _LE("Failed retrieving indices from Elasticsearch "
                        "%(a)s %(e)s") % {
                            'a': alias,
                            'e': e
                        })
                sys.exit(3)

            for index in response.keys():
                if index not in indices:
                    indices[index] = [alias]
                else:
                    indices[index].append(alias)

        if not indices:
            print("\nNo Elasticsearch indices for Searchlight exist.")
        else:
            print("\nList of Elasticsearch indices (and their associated"
                  " aliases) used by Searchlight.\n")
            print("The indices are based on the config file.")
            print("To view indices used by other Searchlight config "
                  "files, use the --config-file option.\n")
            print("Indices are denoted with a '*'")
            print("Aliases are denoted with a '+'\n")
            for index in indices:
                print("    * " + index)
                for alias in indices[index]:
                    print("        + " + alias)
        print("\n")
Example #23
    def test_mapping_field_types(self):
        """Fields with identical names but different types cause problems
        because lucene doesn't differentiate on doc_type. Elasticsearch 2.x
        enforces rules during mapping that 1.x did not. This test ensures that
        for any plugins present, mappings don't conflict.
        """
        # Keep track of field names and types
        encountered = {}
        encountered_in = collections.defaultdict(list)

        # Some properties are allowed to be different.
        # See https://www.elastic.co/guide/en/elasticsearch/reference/current/
        #             breaking_20_mapping_changes.html
        ignore_props = ['copy_to', 'dynamic', 'enabled', 'ignore_above',
                        'include_in_all', 'properties']

        def merge_and_assert_conflict(resource_type, properties):
            for field_name, field_type in six.iteritems(properties):

                # Ignore some properties (see above)
                for prop in ignore_props:
                    field_type.pop(prop, None)

                existing = encountered.get(field_name, {})

                if existing:
                    previous = ",".join(encountered_in[field_name])
                    params = {
                        'field_name': field_name, 'field_type': field_type,
                        'resource_type': resource_type, 'previous': previous,
                        'existing': existing}
                    message = (
                        "Field definition for '%(field_name)s' in "
                        "%(resource_type)s (%(field_type)s) does not match "
                        "that found in %(previous)s (%(existing)s") % params
                    self.assertEqual(existing, field_type, message)
                else:
                    encountered[field_name] = field_type

                encountered_in[field_name].append(resource_type)

        index_base = 'searchlight.elasticsearch.plugins.base.IndexBase'
        with mock.patch(index_base + '.enabled',
                        new_callable=mock.PropertyMock, return_value=True):
            plugins = searchlight_utils.get_search_plugins()
            for resource_type, plugin in six.iteritems(plugins):
                props = plugin.obj.get_mapping()['properties']
                merge_and_assert_conflict(resource_type, props)
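
Note: concretely, the conflict this test guards against is two plugins mapping the same field name to incompatible types; Elasticsearch 2.x refuses to create an index containing both. Illustrative, made-up property definitions:

# Two document types in one index disagreeing on 'size' -- exactly
# the situation merge_and_assert_conflict() above is meant to catch.
image_properties = {'size': {'type': 'long'}}
server_properties = {'size': {'type': 'string'}}
assert image_properties['size'] != server_properties['size']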
Example #24
    def test_search_resource_policy_checks(self, mock_request_body):
        request = unit_test_utils.get_fake_request()
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)

        with mock.patch.object(self.enforcer, 'enforce') as mock_enforce:
            mock_request_body.return_value = {'type': 'OS::Nova::Server'}
            search_deserializer.search(request)
            self.assertEqual(2, mock_enforce.call_count)
            mock_enforce.assert_has_calls([
                mock.call(request.context,
                          'resource:OS::Nova::Server:allow', {}),
                mock.call(request.context,
                          'resource:OS::Nova::Server:query', {})
            ])
Example #25
    def test_resource_policy_disallow(self):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(), policy_enforcer=self.enforcer)
        types = ['OS::Glance::Image', 'OS::Nova::Server']

        # And try disabling access for everyone
        self.enforcer.add_rules(
            policy.policy.Rules.from_dict({'resource:OS::Glance::Image': '!'}))

        self.assertEqual(['OS::Nova::Server'],
                         search_deserializer._filter_types_by_policy(
                             request.context, types))

        # Same for admin
        request = unit_test_utils.get_fake_request(is_admin=True)
        self.assertEqual(['OS::Nova::Server'],
                         search_deserializer._filter_types_by_policy(
                             request.context, types))
Example #26
    def test_policy_precedence(self):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)
        types = ['OS::Nova::Server', 'OS::Glance::Image']
        self.enforcer.add_rules(policy.policy.Rules.from_dict({
            'resource:OS::Nova::Server:allow': '',
            'resource:OS::Nova::Server:query': '!'
        }))

        # Query should be disallowed by the specific policy
        filtered_types = search_deserializer._filter_types_by_policy(
            request.context, types, "query")
        self.assertEqual(['OS::Glance::Image'], filtered_types)

        # Facet should be allowed since there is no specific exclusion
        filtered_types = search_deserializer._filter_types_by_policy(
            request.context, types, "facets")
        self.assertEqual(set(['OS::Nova::Server', 'OS::Glance::Image']),
                         set(filtered_types))
Example #27
    def test_faulty_policy_precedence(self):
        """Unfortunately the ordering that might make most sense isn't
        possible. Rules can only become more restrictive
         """
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)
        types = ['OS::Glance::Image', 'OS::Nova::Server']
        self.enforcer.add_rules(policy.policy.Rules.from_dict({
            'resource:OS::Glance::Image:allow': '!',
            'resource:OS::Glance::Image:query': ''
        }))

        filtered_types = search_deserializer._filter_types_by_policy(
            request.context, types, "facets")
        self.assertEqual(['OS::Nova::Server'], filtered_types)

        filtered_types = search_deserializer._filter_types_by_policy(
            request.context, types, "query")
        self.assertEqual(['OS::Nova::Server'], filtered_types)
Example #28
    def test_resource_policy_disallow_non_admin(self):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)
        types = ['OS::Glance::Image', 'OS::Nova::Server',
                 'OS::Glance::Metadef']

        self.enforcer.add_rules(policy.policy.Rules.from_dict({
            'resource:OS::Glance::Image:allow': 'role:admin'
        }))
        filtered_types = search_deserializer._filter_types_by_policy(
            request.context, types, "query")
        self.assertEqual(set(['OS::Nova::Server', 'OS::Glance::Metadef']),
                         set(filtered_types))

        self.enforcer.add_rules(policy.policy.Rules.from_dict({
            'resource:OS::Nova::Server:query': 'role:admin'
        }))

        filtered_types = search_deserializer._filter_types_by_policy(
            request.context, types, "query")
        self.assertEqual(['OS::Glance::Metadef'], filtered_types)
Example #29
    def test_resource_policy_disallow(self):
        request = unit_test_utils.get_fake_request(is_admin=False)
        search_deserializer = search.RequestDeserializer(
            utils.get_search_plugins(),
            policy_enforcer=self.enforcer)
        types = ['OS::Glance::Image', 'OS::Nova::Server']

        # And try disabling access for everyone
        self.enforcer.add_rules(policy.policy.Rules.from_dict({
            'resource:OS::Glance::Image': '!'
        }))

        self.assertEqual(
            ['OS::Nova::Server'],
            search_deserializer._filter_types_by_policy(request.context, types)
        )

        # Same for admin
        request = unit_test_utils.get_fake_request(is_admin=True)
        self.assertEqual(
            ['OS::Nova::Server'],
            search_deserializer._filter_types_by_policy(request.context, types)
        )
Example #30
    def __init__(self, *args, **kwargs):
        super(ListenerService, self).__init__(*args, **kwargs)
        self.plugins = utils.get_search_plugins()
        self.listeners = []
        self.topics_exchanges_set = self.topics_and_exchanges()
Example #31
    def test_mapping_field_types(self):
        """Fields with identical names but different types cause problems
        because lucene doesn't differentiate on doc_type. Elasticsearch 2.x
        enforces rules during mapping that 1.x did not. This test ensures that
        for any plugins present, mappings don't conflict.
        """
        # Keep track of field names and types
        encountered = {}
        encountered_in = collections.defaultdict(list)

        # Some properties are allowed to be different.
        # See https://www.elastic.co/guide/en/elasticsearch/reference/current/
        #             breaking_20_mapping_changes.html
        ignore_props = [
            'copy_to', 'dynamic', 'enabled', 'ignore_above', 'include_in_all',
            'properties'
        ]

        def merge_and_assert_conflict(resource_type, properties):
            for field_name, field_type in six.iteritems(properties):

                # Ignore some properties (see above)
                for prop in ignore_props:
                    field_type.pop(prop, None)

                existing = encountered.get(field_name, {})

                if existing:
                    previous = ",".join(encountered_in[field_name])
                    params = {
                        'field_name': field_name,
                        'field_type': field_type,
                        'resource_type': resource_type,
                        'previous': previous,
                        'existing': existing
                    }
                    message = (
                        "Field definition for '%(field_name)s' in "
                        "%(resource_type)s (%(field_type)s) does not match "
                        "that found in %(previous)s (%(existing)s") % params
                    self.assertEqual(existing, field_type, message)
                else:
                    encountered[field_name] = field_type

                encountered_in[field_name].append(resource_type)

        def verify_normalized_fields(resource_type, full_mapping):
            """Some fields need to be included in all Elasticsearch mappings.
               Mostly these fields are used by the UI for queries. We want
               to verify that these fields do indeed exist in all mappings.
            """
            # List of fields that are required.
            fields = ['updated_at']

            for field in fields:
                self.assertIn(field, full_mapping['properties'].keys())

        index_base = 'searchlight.elasticsearch.plugins.base.IndexBase'
        with mock.patch(index_base + '.enabled',
                        new_callable=mock.PropertyMock,
                        return_value=True):
            plugins = searchlight_utils.get_search_plugins()
            for resource_type, plugin in six.iteritems(plugins):
                props = plugin.obj.get_mapping()['properties']
                merge_and_assert_conflict(resource_type, props)
                for doc_type, mapping in plugin.obj.get_full_mapping():
                    verify_normalized_fields(doc_type, mapping)
Example #32
    def __init__(self, context, es_api):
        self.context = context
        self.es_api = es_api
        self.plugins = common_utils.get_search_plugins()
        self._plugins_list = self._get_plugin_list()
Example #33
    def setUp(self):
        super(TestSearchDeserializer, self).setUp()
        self.deserializer = search.RequestDeserializer(
            utils.get_search_plugins())
Example #34
    def setUp(self):
        super(TestSearchDeserializer, self).setUp()
        self.deserializer = search.RequestDeserializer(
            utils.get_search_plugins()
        )
Example #36
    def sync(self, group=None, _type=None, force=False):
        # Verify all indices and types have registered plugins.
        # index and _type are lists because of nargs='*'
        group = group.split(',') if group else []
        _type = _type.split(',') if _type else []

        group_set = set(group)
        type_set = set(_type)
        """
        The caller can specify a sync based on either the Document Type or the
        Resource Group. With the Zero Downtime functionality, we are using
        aliases to index into ElasticSearch. We now have multiple Document
        Types sharing a single alias. If any member of a Resource Group (an
        ES alias) is re-syncing, *all* members of that Resource Group need
        to re-sync.

        The final list of plugins to use for re-syncing *must* come only from
        the Resource Group specifications. The "type" list is used only to make
        the "group" list complete. We need a two pass algorithm for this.

        First pass: Analyze the plugins according to the "type" list. This
          turns a type in the "type" list to a group in the "group" list.

        Second pass: Analyze the plugins according to the "group" list. Create
          the plugin list that will be used for re-syncing.

        Note: We cannot call any plugin's sync() during these two passes. The
        sync needs to be a separate step. The API states that if any invalid
        plugin was specified by the caller, the entire operation fails.
        """

        # First Pass: Document Types.
        if _type:
            for res_type, ext in six.iteritems(utils.get_search_plugins()):
                plugin_obj = ext.obj
                type_set.discard(plugin_obj.get_document_type())
                if plugin_obj.get_document_type() in _type:
                    group.append(plugin_obj.resource_group_name)

        # Second Pass: Resource Groups (including those from types).
        # This pass is a little tricky. If "group" is empty, it implies every
        # resource gets re-synced. The command group_set.discard() is a no-op
        # when "group" is empty.
        resource_groups = []
        plugin_objs = {}
        plugins_list = []
        for res_type, ext in six.iteritems(utils.get_search_plugins()):
            plugin_obj = ext.obj
            group_set.discard(plugin_obj.resource_group_name)
            if (not group) or (plugin_obj.resource_group_name in group):
                plugins_list.append((res_type, ext))
                plugin_objs[plugin_obj.resource_group_name] = plugin_obj
                if not (plugin_obj.resource_group_name,
                        plugin_obj.alias_name_search,
                        plugin_obj.alias_name_listener) in resource_groups:
                    resource_groups.append((plugin_obj.resource_group_name,
                                            plugin_obj.alias_name_search,
                                            plugin_obj.alias_name_listener))

        if group_set or type_set:
            print("Some index names or types do not have plugins "
                  "registered. Index names: %s. Types: %s" %
                  (",".join(group_set) or "<None>", ",".join(type_set)
                   or "<None>"))
            print("Aborting.")
            sys.exit(1)

        if not force:
            # For display purposes, we only want to iterate over top-level
            # plugins that are not the children of another plugin. Child
            # plugins are displayed when we call get_index_display_name(),
            # so including them in the display list would list them twice.
            display_plugins = []
            for res, ext in plugins_list:
                if not ext.obj.parent_plugin:
                    display_plugins.append((res, ext))

            def format_selection(selection):
                resource_type, ext = selection
                return '  ' + ext.obj.get_index_display_name()

            # Grab the first element in the first (and only) tuple.
            group = resource_groups[0][0]
            print("\nAll resource types within Resource Group \"%(group)s\""
                  " must be re-indexed" % {'group': group})
            print("\nResource types (and aliases) matching selection:\n%s\n" %
                  '\n'.join(map(format_selection, sorted(display_plugins))))

            ans = six.moves.input(
                "Indexing will NOT delete existing data or mapping(s). It "
                "will reindex all resources. \nUse '--force' to suppress "
                "this message.\nOK to continue? [y/n]: ")
            if ans.lower() != 'y':
                print("Aborting.")
                sys.exit(0)

        # Start the re-indexing process

        # Step #1: Create new indexes for each Resource Group Type.
        #   The index needs to be fully functional before it gets
        #   added to any aliases. This includes all settings and
        #   mappings. Only then can we add it to the aliases. We first
        #   need to create all indexes. This is done by resource group.
        #   We cache and turn off new indexes' refresh intervals;
        #   this improves the performance of data re-syncing.
        #   After data is re-synced, set the refresh interval back.
        #   Once all indexes are created, we need to initialize the
        #   indexes. This is done by document type.
        #   NB: The aliases remain unchanged for this step.
        index_names = {}
        refresh_intervals = {}
        try:
            for group, search, listen in resource_groups:
                index_name = es_utils.create_new_index(group)
                index_names[group] = index_name
                refresh_intervals[index_name] = \
                    es_utils.get_index_refresh_interval(index_name)
                # Disable refresh interval by setting its value to -1
                es_utils.set_index_refresh_interval(index_name, -1)
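                # (Assuming es_utils wraps elasticsearch-py, this maps
                # roughly onto indices.put_settings(index=index_name,
                # body={'index': {'refresh_interval': -1}}).)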
            for resource_type, ext in plugins_list:
                plugin_obj = ext.obj
                group_name = plugin_obj.resource_group_name
                plugin_obj.prepare_index(index_name=index_names[group_name])
        except Exception:
            LOG.error(
                _LE("Error creating index or mapping, aborting "
                    "without indexing"))
            es_utils.alias_error_cleanup(index_names)
            raise

        # Step #2: Modify new index to play well with multiple indices.
        #   There is a "feature" of Elasticsearch where some types of
        #   queries do not work across multiple indices if there are no
        #   mappings for the specified document types. This is an issue we
        #   run into with our RBAC functionality. We need to modify the new
        #   index to work for these cases. We will grab all document types
        #   from the plugins and add a mapping for them as needed to the newly
        #   created indices.
        doc_type_info = []
        for res_type, ext in six.iteritems(utils.get_search_plugins()):
            doc_type_info.append(
                (ext.obj.get_document_type(), ext.obj.parent_plugin_type))
        for index in list(index_names.values()):
            es_utils.add_extra_mappings(index_name=index,
                                        doc_type_info=doc_type_info)

        # Step #3: Set up the aliases for all Resource Type Group.
        #   These actions need to happen outside of the plugins. Now that
        #   the indexes are created and fully functional we can associate
        #   them with the aliases.
        #   NB: The indexes remain unchanged for this step.
        for group, search, listen in resource_groups:
            try:
                es_utils.setup_alias(index_names[group], search, listen)
            except Exception as e:
                LOG.error(
                    _LE("Failed to setup alias for resource group "
                        "%(g)s: %(e)s") % {
                            'g': group,
                            'e': e
                        })
                es_utils.alias_error_cleanup(index_names)
                raise

        # Step #4: Re-index all resource types in this Resource Type Group.
        #   As an optimization, if any types are explicitly requested, we
        #   will index them from their service APIs. The rest will be
        #   indexed from an existing ES index, if one exists.
        #   NB: The "search" and "listener" aliases remain unchanged for this
        #       step.
        es_reindex = []
        plugins_to_index = copy.copy(plugins_list)
        if _type:
            for resource_type, ext in plugins_list:
                doc_type = ext.obj.get_document_type()
                if doc_type not in _type:
                    es_reindex.append(doc_type)
                    plugins_to_index.remove((resource_type, ext))

        # Call plugin API as needed.
        if plugins_to_index:
            for res, ext in plugins_to_index:
                plugin_obj = ext.obj
                gname = plugin_obj.resource_group_name
                try:
                    plugin_obj.initial_indexing(index_name=index_names[gname])
                    es_utils.refresh_index(index_names[gname])
                except exceptions.EndpointNotFound:
                    LOG.warning(
                        _LW("Service is not available for plugin: "
                            "%(ext)s") % {"ext": ext.name})
                except Exception as e:
                    LOG.error(
                        _LE("Failed to setup index extension "
                            "%(ex)s: %(e)s") % {
                                'ex': ext.name,
                                'e': e
                            })
                    es_utils.alias_error_cleanup(index_names)
                    raise

        # Call ElasticSearch for the rest, if needed.
        if es_reindex:
            for group in six.iterkeys(index_names):
                # Find the tuple for this group in resource_groups and
                # extract its second member, the search alias.
                alias_search = \
                    [a for a in resource_groups if a[0] == group][0][1]
                try:
                    es_utils.reindex(src_index=alias_search,
                                     dst_index=index_names[group],
                                     type_list=es_reindex)
                    es_utils.refresh_index(index_names[group])
                except Exception as e:
                    LOG.error(
                        _LE("Failed to setup index extension "
                            "%(ex)s: %(e)s") % {
                                'ex': ext.name,
                                'e': e
                            })
                    es_utils.alias_error_cleanup(index_names)
                    raise

        # Step #5: Update the "search" alias.
        #   All re-indexing has occurred. The index/alias is the same for
        #   all resource types within this Resource Group. These actions need
        #   to happen outside of the plugins. Also restore the refresh
        #   interval for the indexes; this makes their data searchable.
        #   NB: The "listener" alias remains unchanged for this step.
        for index_name, interval in refresh_intervals.items():
            es_utils.set_index_refresh_interval(index_name, interval)

        old_index = {}
        for group, search, listen in resource_groups:
            old_index[group] = \
                es_utils.alias_search_update(search, index_names[group])

        # Step #6: Update the "listener" alias.
        #   The "search" alias has been updated. This involves both removing
        #   the old index from the alias as well as deleting the old index.
        #   These actions need to happen outside of the plugins.
        #   NB: The "search" alias remains unchanged for this step.
        for group, search, listen in resource_groups:
            try:
                # If any exception raises, ignore and continue to delete
                # any other old indexes.
                es_utils.delete_index(old_index[group])
            except Exception as e:
                LOG.error(encodeutils.exception_to_unicode(e))
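
Note: the two-pass resolution described in the long comment near the top of sync() condenses to a small set computation. A sketch under the same assumptions (each extension's obj exposes get_document_type() and resource_group_name):

def resolve_sync_selection(plugins, groups, types):
    # Pass 1: a requested document type drags in its whole group.
    groups = set(groups)
    for ext in plugins.values():
        if ext.obj.get_document_type() in types:
            groups.add(ext.obj.resource_group_name)
    # Pass 2: select by group; an empty selection means everything.
    return [(res_type, ext) for res_type, ext in plugins.items()
            if not groups or ext.obj.resource_group_name in groups]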
Example #38
    def __init__(self, context, es_api):
        self.context = context
        self.es_api = es_api
        self.plugins = utils.get_search_plugins() or []
        self.plugins_info_dict = self._get_plugin_info()
Example #39
    def sync(self, group=None, _type=None, force=False, force_es=False):
        def wait_for_threads():
            """Patiently wait for all running threads to complete.
            """
            threads_running = True
            while threads_running:
                # Are any threads still running?
                threads_running = False
                for future in futures:
                    if not future.done():
                        threads_running = True
                        break
                time.sleep(1)

        # Signal handler to catch interrupts from the user (ctl-c)
        def sig_handler(signum, frame):
            """When rudely interrupted by the user, we will want to clean up
               after ourselves. We have potentially three pieces of unfinished
               business.
                   1. We have running threads. Cancel them.
                   2. Wait for all threads to finish.
                   3. We created new indices in Elasticsearch. Remove them.
            """
            # Cancel any and all threads.
            for future in futures:
                future.cancel()

            # Politely wait for the current threads to finish.
            LOG.warning(
                _LW("Interrupt received, waiting for threads to finish"
                    " before cleaning up"))
            wait_for_threads()

            # Rudely remove any newly created Elasticsearch indices.
            if index_names:
                es_utils.alias_error_cleanup(index_names)

            sys.exit(0)

        if force_es and _type:
            # The user cannot specify both of these options simultaneously.
            print("\nInvalid set of options.")
            print("Cannot specify both '--type' and '--apply-mapping-changes "
                  "simultaneously.\n")
            sys.exit(1)

        try:
            max_workers = cfg.CONF.manage.workers
        except cfg.ConfigFileValueError as e:
            LOG.error(
                _LE("Invalid value for config file option "
                    "'manage.workers'. The number of thread workers "
                    "must be greater than 0."))
            sys.exit(3)

        # Grab the list of plugins registered as entry points through stevedore
        search_plugins = utils.get_search_plugins()

        # Verify all indices and types have registered plugins.
        # index and _type are lists because of nargs='*'
        group = group.split(',') if group else []
        _type = _type.split(',') if _type else []

        _type = utils.expand_type_matches(_type, six.viewkeys(search_plugins))
        LOG.debug("After expansion, 'type' argument: %s", ", ".join(_type))

        group_set = set(group)
        type_set = set(_type)
        """
        The caller can specify a sync based on either the Document Type or the
        Resource Group. With the Zero Downtime functionality, we are using
        aliases to index into ElasticSearch. We now have multiple Document
        Types sharing a single alias. If any member of a Resource Group (an
        ES alias) is re-syncing, *all* members of that Resource Group need
        to re-sync.

        The final list of plugins to use for re-syncing *must* come only from
        the Resource Group specifications. The "type" list is used only to make
        the "group" list complete. We need a two pass algorithm for this.

        First pass: Analyze the plugins according to the "type" list. This
          turns a type in the "type" list to a group in the "group" list.

        Second pass: Analyze the plugins according to the "group" list. Create
          the plugin list that will be used for re-syncing.

        Note: We cannot call any plugin's sync() during these two passes. The
        sync needs to be a separate step. The API states that if any invalid
        plugin was specified by the caller, the entire operation fails.
        """

        # First Pass: Document Types.
        if _type:
            for res_type, ext in six.iteritems(search_plugins):
                plugin_obj = ext.obj
                type_set.discard(plugin_obj.get_document_type())
                if plugin_obj.get_document_type() in _type:
                    group.append(plugin_obj.resource_group_name)

        # Second Pass: Resource Groups (including those from types).
        # This pass is a little tricky. If "group" is empty, it implies every
        # resource gets re-synced. The command group_set.discard() is a no-op
        # when "group" is empty.
        resource_groups = []
        plugin_objs = {}
        plugins_list = []
        for res_type, ext in six.iteritems(search_plugins):
            plugin_obj = ext.obj
            group_set.discard(plugin_obj.resource_group_name)
            if (not group) or (plugin_obj.resource_group_name in group):
                plugins_list.append((res_type, ext))
                plugin_objs[plugin_obj.resource_group_name] = plugin_obj
                if not (plugin_obj.resource_group_name,
                        plugin_obj.alias_name_search,
                        plugin_obj.alias_name_listener) in resource_groups:
                    resource_groups.append((plugin_obj.resource_group_name,
                                            plugin_obj.alias_name_search,
                                            plugin_obj.alias_name_listener))

        if group_set or type_set:
            print("Some index names or types do not have plugins "
                  "registered. Index names: %s. Types: %s" %
                  (",".join(group_set) or "<None>", ",".join(type_set)
                   or "<None>"))
            print("Aborting.")
            sys.exit(1)

        # As an optimization, if any types are explicitly requested, we
        # will index them from their service APIs. The rest will be
        # indexed from an existing ES index, if one exists.
        #
        # Also, if force_es is set the user wishes to use ES exclusively
        # as the source for all data. This implies everything in the
        # es_reindex list and nothing in the plugins_to_index list.
        es_reindex = []
        plugins_to_index = copy.copy(plugins_list)
        if _type or force_es:
            for resource_type, ext in plugins_list:
                doc_type = ext.obj.get_document_type()

                # If force_es is set, then "_type" is None. Always do this.
                # If force_es is None, then "_type" is set. Adjust as needed.
                if doc_type not in _type:
                    es_reindex.append(doc_type)
                    # Don't reindex this type
                    plugins_to_index.remove((resource_type, ext))

        if not force:
            # For display purposes, we only want to iterate over top-level
            # plugins that are not the children of another plugin. Child
            # plugins are displayed when we call get_index_display_name(),
            # so including them in the display list would list them twice.
            display_plugins = []
            plugins_without_notifications = []
            for res, ext in plugins_list:
                if not ext.obj.parent_plugin:
                    display_plugins.append((res, ext))

            def format_selection(selection):
                def _format_plugin(plugin, indent=0):
                    plugin_doc_type = plugin.get_document_type()
                    handler = plugin.get_notification_handler()
                    event_list = handler.get_notification_supported_events()

                    display = '\n' + '    ' * indent + '--> ' if indent else ''
                    display += '%s (%s)' % (plugin_doc_type,
                                            plugin.resource_group_name)
                    if plugin_doc_type in es_reindex:
                        display += ' *'
                    if not event_list:
                        display += ' !!'
                        plugins_without_notifications.append(plugin)
                    return display + ''.join(
                        _format_plugin(c, indent + 1)
                        for c in plugin.child_plugins)

                return _format_plugin(selection[1].obj)

            all_res_groups = set(grp[0] for grp in resource_groups)
            print("\nResources in these groups must be re-indexed: %s." %
                  ", ".join(all_res_groups))

            print("Resource types (and aliases) matching selection:\n\n%s\n" %
                  '\n'.join(map(format_selection, sorted(display_plugins))))

            if es_reindex:
                print("Any types marked with * will be reindexed from "
                      "existing Elasticsearch data.\n")

            if plugins_without_notifications:
                print("Any types marked with !! do not support incremental "
                      "updates via the listener.")
                print("These types must be fully re-indexed periodically or "
                      "should be disabled.\n")

            ans = six.moves.input("\nUse '--force' to suppress this message.\n"
                                  "OK to continue? [y/n]: ")
            if ans.lower() != 'y':
                print("Aborting.")
                sys.exit(0)

        # Start the re-indexing process.
        # Now we are starting to change Elasticsearch. Let's clean up
        # if interrupted. Set index_names/futures here for cleaner code
        # in the signal handler.
        index_names = {}
        futures = []
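        # sig_handler (defined earlier in this module) reads index_names and
        # futures during cleanup, so both are bound before it is installed.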
        signal.signal(signal.SIGINT, sig_handler)

        # Step #1: Create a new index for each Resource Group.
        #   The index needs to be fully functional before it gets
        #   added to any aliases. This includes all settings and
        #   mappings. Only then can we add it to the aliases. We first
        #   need to create all the indexes. This is done by resource
        #   group. We cache each new index's refresh interval and turn
        #   it off; this improves the performance of the data re-sync.
        #   After the data is re-synced, the refresh interval is
        #   restored. Once all indexes are created, we initialize them.
        #   This is done by document type.
        #   NB: The aliases remain unchanged for this step.
        refresh_intervals = {}
        try:
            for group, search, listen in resource_groups:
                index_name = es_utils.create_new_index(group)
                index_names[group] = index_name

                refresh_intervals[index_name] = \
                    es_utils.get_index_refresh_interval(index_name)
                # Disable refresh interval by setting its value to -1
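                # (A refresh_interval of -1 tells Elasticsearch not to
                # refresh the index at all, which speeds up bulk indexing;
                # the saved interval is restored in Step #5.)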
                es_utils.set_index_refresh_interval(index_name, -1)
            for resource_type, ext in plugins_list:
                plugin_obj = ext.obj
                group_name = plugin_obj.resource_group_name
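                # prepare_index() is expected to install the plugin's
                # settings and mappings on its group's newly created index.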
                plugin_obj.prepare_index(index_name=index_names[group_name])
        except Exception:
            LOG.error(
                _LE("Error creating index or mapping, aborting "
                    "without indexing"))
            es_utils.alias_error_cleanup(index_names)
            raise

        # Step #2: Modify new index to play well with multiple indices.
        #   There is a "feature" of Elasticsearch where some types of
        #   queries do not work across multiple indices if there are no
        #   mappings for the specified document types. This is an issue we
        #   run into with our RBAC functionality. We need to modify the new
        #   index to work for these cases. We will grab all document types
        #   from the plugins and add a mapping for them as needed to the newly
        #   created indices.
        doc_type_info = []
        for res_type, ext in six.iteritems(search_plugins):
            doc_type_info.append(
                (ext.obj.get_document_type(), ext.obj.parent_plugin_type))
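        # add_extra_mappings() presumably adds a mapping only for document
        # types that an index does not already have.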
        for index in list(index_names.values()):
            es_utils.add_extra_mappings(index_name=index,
                                        doc_type_info=doc_type_info)

        # Step #3: Set up the aliases for each Resource Type Group.
        #   These actions need to happen outside of the plugins. Now that
        #   the indexes are created and fully functional we can associate
        #   them with the aliases.
        #   NB: The indexes remain unchanged for this step.
        for group, search, listen in resource_groups:
            try:
                es_utils.setup_alias(index_names[group], search, listen)
            except Exception as e:
                LOG.exception(_LE("Failed to setup alias for resource group "
                                  "%(g)s: %(e)s") % {'g': group, 'e': e})
                es_utils.alias_error_cleanup(index_names)
                raise

        # Step #4: Re-index all resource types in this Resource Type Group.
        #   NB: The "search" and "listener" aliases remain unchanged for this
        #       step.
        #   NB: We will be spinning this work off into separate threads,
        #       limiting each thread to a single resource type. For
        #       more information, please refer to the spec:
        #           searchlight-specs/specs/newton/
        #             index-performance-enhancement.rst
        ThreadPoolExec = concurrent.futures.ThreadPoolExecutor
        with ThreadPoolExec(max_workers=max_workers) as executor:
            try:
                futures = []
                # Start threads for plugin API.
                for res, ext in plugins_to_index:
                    # Throw the plugin into the thread pool.
                    plugin_obj = ext.obj
                    futures.append(
                        executor.submit(self._plugin_api, plugin_obj,
                                        index_names))

                # Start the single thread for ES re-index.
                if es_reindex:
                    futures.append(
                        executor.submit(self._es_reindex_worker, es_reindex,
                                        resource_groups, index_names))

                # Sit back, relax and wait for the threads to complete.
                wait_for_threads()
            except Exception:
                # An exception occurred. Clean up Elasticsearch and
                # re-raise to inform the user.
                es_utils.alias_error_cleanup(index_names)
                raise

        # Step #5: Update the "search" alias.
        #   All re-indexing has occurred. The index/alias is the same for
        #   all resource types within this Resource Group. These actions
        #   need to happen outside of the plugins. We also restore the
        #   refresh interval for the indexes, which makes their data
        #   searchable.
        #   NB: The "listener" alias remains unchanged for this step.
        for index_name, interval in refresh_intervals.items():
            es_utils.set_index_refresh_interval(index_name, interval)

        old_index = {}
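        # alias_search_update() points the "search" alias at the group's new
        # index and returns the index it previously pointed to; that old
        # index is deleted in Step #6.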
        for group, search, listen in resource_groups:
            old_index[group] = \
                es_utils.alias_search_update(search, index_names[group])

        # Step #6: Update the "listener" alias.
        #   The "search" alias has already been updated. This step involves
        #   both removing the old index from the listener alias and
        #   deleting the old index itself. These actions need to happen
        #   outside of the plugins.
        #   NB: The "search" alias remains unchanged for this step.
        for group, search, listen in resource_groups:
            try:
                # If an exception is raised, log it and continue deleting
                # any other old indexes.
                es_utils.delete_index(old_index[group])
            except Exception as e:
                LOG.error(encodeutils.exception_to_unicode(e))