def test_bulk_index_error_handling(self):
    """Verify bulk 404 (missing doc) and 409 (conflict) errors are ignored."""
    from elasticsearch import helpers

    engine = mock.Mock()
    plugin = fake_plugins.FakeSimplePlugin(es_engine=engine)
    index_helper = helper.IndexingHelper(plugin)

    bulk_name = 'searchlight.elasticsearch.plugins.helper.helpers.bulk'

    # A 404 while deleting means the document is already gone; the
    # helper must swallow the error rather than propagate it.
    with mock.patch(bulk_name) as bulk_mock:
        bulk_mock.side_effect = helpers.BulkIndexError(
            "1 document(s) failed to index",
            [{'delete': {"_id": "1",
                         "error": "Some error",
                         "status": 404,
                         "exception": helpers.TransportError()}}])

        index_helper.delete_documents([{'_id': '1'}])
        self.assertEqual(1, bulk_mock.call_count)

    # A 409 while indexing is a version conflict; it is likewise
    # expected to be ignored.
    with mock.patch(bulk_name) as bulk_mock:
        bulk_mock.side_effect = helpers.BulkIndexError(
            "1 document(s) failed to index",
            [{'index': {"_id": "1",
                        "error": "VersionConflict",
                        "status": 409}}])

        index_helper.save_documents([{'id': '1'}])
        self.assertEqual(1, bulk_mock.call_count)
def test_rule_delete_exception(self):
    """Rule deletion retries on version conflict, capped at 20 attempts."""
    # Fixture documents returned by the mocked index helper.
    payload = {'security_group_rule_id': ID1}
    doc_get = {'_source': {'security_group_rules': [], 'id': 1},
               '_version': 1}
    doc_nest = {'hits': {'hits': [{
        '_id': 123456789,
        '_source': {'security_group_rules': []},
        '_version': 1}]}}

    handler = self.plugin.get_notification_handler()
    index_helper = self.plugin.index_helper
    with mock.patch.object(index_helper,
                           'get_docs_by_nested_field') as mock_nested, \
            mock.patch.object(index_helper, 'get_document') as mock_get, \
            mock.patch.object(index_helper, 'save_document') as mock_save:
        mock_nested.return_value = doc_nest
        mock_get.return_value = doc_get
        conflict = helpers.BulkIndexError(
            "Version conflict",
            [{'index': {"_id": "1",
                        "error": "Some error",
                        "status": 409}}])

        # One conflict followed by a success: exactly one retry, so
        # two save calls in total.
        mock_save.side_effect = [conflict, {}]
        handler.delete_rule(
            'security_group_rule.delete.end', payload, None)
        self.assertEqual(1, mock_nested.call_count)
        self.assertEqual(1, mock_get.call_count)
        self.assertEqual(2, mock_save.call_count)

        # More conflicts than the retry limit allows; the handler must
        # give up after 20 attempts and never reach the final success.
        mock_nested.reset_mock()
        mock_get.reset_mock()
        mock_save.reset_mock()
        mock_save.side_effect = [conflict] * 24 + [{}]
        handler.delete_rule(
            'security_group_rule.delete.end', payload, None)
        self.assertEqual(1, mock_nested.call_count)
        self.assertEqual(20, mock_get.call_count)
        self.assertEqual(20, mock_save.call_count)
def run(self):
    """Drain up to 100 entries from the reliable queue into elasticsearch.

    Each entry is popped from the incoming queue and atomically moved to
    the process queue (scored with the current epoch time) by a Lua
    script, then the batch is sent to elasticsearch in bulk.  Entries
    that succeed -- or that fail with a 404 on delete, meaning the
    document is already gone -- are removed from the process queue.
    Any other failures are collected and re-raised as a
    ``BulkIndexError`` so the remaining process-queue entries can be
    retried later.
    """
    es = get_connection()
    _process_tag_queue = redis.register_script(PROCESS_TAG_QUEUE_LUA)

    tags = []
    # Batch size: handle at most 100 entries per run.
    for _ in range(100):
        epoch_time = int(time.time())
        # Pop the latest entry, add it to the process queue with the
        # current time as score and return it.
        tags.append(
            _process_tag_queue(
                keys=[self.redis_queue, self.redis_process_queue],
                args=[epoch_time]))

    doctypes = self.deserialize(tags)

    # Send the entries in bulk to elasticsearch, collecting per-item
    # failures instead of raising on the first one.
    errors = []
    for ok, info in es_helpers.streaming_bulk(es, doctypes,
                                              raise_on_exception=False,
                                              raise_on_error=False):
        if ok:
            _id = self.get_id(info)
            tag_string = self.id_pickles[_id]
            # Delete successful entries from the process queue.
            redis.zrem(self.redis_process_queue, tag_string)
        elif info.get('delete', {}).get('status') == 404:
            # Trying to delete an already-deleted doc: treat it as a
            # success and drop it from the process queue too.
            _id = self.get_id(info)
            tag_string = self.id_pickles[_id]
            redis.zrem(self.redis_process_queue, tag_string)
        else:
            errors.append(info)

    if errors:
        raise es_helpers.BulkIndexError(
            '%d document(s) failed to index.' % len(errors), errors)
def test_rule_update_exception(self):
    """Rule update retries on version conflict, capped at 20 attempts."""
    # Fixture document returned by the mocked get_document.
    payload = _secgrouprule_fixture(ID1, TENANT1)
    doc = {'_source': {'security_group_rules': []}, '_version': 1}

    handler = self.plugin.get_notification_handler()
    index_helper = self.plugin.index_helper
    with mock.patch.object(index_helper, 'get_document') as mock_get, \
            mock.patch.object(index_helper, 'save_document') as mock_save:
        mock_get.return_value = doc
        conflict = helpers.BulkIndexError(
            "Version conflict",
            [{'index': {"_id": "1",
                        "error": "Some error",
                        "status": 409}}])

        # One conflict followed by a success: exactly one retry, so
        # two calls to both get and save.
        mock_save.side_effect = [conflict, {}]
        handler.create_or_update_rule(payload, None)
        self.assertEqual(2, mock_get.call_count)
        self.assertEqual(2, mock_save.call_count)

        # More conflicts than the retry limit allows; the handler must
        # bail out after 20 attempts and never reach the final success.
        mock_get.reset_mock()
        mock_save.reset_mock()
        mock_save.side_effect = [conflict] * 24 + [{}]
        handler.create_or_update_rule(payload, None)
        self.assertEqual(20, mock_get.call_count)
        self.assertEqual(20, mock_save.call_count)