class FacetScannerUpdateHandler(UpdateHandler):
    def __init__(self, conf: 'YamlConfig', **kwargs):

        self.facet_scanner = None
        self.es = None

        super().__init__(conf, **kwargs)

    def setup_extra(self, **kwargs) -> None:
        """
        Set up the facet scanner class and the Elasticsearch connection.

        :param kwargs:
        """
        # Get the facet scanner class
        self.logger.info('Loading facet scanner')
        self.facet_scanner = FacetScanner()

        # Set up the Elasticsearch connection
        api_key = self.conf.get('elasticsearch', 'es_api_key')

        self.es = CEDAElasticsearchClient(headers={'x-api-key': api_key})

    def process_event(self, message: 'IngestMessage'):
        """
        Scan the file for facets
        :param message:
        :return:
        """
        if message.action == 'DEPOSIT':
            self._process_deposits(message)

    def _process_deposits(self, message: 'IngestMessage'):

        # Wait to make sure that the file is accessible on the filesystem
        self._wait_for_file(message)

        # Get the handler for this filepath
        handler = self.facet_scanner.get_handler(message.filepath)

        # Extract the facets
        facets = handler.get_facets(message.filepath)

        # Build the project dictionary using the handler's project_name attribute
        project = {'projects': {handler.project_name: facets}}

        index = self.conf.get('files_index', 'name')

        # Send facets to elasticsearch
        self.es.update(index=index,
                       id=PathTools.generate_id(message.filepath),
                       body={
                           'doc': project,
                           'doc_as_upsert': True
                       })
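
A minimal sketch of how a DEPOSIT message might be fed to the handler above; the IngestMessage stand-in, the file path, and the commented-out construction are illustrative, not taken from the snippet:

# Hypothetical stand-in for the IngestMessage type used above; the real
# class comes from the ingest pipeline and is not shown in this snippet.
from collections import namedtuple

IngestMessage = namedtuple('IngestMessage', ['action', 'filepath'])

# Only DEPOSIT messages trigger a facet scan; other actions fall through.
message = IngestMessage(action='DEPOSIT', filepath='/badc/example/file.nc')

# handler = FacetScannerUpdateHandler(conf)  # conf: a loaded YamlConfig
# handler.process_event(message)             # scans facets and upserts the doc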
Example #2
    def __init__(self, index, host, port):
        """
        Creates an elasticsearch connection. Default host and port specified.

        :param index: The elasticsearch index to connect to.
        :param host: The elasticsearch host address.
        :param port: The read/write elasticsearch port.
        """

        # NOTE: host and port are not used below; the CEDA client applies
        # its own default connection settings.
        self.es = CEDAElasticsearchClient()
        self.index = index
Example #3
def get_manifest(request, uuid):

    es = CEDAElasticsearchClient()

    try:
        response = es.get(index=settings.ELASTICSEARCH_COLLECTION_INDEX, id=uuid, _source_includes=['manifest'])
        response = json.loads(response['_source']['manifest'])
        return JsonResponse(response)

    except (NotFoundError, KeyError):
        raise Http404(f'Manifest not found for collection {uuid}')
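
A sketch of wiring this view into a Django URLconf; the URL pattern and module path are assumptions, not shown in the original snippet:

# urls.py -- illustrative route for the view above; the module path and
# route name are assumptions.
from django.urls import path
from .views import get_manifest

urlpatterns = [
    # The captured uuid is passed straight through as the Elasticsearch id
    path('collections/<uuid>/manifest/', get_manifest, name='collection-manifest'),
]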
Example #4
    def __init__(self, index, **kwargs):
        """
        Common variables.
        :param index:   Index to update
        """

        ca_root = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         '../root_certificate/root-ca.pem'))

        self.index = index
        self.es = CEDAElasticsearchClient(**kwargs)
Example #6
    def __init__(self, source_index, dest_index, keep_id=False, **es_kwargs):
        self.source = Elasticsearch(['https://jasmin-es1.ceda.ac.uk'])
        self.dest = CEDAElasticsearchClient(**es_kwargs)
        self.source_index = source_index
        self.dest_index = dest_index
        self.keep_id = keep_id
        self.total = self.source.count(index=self.source_index)['count']
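
Only __init__ is shown above; a hedged sketch of how those attributes could drive a cross-cluster copy with the elasticsearch helpers (the migrate function itself is an assumption, the class body beyond __init__ is not shown):

# Sketch only: a reindex loop built on the attributes set in __init__ above.
from elasticsearch.helpers import scan, bulk

def migrate(self):
    def actions():
        for hit in scan(self.source, index=self.source_index):
            action = {'_index': self.dest_index, '_source': hit['_source']}
            if self.keep_id:
                # keep_id=True preserves the original document ids
                action['_id'] = hit['_id']
            yield action

    # self.total (set in __init__) gives the expected number of documents
    bulk(self.dest, actions())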
Example #7
def get_dataset_filelist(dataset):
    """
    Query Elasticsearch for the list of files in the changed dataset
    :param dataset: path to root of dataset
    :return: list of file paths
    """

    query = {
        "_source": {
            "includes": ["info.directory", "info.name"]
        },
        "query": {
            "match_phrase_prefix": {
                "info.directory.analyzed": dataset
            }
        }
    }

    es = CEDAElasticsearchClient()
    results = scan(es, query=query, index='opensearch-files')

    file_list = [
        os.path.join(item['_source']['info']['directory'],
                     item['_source']['info']['name']) for item in results
    ]

    return file_list
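
An illustrative call of the function above; the dataset root is an example path, not taken from the snippet:

files = get_dataset_filelist('/badc/cmip5/data/cmip5/output1')
print(f'{len(files)} files found under the dataset root')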
Example #8
    def __init__(self):
        super().__init__()
        api_token = self.config.get("api_token")
        if api_token is not None:
            self.es = CEDAElasticsearchClient(headers={"x-api-key": api_token})
        else:
            self.es = Elasticsearch(
                [CONFIG["elasticsearch"]["endpoint"]],
                use_ssl=True,
                port=CONFIG["elasticsearch"]["port"],
            )
Example #9

    def __init__(self):
        base = os.path.dirname(__file__)
        self.default_config = os.path.join(base, '../conf/index_updater.ini')

        self.conf = RawConfigParser()
        self.conf.read(self.default_config)

        # Load queue params to object
        self._load_queue_params()

        # Load the fbi exchange name
        self.fbi_exchange = self.conf.get('server', 'fbi_exchange')

        # Setup local queues
        self.manual_queue = persistqueue.SQLiteAckQueue(os.path.join(
            self.db_location, 'priority'),
                                                        multithreading=True)
        self.bot_queue = persistqueue.SQLiteAckQueue(os.path.join(
            self.db_location, 'bot'),
                                                     multithreading=True)

        # Create Elasticsearch connection
        self.es = CEDAElasticsearchClient(timeout=60, retry_on_timeout=True)
        self.rabbit_connect()

        self.spot_progress = self._get_spot_progress()

        # Setup logging
        logging_level = self.conf.get('logging', 'log-level')
        logger.setLevel(getattr(logging, logging_level.upper()))

        # Add formatting
        ch = logging.StreamHandler()
        ch.setLevel(getattr(logging, logging_level.upper()))
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)

        logger.addHandler(ch)
Example #10
def get_elasticsearch_client():
    return CEDAElasticsearchClient()
Example #11
class IndexUpdaterBase(object):
    """
    Base class for index updaters. Contains common methods.
    """
    def __init__(self, index, **kwargs):
        """
        Common variables.
        :param index:   Index to update
        """

        ca_root = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         '../root_certificate/root-ca.pem'))

        self.index = index
        self.es = CEDAElasticsearchClient(**kwargs)

    @staticmethod
    def _get_action_key(es_response_item):
        """
        Get the action key for processing the response
        :param es_response_item:
        :return: key
        """

        actions = ["update", "index", "delete"]
        response_keys = es_response_item.keys()

        return list(set(actions) & set(response_keys))[0]

    def _scroll_search(self, query, size=1000):
        """
        Perform a scroll search query

        :param query:   The query to perform
        :param size:    Size to return in each scroll. (default: 1000)
        :return:        Generator of results
        """

        return scan(self.es,
                    query=query,
                    scroll='1m',
                    index=self.index,
                    size=size)

    def _bulk_action(self, action_list, api="bulk", process_results=True):
        """
        Perform a bulk action against Elasticsearch. This is either bulk|msearch. Default: bulk

        :param action_list: List of bulk index operations.
        :return Consolidated report.
                    when api == bulk    returns {"success": int, "failed": int, "failed_items": list}
                    when api == msearch returns a list with three levels as described below
                    [           # Container for the response
                        [       # Collection of all the responses in a block as submitted to Elasticsearch
                            []  # Individual query responses
                        ]
                    ]

        """

        response_list = []
        for action in tqdm(action_list,
                           desc="Processing queries",
                           file=sys.stdout):

            if api == "bulk":
                response = self.es.bulk(index=self.index, body=action)
            elif api == "msearch":
                response = self.es.msearch(body=action)
            else:
                raise ValueError(
                    "Invalid api selected. Must be of either bulk|msearch")

            response_list.append(response)

        return self._process_bulk_action_response(response_list,
                                                  api,
                                                  process=process_results)

    def _generate_bulk_operation_body(self, content_list, action="index"):
        """
        Generate the query body for the bulk operation

        :param content_list:    List of dictionaries containing the content to be actioned upon
        :param action:          The elasticsearch action to perform. (index|update|delete) (default: index)
        :return:                List of actions to perform in batches of 800.
        """
        bulk_json = ""
        bulk_action_list = []

        for i, item in enumerate(content_list, 1):
            id = item["id"]

            if action == "index":
                header = json.dumps(
                    {"index": {
                        "_index": self.index,
                        "_id": id
                    }}) + "\n"
                body = json.dumps(item["document"]) + "\n"

            elif action == "update":
                header = json.dumps(
                    {"update": {
                        "_index": self.index,
                        "_id": id
                    }}) + "\n"
                body = json.dumps({
                    "doc": item["document"],
                    "doc_as_upsert": True
                }) + "\n"

            elif action == "delete":
                header = json.dumps(
                    {"delete": {
                        "_index": self.index,
                        "_id": id
                    }}) + "\n"
                body = ""

            elif action == "search":
                header = json.dumps({"index": self.index}) + "\n"
                body = json.dumps(item["query"]) + "\n"

            else:
                raise ValueError(
                    "Incorrect action supplied. Must be of either index|update|delete|search"
                )

            bulk_json += header + body

            # Every 800 items create a new bulk request
            if i % 800 == 0:
                bulk_action_list.append(bulk_json)
                bulk_json = ""

        # Clean up any remaining jobs
        if bulk_json:
            bulk_action_list.append(bulk_json)

        return bulk_action_list

    def _process_bulk_action_response(self,
                                      action_response,
                                      api,
                                      process=True):
        """
        Process the bulk action response and generate a consolidated report of actions
        :param action_response: Response from the Elasticsearch bulk api call
        :param api:             Whether api used was bulk or msearch
        :param process:         True: return consolidated response. False: Return raw response
        :return: Consolidated report | Raw response based on process flag.
        """

        # Return raw response
        if not process:
            return action_response

        if api == "bulk":
            success = 0
            failed = 0
            items_failed = []

            for action in action_response:
                # If there are no errors in the top-level JSON, all items succeeded
                if not action["errors"]:
                    success += len(action["items"])

                else:
                    # Some or all items failed
                    for item in action["items"]:
                        action_key = self._get_action_key(item)

                        # If 2xx HTTP response. Successful
                        if 200 <= item[action_key]["status"] < 300:
                            success += 1

                        else:
                            failed += 1

                            id = item[action_key]["_id"]
                            status = item[action_key]["status"]
                            error = item[action_key]["error"]

                            items_failed.append({
                                "id": id,
                                "status": status,
                                "error": error
                            })

            return {
                "success": success,
                "failed": failed,
                "failed_items": items_failed
            }

        elif api == "msearch":

            msearch_action_response = []
            for action in action_response:
                response_hits = []

                for response in action["responses"]:
                    response_hits.append(response["hits"]["hits"])

                msearch_action_response.append(response_hits)

            return msearch_action_response

        else:
            raise ValueError(
                "Invalid api selected. Must be of either bulk|msearch")

    def _create_id(self, string):
        return hashlib.sha1(string.encode("utf-8")).hexdigest()

    def _add_item(self, id, doc):
        """
        Update a single document
        :param id: Dictionary containing document body and id in form
        {'document':{},'id':<sha1 hash of filepath>}
        """
        document = {'doc': doc, 'doc_as_upsert': True}

        self.es.update(index=self.index, id=id, body=document)
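
A hedged usage sketch of the bulk helpers above; the index name and documents are examples only, not taken from the snippet:

# Example use of the bulk helpers above; index name and documents are
# illustrative.
updater = IndexUpdaterBase('ceda-fbi')

content = [
    {'id': updater._create_id('/badc/example/file1.nc'),
     'document': {'info': {'name': 'file1.nc'}}},
    {'id': updater._create_id('/badc/example/file2.nc'),
     'document': {'info': {'name': 'file2.nc'}}},
]

# Actions are batched in blocks of 800 and submitted via the bulk API
actions = updater._generate_bulk_operation_body(content, action='update')
report = updater._bulk_action(actions)
print(report)  # {'success': ..., 'failed': ..., 'failed_items': [...]}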
Example #12
    def __init__(self, **kwargs):
        self.es = CEDAElasticsearchClient(**kwargs)
Example #13
class ElasticsearchConnection:
    """
    Wrapper class to handle the connection with Elasticsearch.
    Uses the `CEDAElasticsearchClient <https://github.com/cedadev/ceda-elasticsearch-tools>`_
    """

    def __init__(self, **kwargs):
        self.es = CEDAElasticsearchClient(**kwargs)

    def get_hits(self, index, query=None):
        return scan(self.es, query=query, index=index)

    def get_query(self, extensions, path, excludes=None):
        # Avoid a mutable default argument
        excludes = excludes or []
        query_base = {
            "_source": {
                "exclude": ["info.phenomena"]
            },
            "query": {
                "bool": {
                    "must": [
                        {
                            "match_phrase_prefix": {
                                "info.directory.analyzed": path
                            }
                        }
                    ],
                    "must_not": [],
                    "filter": []
                }
            }
        }

        for ext in extensions:
            filter = {
                "term": {
                    "info.type.keyword": ext
                }
            }

            query_base["query"]["bool"]["filter"].append(filter)

        for exclusion in excludes:
            query_base["query"]["bool"]["must_not"].append(exclusion)

        return query_base

    def bulk(self, iterator, *args, generator=False):
        if generator:
            bulk(self.es, iterator(*args), refresh=True)
        else:
            bulk(self.es, iterator, refresh=True)

    def count(self, *args, **kwargs):
        return self.es.count(*args, **kwargs).get('count')

    def mget(self, *args, **kwargs):
        return self.es.mget(*args, **kwargs)

    def search(self, *args, **kwargs):
        return self.es.search(*args, **kwargs)

    def create_collections_index(self, index):
        
        return self.es.indices.create(index=index, ignore=400, body={
            "mappings": {
                "properties": {
                    "time_frame": {
                        "type": "date_range"
                    },
                    "bbox": {
                        "properties": {
                            "coordinates": {
                                "type": "geo_point"
                            }
                        }
                    }
                }
            }
        })
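
A short usage sketch for the wrapper above; the index name, extension, and path are examples only:

# Illustrative: build a filtered query and scan the matching documents.
conn = ElasticsearchConnection()
query = conn.get_query(extensions=['.nc'], path='/badc/cmip5')
for hit in conn.get_hits('opensearch-files', query=query):
    print(hit['_id'])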
Example #14

def get_elasticsearch_client(**kwargs):
    """
    Returns an Elasticsearch client object.
    Used to abstract the precise client implementation.
    """
    return CEDAElasticsearchClient(**kwargs)
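
Keyword arguments are forwarded to the client untouched; the values here are illustrative:

es = get_elasticsearch_client(timeout=60, retry_on_timeout=True)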
Example #15

    def __init__(self, index=settings.ELASTICSEARCH_INDEX):
        self.index = index
        self.collection_index = settings.ELASTICSEARCH_COLLECTION_INDEX
        self.es = CEDAElasticsearchClient(
            **settings.ELASTICSEARCH_CONNECTION_PARAMS)
Example #16
class ElasticsearchUpdater(object):
    """
    Class to handle updates to the elasticsearch index.
    """
    def __init__(self, index, host, port):
        """
        Creates an elasticsearch connection. Default host and port specified.

        :param index: The elasticsearch index to connect to.
        :param host: The elasticsearch host address.
        :param port: The read/write elasticsearch port.
        """

        # NOTE: host and port are not used below; the CEDA client applies
        # its own default connection settings.
        self.es = CEDAElasticsearchClient()
        self.index = index

    def make_bulk_update(self, bulk_json):
        """
        Use the ES bulk API to make a bulk update to the ES index specified by the object.

        :param bulk_json: JSON to execute in the bulk request.
        :return: Status of the transaction.
        """
        if bulk_json:
            result = self.es.bulk(index=self.index, body=bulk_json)
            return {
                "took": result["took"],
                "errors": result["errors"],
                "docs_changed": len(result["items"])
            }
        else:
            return {
                "took": 0,
                "errors": "True",
                "error_msg": "No JSON submitted for updates",
                "docs_changed": 0
            }

    def check_files_existence(self,
                              param_func,
                              query_tmpl,
                              file_list=None,
                              raw_resp=False,
                              threshold=800):
        """
        Given a list of files in the archive, return a dictionary containing the files from the archive which are
        present in the given index (dict["True"]) and those which are not (dict["False"]).

        :param param_func: function which returns the parameters needed when rendering the query
        :param query_tmpl: The template to construct the elasticsearch query
        :param file_list: List of real file paths.
        :param raw_resp: Boolean to state whether to include the ES response in the return dict or not.
        :param threshold: Limit for Elasticsearch msearch API call.

        :return: A dict comprising two lists: files from the supplied list present in the given ES index and those not.
                dict{"True": [files provided which are indexed], "False": [files provided which are not indexed]}
        """

        # Set defaults
        file_in_index = {"True": [], "False": []}

        # Return if no file list has been provided
        if not file_list:
            return file_in_index

        msearch_query_list = self.gen_msearch_json(query_tmpl,
                                                   param_func,
                                                   file_list,
                                                   blocksize=threshold)

        file_in_index = self._get_and_process_results(msearch_query_list,
                                                      file_list, threshold,
                                                      file_in_index, raw_resp)

        return file_in_index

    def gen_msearch_json(self, querytemp, paramfunc, input_list, blocksize):
        """
        Takes a list and creates an Elasticsearch msearch query with the desired blocksize.
        The query is passed in using querytemp and the paramfunc defines the parameters which
        will be rendered to produce the final query.

        :param querytemp: Template query JSON
        :param paramfunc: Function which returns the parameters needed in the querytemp, e.g.

                          def params(item):
                              return {"dirname": os.path.dirname(item), "filename": os.path.basename(item)}

                          This should be passed in without brackets.
        :param input_list: List to turn into a query.
        :param blocksize: Number of files to include in each msearch query.

        :return: List with each element containing a JSON msearch query which has been chopped so that the number of
                 objects in the query matches blocksize.
        """
        msearch_json = ""
        query_list = []

        for i, item in enumerate(input_list, 1):
            params = paramfunc(item)

            index = json.dumps({}) + "\n"
            search_query = self._render_query(querytemp, params) + "\n"

            msearch_json += index + search_query

            if i % blocksize == 0:
                query_list.append(msearch_json)
                msearch_json = ""

        if msearch_json:
            query_list.append(msearch_json)

        return query_list

    @staticmethod
    def _render_query(query, parameters):
        """
        Renders parameters into JSON for elasticsearch query.
        Templated variables are in the format <var1>

        :param query: The query template with dynamic vars with format <var1>
        :param parameters: Dictionary containing key, value pairs for the template eg. {"var1":"Test string"}
                           would replace <var1> in the query template.

        :return: Returns a JSON string with variables templated in.
        """
        m = re.findall(r'<\w+>', query)
        for match in m:
            param = parameters[match.strip('<>')]
            query = query.replace(match, param)

        return query

    def _get_and_process_results(self, msearchquery_list, file_list, blocksize,
                                 output, raw_resp):
        """
        Generate a True/False dict of filepaths contained in the index from a supplied file list.
        If raw_resp = True, return the elasticsearch document in the output.

        If the query doesn't get a hit, just the filepath is returned.

        :param msearchquery_list: A list containing msearch query JSON split into blocks.
        :param file_list: List of filepaths to test.
        :param blocksize: Max number of files included in each query.
        :param output: the output dictionary

        :return: True False dict of file paths in given index: self.index
        """

        scroll_count = 0
        for mquery in msearchquery_list:

            results = self.es.msearch(index=self.index,
                                      body=mquery,
                                      request_timeout=240)

            if results:
                for i, response in enumerate(results["responses"]):
                    if raw_resp:
                        # Append the raw ElasticSearch response where there is data.
                        if response["hits"]["total"] == 1:
                            output["True"].append(response["hits"]["hits"])

                        if response["hits"]["total"] == 0:
                            output["False"].append(
                                file_list[i + (blocksize * scroll_count)])

                    else:
                        # Append the filepath
                        if response["hits"]["total"] == 1:
                            output["True"].append(
                                file_list[i + (blocksize * scroll_count)])

                        if response["hits"]["total"] == 0:
                            output["False"].append(
                                file_list[i + (blocksize * scroll_count)])

            scroll_count += 1

        return output

    def update_location(self,
                        file_list,
                        params,
                        search_query,
                        on_disk,
                        threshold=800):
        """
        Currently only works with the ceda-eo and fbs indexes.

        :param file_list: List of file paths to update
        :param params: function which returns parameters
        :param search_query: query used to search index to check existence
        :param on_disk: Boolean. Sets location value to on_disk or on_tape

        :return: List of files which were not found in ES index
        """

        # set location
        if on_disk:
            location = "on_disk"
        else:
            location = "on_tape"

        # Check if files are in the provided index
        index_test = self.check_files_existence(param_func=params,
                                                query_tmpl=search_query,
                                                file_list=file_list,
                                                raw_resp=True)

        # Only update those files which are contained in the target index.
        files_to_update = index_test["True"]
        print(f"Files to update: {len(index_test['True'])}")

        if len(files_to_update) == 0:
            return "No files to update"

        # create update json and update location
        update_json = ""
        result = []
        updated_files = 0

        for i, file in enumerate(files_to_update, 1):
            id = file[0]["_id"]

            if self.index == "ceda-eo":
                index = json.dumps(
                    {"update": {
                        "_id": id,
                        "_type": "geo_metadata"
                    }}) + "\n"
                location_field = json.dumps(
                    {"doc": {
                        "file": {
                            "location": location
                        }
                    }}) + "\n"
                update_json += index + location_field
                updated_files += 1

            else:
                if file[0]["_source"]["info"]["location"] != location:
                    index = json.dumps(
                        {"update": {
                            "_id": id,
                            "_type": "file"
                        }}) + "\n"
                    location_field = json.dumps(
                        {"doc": {
                            "info": {
                                "location": location
                            }
                        }}) + "\n"
                    update_json += index + location_field
                    updated_files += 1

            if i % threshold == 0:
                if update_json:
                    result.append(self.make_bulk_update(update_json))
                update_json = ""

        # Clean up any remaining updates
        result.append(self.make_bulk_update(update_json))

        summary_string = f"Processed {len(file_list)} files. " \
                         "Updated '{self.index}' index. " \
                         "Updated {updated_files} files. " \
                         "{len(index_test['False'])} files not in target index"

        return index_test["False"], summary_string

    def update_md5(self, spot_name, spot_path, threshold=800):

        logger = logging.getLogger(__name__)
        logging.getLogger('elasticsearch').setLevel(logging.WARNING)

        spotlog = MD5LogFile(spot_name, spot_path)
        file_list = spotlog.as_list()

        logger.debug(f"Spot: {spot_path} contains {len(spotlog)} files.")

        param_func, query_tmpl = ElasticsearchQuery.ceda_fbs()
        result = self.check_files_existence(param_func,
                                            query_tmpl,
                                            file_list,
                                            raw_resp=True,
                                            threshold=threshold)

        # return len(result["True"])

        logger.info(
            "Spot: {}. Files in index: {}. Files not in: {}. Percentage in: {}%"
            .format(spot_path, len(result["True"]), len(result["False"]),
                    utils.percent(len(spotlog), len(result["True"]))))

        files_in = result["True"]

        # Check md5s
        update_total = 0
        md5_json = ""

        try:
            for file in files_in:
                file_info = file[0]["_source"]["info"]
                filepath = os.path.join(file_info["directory"],
                                        file_info["name"])

                if file_info["md5"] != spotlog.get_md5(filepath):
                    update_total += 1

                    id = file[0]["_id"]
                    index = json.dumps(
                        {"update": {
                            "_id": id,
                            "_type": "file"
                        }}) + "\n"
                    md5_field = json.dumps({
                        "doc": {
                            "info": {
                                "md5": spotlog.get_md5(filepath)
                            }
                        }
                    }) + "\n"
                    md5_json += index + md5_field

                if update_total > threshold:
                    self.make_bulk_update(md5_json)
                    md5_json = ""
                    update_total = 0

            if md5_json:
                self.make_bulk_update(md5_json)

        except Exception as msg:
            logger.error(msg)
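
A sketch of the query-template / parameter-function pairing that gen_msearch_json and _render_query above expect; the template and params here are illustrative, the real pair comes from ElasticsearchQuery.ceda_fbs():

# Illustrative template/param pair in the format _render_query expects.
import json
import os

query_tmpl = json.dumps({
    'query': {'bool': {'must': [
        {'term': {'info.directory': '<dirname>'}},
        {'term': {'info.name': '<filename>'}},
    ]}}
})

def params(item):
    return {'dirname': os.path.dirname(item), 'filename': os.path.basename(item)}

updater = ElasticsearchUpdater(index='ceda-fbs', host='localhost', port=9200)
blocks = updater.gen_msearch_json(query_tmpl, params, ['/badc/a/file.nc'], blocksize=800)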
Example #17

class ElasticsearchConnection:
    """
    Elasticsearch Connection class. Uses `CEDAElasticsearchClient <https://github.com/cedadev/ceda-elasticsearch-tools>`_

    :param index: files index (default: settings.ELASTICSEARCH_INDEX)
    :type index: str

    """
    def __init__(self, index=settings.ELASTICSEARCH_INDEX):
        self.index = index
        self.collection_index = settings.ELASTICSEARCH_COLLECTION_INDEX
        self.es = CEDAElasticsearchClient(
            **settings.ELASTICSEARCH_CONNECTION_PARAMS)

    def search(self, query):
        """
        Search the files index

        :param query: Elasticsearch file query
        :type query: dict

        :return: Elasticsearch response
        :rtype: dict
        """
        return self.es.search(index=self.index, body=query)

    def search_collections(self, query):
        """
        Search the collections index

        :param query: Elasticsearch collection query
        :type query: dict

        :return: Elasticsearch response
        :rtype: dict
        """
        return self.es.search(index=self.collection_index, body=query)

    def count(self, query):
        """
        Return the hit count from the current file query

        :param query: Elasticsearch file query
        :type query: dict

        :return: Elasticsearch count response
        :rtype: dict
        """
        return self.es.count(index=self.index, body=query)

    def count_collections(self, query):
        """
        Return the hit count from the current collection query

        :param query: Elasticsearch collection query
        :type query: dict

        :return: Elasticsearch count response
        :rtype: dict
        """
        return self.es.count(index=self.collection_index, body=query)
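
A brief usage sketch, assuming the Django settings referenced above are available; the match_all query is an example:

# Illustrative match_all searches against both indices.
conn = ElasticsearchConnection()
query = {'query': {'match_all': {}}}
print(conn.count(query))                   # hit count in the files index
response = conn.search_collections(query)  # raw Elasticsearch response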
Example #18
from dachar.utils.get_stores import (
    get_fix_store,
    get_fix_prop_store,
    get_dc_store,
    get_ar_store,
)

from tests._stores_for_tests import (
    _TestFixProposalStore,
    _TestFixStore,
    _TestAnalysisStore,
    _TestDatasetCharacterStore,
)

from datetime import datetime

es = CEDAElasticsearchClient(headers={"x-api-key": ELASTIC_API_TOKEN})

# es.indices.delete(index="roocs-char-test", ignore=[400, 404])
# print(es.indices.exists("roocs-char-test"))
# es.indices.create("roocs-char-test")

date = datetime.today().strftime("%Y-%m-%d")

# character store
char_name = CONFIG['elasticsearch']["character_store"]
# analysis store
a_name = CONFIG['elasticsearch']["analysis_store"]
# fix store
fix_name = CONFIG['elasticsearch']["fix_store"]
# fix proposal store
fix_prop_name = CONFIG['elasticsearch']["fix_proposal_store"]
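
The store names looked up above can then be used as index arguments; a minimal sketch:

# Sketch: verify that each store index exists before use.
for name in (char_name, a_name, fix_name, fix_prop_name):
    print(name, es.indices.exists(index=name))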