Code example #1
File: backends.py  Project: alaminopu/django-hesab
    def __init__(self, connection_alias, **connection_options):
        super(ElasticsearchSearchBackend, self).__init__(connection_alias, **connection_options)

        # Update DEFAULT_SETTINGS with custom settings
        self.DEFAULT_SETTINGS.update(**merge_dict(self.DEFAULT_SETTINGS, es_settings.INDEX_SETTINGS))

        # Update FIELD_MAPPINGS with custom field mappings
        FIELD_MAPPINGS.update(**merge_dict(FIELD_MAPPINGS, es_settings.FIELD_MAPPINGS))

    def build_schema(self, fields, language):
        """
        Build the index schema for the given fields, with an additional
        ``language`` argument.

        :param fields: mapping of field name to Haystack field instance
        :param language: the language code
        :return: a tuple of the content field name (string) and the
                 mapping configuration (dictionary)
        """
        content_field_name = ''
        mapping = {
            DJANGO_CT: {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False},
            DJANGO_ID: {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False},
        }

        for field_name, field_class in fields.items():
            field_mapping = FIELD_MAPPINGS.get(
                field_class.field_type, DEFAULT_FIELD_MAPPING).copy()
            if field_class.boost != 1.0:
                field_mapping['boost'] = field_class.boost

            if field_class.document is True:
                content_field_name = field_class.index_fieldname

            # Do this last to override `text` fields.
            if field_mapping['type'] == 'string':
                # Use the language analyzer for text fields.
                if field_mapping['analyzer'] == DEFAULT_FIELD_MAPPING['analyzer']:
                    field_mapping['analyzer'] = get_analyzer_for(language)
                if field_class.indexed is False or hasattr(field_class, 'facet_for'):
                    field_mapping['index'] = 'not_analyzed'
                    del field_mapping['analyzer']

            mapping[field_class.index_fieldname] = field_mapping

        return content_field_name, mapping
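
For orientation, a minimal sketch of how a build_schema like the one above could be exercised. `backend` is assumed to be an already-configured instance of the backend class in this example, and `FakeField` is a hypothetical stand-in exposing only the attributes the method reads; neither is part of the project above.

# Hypothetical stand-in with just the attributes build_schema reads.
class FakeField(object):
    field_type = 'string'     # falls back to DEFAULT_FIELD_MAPPING
    boost = 1.0               # values other than 1.0 add a 'boost' key
    document = True           # marks this field as the content field
    indexed = True            # False would force 'not_analyzed'
    index_fieldname = 'text'

# `backend` is assumed to be an already-configured instance of the
# ElasticsearchSearchBackend subclass shown above.
content_field, mapping = backend.build_schema({'text': FakeField()}, 'de')

# content_field == 'text'; mapping holds the DJANGO_CT / DJANGO_ID
# bookkeeping fields plus a 'text' entry whose analyzer comes from
# get_analyzer_for('de') (assuming the default 'string' mapping applies).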
Code example #3
    def build_schema(self, fields):
        content_field_name = ''
        mapping = {
            DJANGO_CT: {
                'type': 'keyword',
                'index': 'not_analyzed',
                'include_in_all': False
            },
            DJANGO_ID: {
                'type': 'keyword',
                'index': 'not_analyzed',
                'include_in_all': False
            },
        }

        for field_name, field_class in fields.items():
            field_mapping = FIELD_MAPPINGS.get(field_class.field_type,
                                               DEFAULT_FIELD_MAPPING).copy()
            if field_class.boost != 1.0:
                field_mapping['boost'] = field_class.boost

            if field_class.document is True:
                content_field_name = field_class.index_fieldname

            # Do this last to override `text` fields.
            if field_mapping['type'] == 'string':
                if field_class.indexed is False or hasattr(
                        field_class, 'facet_for'):
                    field_mapping['index'] = 'not_analyzed'
                    del field_mapping['analyzer']

            mapping[field_class.index_fieldname] = field_mapping

        return (content_field_name, mapping)
Code example #4
    def build_schema(self, fields):
        content_field_name = ''
        mapping = {
            DJANGO_CT: {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False},
            DJANGO_ID: {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False},
        }

        for field_name, field_class in fields.items():
            field_mapping = FIELD_MAPPINGS.get(
                field_class.field_type, self.DEFAULT_FIELD_MAPPING).copy()
            if field_class.boost != 1.0:
                field_mapping['boost'] = field_class.boost

            if field_class.document is True:
                content_field_name = field_class.index_fieldname

            # Do this last to override `text` fields.
            if field_mapping['type'] == 'string':
                if field_class.indexed is False or hasattr(field_class, 'facet_for'):
                    field_mapping['index'] = 'not_analyzed'
                    del field_mapping['analyzer']

            mapping[field_class.index_fieldname] = field_mapping

        return (content_field_name, mapping)
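
All of the build_schema variants above share the same lookup-and-copy idiom for per-field mappings. Below is a self-contained sketch of why the .copy() matters before per-field tweaks such as boost; the mapping values are made up for illustration and are not the projects' actual FIELD_MAPPINGS.

DEFAULT_FIELD_MAPPING = {'type': 'string', 'analyzer': 'snowball'}
FIELD_MAPPINGS = {
    'date': {'type': 'date'},
    'integer': {'type': 'long'},
}

def mapping_for(field_type, boost=1.0):
    # Copy the template so per-field tweaks never mutate the shared dicts.
    field_mapping = FIELD_MAPPINGS.get(field_type, DEFAULT_FIELD_MAPPING).copy()
    if boost != 1.0:
        field_mapping['boost'] = boost
    return field_mapping

assert mapping_for('integer', boost=2.0) == {'type': 'long', 'boost': 2.0}
assert FIELD_MAPPINGS['integer'] == {'type': 'long'}   # template untouched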
Code example #5
    def build_schema(self, indexes):
        """Build Elasticsearch schema.

        :param indexes: Dictionary of model -> index.
        :type indexes: dict
        :return: Schema.
        :rtype: dict
        """
        schema = {}

        for model, index in indexes.items():
            mapping_properties = {
                DJANGO_CT: {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False},
                DJANGO_ID: {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False},
            }
            for field_name, field_class in index.fields.items():
                field_mapping = FIELD_MAPPINGS.get(field_class.field_type, DEFAULT_FIELD_MAPPING).copy()
                if field_class.boost != 1.0:
                    field_mapping['boost'] = field_class.boost

                if field_class.stored is True:
                    field_mapping['store'] = True

                # Do this last to override `text` fields.
                if field_mapping['type'] == 'string':
                    if field_class.indexed is False or hasattr(field_class, 'facet_for') or getattr(field_class, 'is_multivalued', False):
                        field_mapping['index'] = 'not_analyzed'
                        # Drop analysis settings that do not apply to
                        # non-analyzed fields; pop() avoids a bare except.
                        field_mapping.pop('analyzer', None)
                        field_mapping.pop('term_vector', None)

                    elif field_class.field_type not in ('ngram', 'edge_ngram'):

                        # Check analyzer attribute
                        if not hasattr(field_class, 'analyzer') or field_class.analyzer is None:
                            logger.warning("Set default analyzer for field {}".format(field_name))
                        field_mapping['index'] = 'analyzed'
                        field_mapping['analyzer'] = getattr(field_class, 'analyzer', self.DEFAULT_ANALYZER)

                        # Check term_vector attribute
                        if hasattr(field_class, 'term_vector') and field_class.term_vector is not None:
                            field_mapping['term_vector'] = field_class.term_vector

                mapping_properties[field_class.index_fieldname] = field_mapping

            mapping_type = {
                'properties': mapping_properties,
                '_boost': {'name': 'boost', 'null_value': 1.0},
            }

            schema[get_model_ct(model)] = mapping_type

        return schema
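
The schema returned above is keyed by Django content type. Here is a hedged sketch of how such a schema might be applied with the low-level elasticsearch-py client; the client setup, index name, and the `backend` / `indexes` objects are assumptions, and the exact mapping call depends on the Elasticsearch version in use.

from elasticsearch import Elasticsearch

es = Elasticsearch('http://localhost:9200/')   # assumed local node
index_name = 'haystack'                        # assumed index name

# Create the index if it does not already exist (ignore HTTP 400 conflicts).
es.indices.create(index=index_name, ignore=400)

# `backend` and `indexes` are assumed to be set up as in the example above.
schema = backend.build_schema(indexes)
for doc_type, mapping_type in schema.items():
    es.indices.put_mapping(index=index_name, doc_type=doc_type,
                           body=mapping_type)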
Code example #6
try:
    import elasticsearch
    if not ((5, 0, 0) <= elasticsearch.__version__ < (6, 0, 0)):
        raise ImportError
    from elasticsearch.helpers import bulk, scan
except ImportError:
    raise MissingDependency("The 'elasticsearch5' backend requires the \
                            installation of 'elasticsearch>=5.0.0,<6.0.0'. \
                            Please refer to the documentation.")

DEFAULT_FIELD_MAPPING = {'type': 'text', 'analyzer': 'snowball'}
FIELD_MAPPINGS.update({
    'text': {
        'type': 'text',
        'analyzer': 'snowball'
    },
    'keyword': {
        'type': 'keyword'
    }
})


class Elasticsearch5SearchBackend(ElasticsearchSearchBackend):
    def __init__(self, connection_alias, **connection_options):
        super(Elasticsearch5SearchBackend,
              self).__init__(connection_alias, **connection_options)
        self.content_field_name = None

    def clear(self, models=None, commit=True):
        """
        Clears the backend of all documents/objects for a collection of models.