def test_object_constructor(): expected = { "type": "object", "properties": { "inner_int": { "type": "integer" } } } class Inner(InnerDoc): inner_int = field.Integer() obj_from_doc = field.Object(doc_class=Inner) assert obj_from_doc.to_dict() == expected obj_from_props = field.Object(properties={"inner_int": field.Integer()}) assert obj_from_props.to_dict() == expected with pytest.raises(ValidationException): field.Object(doc_class=Inner, properties={"inner_int": field.Integer()}) with pytest.raises(ValidationException): field.Object(doc_class=Inner, dynamic=False)
class Book(document.DocType):
    """Flattened Elasticsearch document for a book.

    The nested ``authors``/``userreviews`` objects are flattened into
    parallel lists (the commented-out Nested fields below show the
    alternative mapping that was not used).
    """
    # authors = field.Nested(properties={'name': field.String(),'url':field.String(index='not_analyzed')})
    authors_name = field.String()
    authors_url = field.String(index='not_analyzed')
    average = field.Float()
    cover = field.String(index='not_analyzed')
    description = field.String()
    outlinks = field.String(index='not_analyzed')
    ratings = field.Integer()
    reviews = field.Integer()
    title = field.String()
    url = field.String(index='not_analyzed')
    # userreviews = field.Nested(properties={'userName': field.String(), 'userReview': field.String(), 'userReviewDate': field.String(index='not_analyzed'), 'userURL': field.String(index='not_analyzed')})
    userreviews_userName = field.String()
    userreviews_userReview = field.String()
    userreviews_userReviewDate = field.String(index='not_analyzed')
    userreviews_userURL = field.String(index='not_analyzed')

    def add_authors(self, authors):
        """Flatten a list of author dicts into the parallel name/url lists."""
        self.authors_name = [author['name'] for author in authors]
        self.authors_url = [author['url'] for author in authors]

    def add_userreviews(self, reviews):
        """Flatten a list of user-review dicts into the parallel lists."""
        self.userreviews_userName = [rev['userName'] for rev in reviews]
        self.userreviews_userReview = [rev['userReview'] for rev in reviews]
        self.userreviews_userReviewDate = [rev['userReviewDate'] for rev in reviews]
        self.userreviews_userURL = [rev['userURL'] for rev in reviews]

    class Meta:
        doc_type = 'book'
        index = 'book-index'
def prepare_doc(self):
    """Dynamically build the Document subclass used to index this resource.

    Schema columns get positional names (``col1``, ``col2``, ...) and the
    original header names are stashed in the mapping meta under 'headers'.
    """
    doc_fields = {}
    header_map = {}
    for position, schema_field in enumerate(self.schema['fields'], 1):
        column = 'col{}'.format(position)
        header_map[column] = schema_field['name']
        doc_fields[column] = self._schema2doc_map[schema_field['type']]
    if self.has_geo_data:
        doc_fields['shape'] = dsl_field.GeoShape()
        doc_fields['point'] = dsl_field.GeoPoint()
        doc_fields['label'] = dsl_field.Text()
        doc_fields['shape_type'] = dsl_field.Integer()
    doc_fields['resource'] = dsl_field.Nested(properties={
        'id': dsl_field.Integer(),
        'title': dsl_field.Text(
            analyzer=polish_analyzer,
            fields={'raw': dsl_field.Keyword()},
        ),
    })
    doc_fields['updated_at'] = dsl_field.Date()
    doc_fields['row_no'] = dsl_field.Long()
    doc_fields['Index'] = type('Index', (type, ), {'name': self.idx_name})
    doc = type(self.idx_name, (Document, ), doc_fields)
    # Preserve the column -> original header mapping in the index meta.
    doc._doc_type.mapping._meta['_meta'] = {'headers': header_map}
    return doc
def test_object_constructor():
    """An Object field built from a doc_class or from raw properties maps
    identically; supplying a doc_class together with other kwargs fails."""
    expected = {
        'type': 'object',
        'properties': {'inner_int': {'type': 'integer'}},
    }

    class Inner(InnerDoc):
        inner_int = field.Integer()

    obj_from_doc = field.Object(doc_class=Inner)
    assert obj_from_doc.to_dict() == expected

    obj_from_props = field.Object(properties={'inner_int': field.Integer()})
    assert obj_from_props.to_dict() == expected

    with pytest.raises(ValidationException):
        field.Object(doc_class=Inner, properties={'inner_int': field.Integer()})
    with pytest.raises(ValidationException):
        field.Object(doc_class=Inner, dynamic=False)
class Document(DocType):
    """Search document for an indexed text document.

    NOTE(review): the original ended several field assignments with a
    trailing comma (e.g. ``title = field.String(...),``), which silently
    wrapped each of those fields in a 1-tuple instead of a Field instance
    and broke the generated mapping. The commas are removed here.
    """
    id = field.Integer()
    title = field.String(analyzer='snowball')
    author = field.String(analyzer='snowball')
    creation_date = field.Date()
    pages = field.Integer()
    content = field.String(analyzer='snowball')
    lang = field.String()
    size = field.Integer()
    tags = field.String(index='not_analyzed')
    autocomplete = field.Text(analyzer=ngram_analyzer)
def doc(self):
    """Lazily build and cache the DocType subclass for this resource's index.

    Schema columns get positional names (``col1``, ``col2``, ...); the
    original header names are kept in the mapping meta under 'headers'.

    NOTE(review): the original contained a no-op expression statement
    (``doc._doc_type.mapping._meta['_meta']``) which has been removed.
    """
    if not self._doc_cache:
        _fields, _map = {}, {}
        # enumerate(..., 1) instead of manually computing idx + 1
        for idx, _f in enumerate(self.schema['fields'], 1):
            field_name = 'col{}'.format(idx)
            _map[field_name] = _f['name']
            _fields[field_name] = _schema2doc_map[_f['type']]
        _fields['resource'] = dsl_field.Nested(
            properties={
                'id': dsl_field.Integer(),
                'title': dsl_field.Text(
                    analyzer=polish_analyzer,
                    fields={'raw': dsl_field.Keyword()})
            }
        )
        _fields['updated_at'] = dsl_field.Date()
        _fields['row_no'] = dsl_field.Long()
        doc = type(self.idx_name, (DocType,), _fields)
        doc._doc_type.index = self.idx_name
        doc._doc_type.mapping._meta['_meta'] = {'headers': _map}
        self._doc_cache = doc
    return self._doc_cache
class Manga(Document):
    """Search document for a scraped manga.

    NOTE(review): the original declared ``title`` twice; the bare
    ``field.Text()`` assignment was dead (immediately overwritten by the
    analyzed multi-field version) and has been removed.
    """
    title = field.Text(analyzer=titles, multi=True, fields={
        'space': field.Text(analyzer=titles_space, multi=True),
        'keyword': field.Keyword(multi=True),
    })
    tags = field.Object(Tag)
    upload_at = field.Date()
    scan_at = field.Date()
    url = field.Keyword()
    cover_url = field.Keyword()
    images_urls = field.Keyword(multi=True)
    images_len = field.Integer()

    class Index:
        name = 'nhentai__mangas'
        settings = {'number_of_shards': 2, 'number_of_replicas': 1}

    @classmethod
    def url_is_scaned(cls, url):
        """Return True when a manga with this exact url is already indexed."""
        logger.info(f"buscando manga {url}")
        # Simplified from the original if/return True/return False chain.
        return cls.search().filter("term", url=url).count() > 0
class Mapping(Content.Mapping):
    # NOTE: parent is set to integer so DJES doesn't recurse
    parent = field.Integer()
    data = field.Object()

    class Meta:
        # Necessary to allow for our data field to store appropriately in
        # Elasticsearch. A potential alternative could be storing as a
        # string; we should assess the value.
        dynamic = False
class Book(document.DocType):
    """Elasticsearch DSL document type for book objects.

    Author and user-review sub-objects are flattened into parallel
    string lists rather than stored as nested documents.
    """
    authors_name = field.String()
    authors_url = field.String(index='not_analyzed')
    average = field.Float()
    cover = field.String(index='not_analyzed')
    description = field.String()
    outlinks = field.String(index='not_analyzed')
    ratings = field.Integer()
    reviews = field.Integer()
    title = field.String()
    url = field.String(index='not_analyzed')
    userreviews_userName = field.String()
    userreviews_userReview = field.String()
    userreviews_userReviewDate = field.String(index='not_analyzed')
    userreviews_userURL = field.String(index='not_analyzed')

    class Meta:
        """Index/doc-type metadata for the Book document type."""
        doc_type = 'book'
        index = 'book-index'

    def add_authors(self, authors):
        """Populate the author fields from ``json['authors']``, a list of
        dicts each carrying 'name' and 'url' keys."""
        self.authors_name = [author['name'] for author in authors]
        self.authors_url = [author['url'] for author in authors]

    def add_userreviews(self, reviews):
        """Populate the review fields from ``json['userreviews']``, a list
        of dicts each carrying the four userreview keys."""
        self.userreviews_userName = [rev['userName'] for rev in reviews]
        self.userreviews_userReview = [rev['userReview'] for rev in reviews]
        self.userreviews_userReviewDate = [rev['userReviewDate'] for rev in reviews]
        self.userreviews_userURL = [rev['userURL'] for rev in reviews]
def prepare_doc(self):
    """Build the Document subclass used to index this (geo-enabled) resource.

    Fixed geo/resource fields are declared up front; schema columns are
    added with positional names and their original headers recorded in
    the mapping meta under 'headers'.
    """
    doc_fields = {
        'shape': dsl_field.GeoShape(),
        'point': dsl_field.GeoPoint(),
        'shape_type': dsl_field.Integer(),
        'label': dsl_field.Text(),
        'resource': dsl_field.Nested(properties={
            'id': dsl_field.Integer(),
            'title': dsl_field.Text(
                analyzer=polish_analyzer,
                fields={'raw': dsl_field.Keyword()},
            ),
        }),
        'updated_at': dsl_field.Date(),
        'row_no': dsl_field.Long(),
    }
    header_map = {}
    for position, schema_field in enumerate(self.schema, 1):
        # Columns whose type has no document mapping are simply skipped.
        if schema_field.type not in self._schema2doc_map:
            continue
        column = f'col{position}'
        header_map[column] = schema_field.name
        doc_fields[column] = self._schema2doc_map[schema_field.type]
    doc_fields['Index'] = type('Index', (type, ), {'name': self.idx_name})
    doc = type(self.idx_name, (Document, ), doc_fields)
    doc._doc_type.mapping._meta['_meta'] = {'headers': header_map}
    return doc
class EventDoc(SerializedDoc):
    """Search document for an event."""
    name = field.String()
    description = field.String(
        analyzer=html_strip,
        fields={'raw': field.String(index='not_analyzed')},
    )
    event_type = field.Object(
        properties={
            'name': field.String(fields={'raw': field.String(index='not_analyzed')}),
        },
    )
    start_year = field.Integer()
    places = field.Nested(
        doc_class=PlaceDoc,
        properties={
            'location_display': field.String(fields={'raw': field.String(index='not_analyzed')}),
        },
    )

    def get_display_name(self):
        """Human-readable identifier for this document."""
        return self.name
class CompanyDocType(DocType):
    """Search document for a company profile, including its case studies."""
    date_of_creation = FormattedDate(date_format='%Y-%m-%d')
    description = field.Text()
    employees = field.Text()
    facebook_url = field.Text()
    pk = field.Integer()
    keywords = field.Text()
    linkedin_url = field.Text()
    logo = field.Text()
    has_single_sector = field.Boolean()
    modified = FormattedDate(date_format='%Y-%m-%dT%H:%M:%S.%fZ')
    name = field.Text()
    number = field.Text()
    sectors = field.Text(multi=True)
    sectors_label = field.Text(multi=True)
    slug = field.Text()
    summary = field.Text()
    twitter_url = field.Text()
    website = field.Text()
    # Case studies are stored inline as nested sub-documents.
    supplier_case_studies = field.Nested(properties={
        'pk': field.Integer(),
        'title': field.Text(),
        'short_summary': field.Text(),
        'description': field.Text(),
        'sector': field.Text(),
        'keywords': field.Text(),
        'image_one_caption': field.Text(),
        'image_two_caption': field.Text(),
        'image_three_caption': field.Text(),
        'testimonial': field.Text(),
        'slug': field.Text(),
    })

    class Meta:
        index = 'company'
class InitiativeDoc(SerializedDoc):
    """Search document for an initiative."""
    identifier = field.String()
    name = field.String()
    principal_agent = field.Nested(multi=False, properties={'name': field.String()})
    member_countries = field.Nested(doc_class=CountryDoc)
    geographic_scope = field.Nested(
        doc_class=CountryDoc,
        properties={
            'name': field.String(fields={'raw': field.String(index='not_analyzed')}),
        },
    )
    initiative_type = field.Object(
        properties={
            'name': field.String(fields={'raw': field.String(index='not_analyzed')}),
        },
    )
    start_year = field.Integer()

    def get_display_name(self):
        """Human-readable identifier for this document."""
        return self.name
class OrganizationDoc(SerializedDoc):
    """Search document for an organization."""
    name = field.String()
    description = field.String(
        analyzer=html_strip,
        fields={'raw': field.String(index='not_analyzed')},
    )
    mission = field.String()
    countries = field.Nested(doc_class=CountryDoc)
    headquarters_location = field.String(
        fields={'raw': field.String(index='not_analyzed')},
    )
    scope_of_operations = field.String(
        multi=True,
        fields={'raw': field.String(index='not_analyzed')},
    )
    start_year = field.Integer()

    def get_display_name(self):
        """Human-readable identifier for this document."""
        return self.name
class CaseStudyInnerDoc(InnerDoc):
    """Inner document for a case study; searchable text is funnelled into
    the catch-all ``wildcard`` field via copy_to."""
    wildcard = field.Text()
    pk = field.Integer(index=False)
    title = field.Text(copy_to='wildcard')
    short_summary = field.Text(copy_to='wildcard')
    description = field.Text(copy_to='wildcard')
    sector = field.Text(copy_to='wildcard')
    keywords = field.Text(copy_to='wildcard')
    image = field.Text(index=False)
    company_number = field.Text(index=False)
    image_one_caption = field.Text(copy_to='wildcard')
    image_two_caption = field.Text(copy_to='wildcard')
    image_three_caption = field.Text(copy_to='wildcard')
    testimonial = field.Text(copy_to='wildcard')
    testimonial_name = field.Keyword(copy_to='wildcard')
    testimonial_job_title = field.Text(copy_to='wildcard')
    slug = field.Text(index=False)
class ProjectDoc(SerializedDoc):
    """Search document for an infrastructure project."""
    identifier = field.String()
    name = field.String()
    alternate_name = field.String()
    description = field.String(
        analyzer=html_strip,
        fields={'raw': field.String(index='not_analyzed')},
    )
    status = field.String(fields={'raw': field.String(index='not_analyzed')})
    start_year = field.Integer()
    countries = field.Nested(
        doc_class=CountryDoc,
        # project_location aggregation/facet uses the raw multifield
        properties={
            'name': field.String(fields={'raw': field.String(index='not_analyzed')}),
        },
    )
    infrastructure_type = field.Object(
        properties={
            'name': field.String(fields={'raw': field.String(index='not_analyzed')}),
        },
    )
    # Providing a doc_class for initiatives produced errors, so keep it simple!
    initiatives = field.Nested(properties={'name': field.String()})
    funding = field.Object(
        multi=True,
        properties={
            'sources': field.Object(
                multi=True,
                properties={
                    'name': field.String(fields={'raw': field.String(index='not_analyzed')}),
                },
            ),
        },
    )
    regions = field.Nested(
        doc_class=RegionDoc,
        properties={
            'name': field.String(fields={'raw': field.String(index='not_analyzed')}),
        },
    )

    def get_display_name(self):
        """Human-readable identifier for this document."""
        return self.name
class AnswerDocument(QuestionDocument):
    """
    ES document for Answers. Every Answer in DB gets an AnswerDocument in ES.

    Child class to QuestionDocument, with fields here un-prefixed.

    This document defines the answer-specific fields which are included in an
    AnswerDocument in addition to the de-normalized fields of an Answer's Question
    which are defined in QuestionDocument.

    Since QuestionDocument and AnswerDocument are stored in the same index, ES sees
    QuestionDocuments and AnswerDocuments the same, just with some documents missing
    certain fields.

    Enables aggregations on answers, such as when creating contribution metrics, and
    enables searching within an AAQ thread, or on Answer-specific properties like being
    a solution.
    """

    creator_id = field.Keyword()
    created = field.Date()
    content = SumoLocaleAwareTextField(term_vector="with_positions_offsets")
    updated = field.Date()
    updated_by_id = field.Keyword()
    num_helpful_votes = field.Integer()
    num_unhelpful_votes = field.Integer()
    is_solution = field.Boolean()

    @classmethod
    def prepare(cls, instance, **kwargs):
        """Override super method to exclude certain docs."""
        # Flag spam answers (or answers on spam questions) for unindexing.
        if instance.is_spam or instance.question.is_spam:
            instance.es_discard_doc = "unindex_me"
        obj = super().prepare(instance, **kwargs)
        # add a prefix to the id so we don't clash with QuestionDocuments
        obj.meta.id = "a_{}".format(obj.meta.id)
        return obj

    def prepare_is_solution(self, instance):
        solution_id = instance.question.solution_id
        return solution_id is not None and solution_id == instance.id

    def prepare_locale(self, instance):
        return instance.question.locale

    def prepare_num_helpful_votes(self, instance):
        # Prefer the annotated count from get_queryset when available.
        if hasattr(instance, "es_num_helpful_votes"):
            return instance.es_num_helpful_votes
        return instance.num_helpful_votes

    def prepare_num_unhelpful_votes(self, instance):
        # Prefer the annotated count from get_queryset when available.
        if hasattr(instance, "es_num_unhelpful_votes"):
            return instance.es_num_unhelpful_votes
        return instance.num_unhelpful_votes

    def prepare_answer_content(self, instance):
        # clear answer_content field from QuestionDocument,
        # as we don't need the content of sibling answers in an AnswerDocument
        return None

    def get_field_value(self, field, instance, *args):
        # question_* fields are resolved against the parent question
        if field.startswith("question_"):
            instance = instance.question
        return super().get_field_value(field, instance, *args)

    def to_action(self, *args, **kwargs):
        # if the id is un-prefixed, add it
        if not str(self.meta.id).startswith("a_"):
            self.meta.id = f"a_{self.meta.id}"
        return super().to_action(*args, **kwargs)

    @classmethod
    def get(cls, id, **kwargs):
        # if the id is un-prefixed, add it
        if not str(id).startswith("a_"):
            id = f"a_{id}"
        return super().get(id, **kwargs)

    @classmethod
    def get_model(cls):
        return Answer

    @classmethod
    def get_queryset(cls):
        return (
            Answer.objects
            # prefetch each answer's question,
            # applying the same optimizations as in the QuestionDocument
            .prefetch_related(
                Prefetch("question", queryset=QuestionDocument.get_queryset())
            )
            # count votes in db to improve performance
            .annotate(
                es_num_helpful_votes=Count("votes", filter=Q(votes__helpful=True)),
                es_num_unhelpful_votes=Count("votes", filter=Q(votes__helpful=False)),
            )
        )
class QuestionDocument(SumoDocument):
    """
    ES document for Questions. Every Question in DB gets a QuestionDocument in ES.

    Parent class to AnswerDocument, with most fields here prefixed with "question_".

    This document defines the question-specific fields (most of) which are
    de-normalized in the AnswerDocument.

    Since QuestionDocument and AnswerDocument are stored in the same index, ES sees
    QuestionDocuments and AnswerDocuments the same, just with some documents missing
    certain fields.

    Enables searching for AAQ threads as a unit.
    """

    question_id = field.Keyword()
    question_title = SumoLocaleAwareTextField()
    question_creator_id = field.Keyword()
    question_content = SumoLocaleAwareTextField(
        term_vector="with_positions_offsets")
    question_created = field.Date()
    question_updated = field.Date()
    question_updated_by_id = field.Keyword()
    question_has_solution = field.Boolean()
    question_is_locked = field.Boolean()
    question_is_archived = field.Boolean()
    question_product_id = field.Keyword()
    question_topic_id = field.Keyword()
    question_taken_by_id = field.Keyword()
    question_taken_until = field.Date()
    question_tag_ids = field.Keyword(multi=True)
    question_num_votes = field.Integer()

    # store answer content to optimise searching for AAQ threads as a unit
    answer_content = SumoLocaleAwareTextField(
        multi=True, term_vector="with_positions_offsets")

    locale = field.Keyword()

    class Index:
        name = config.QUESTION_INDEX_NAME
        using = config.DEFAULT_ES7_CONNECTION

    @classmethod
    def prepare(cls, instance):
        """Override super method to exclude certain docs."""
        # Flag spam questions for unindexing.
        if instance.is_spam:
            instance.es_discard_doc = "unindex_me"
        return super(QuestionDocument, cls).prepare(instance)

    def prepare_question_tag_ids(self, instance):
        return [tag.id for tag in instance.tags.all()]

    def prepare_question_has_solution(self, instance):
        return instance.solution_id is not None

    def prepare_question_num_votes(self, instance):
        # Prefer the annotated count from get_queryset when available.
        if hasattr(instance, "es_question_num_votes"):
            return instance.es_question_num_votes
        return instance.num_votes

    def prepare_answer_content(self, instance):
        # when bulk indexing use answer queryset prefetched in `get_queryset`
        # method; this is to avoid running an extra query for each question in
        # the chunk. Fall back to filtering if the prefetch isn't present.
        if hasattr(instance, "es_question_answers_not_spam"):
            answers = instance.es_question_answers_not_spam
        else:
            answers = instance.answers.filter(is_spam=False)
        return [answer.content for answer in answers]

    def get_field_value(self, field, *args):
        # strip the "question_" prefix before resolving against the model
        if field.startswith("question_"):
            field = field[len("question_"):]
        return super().get_field_value(field, *args)

    @classmethod
    def get_model(cls):
        return Question

    @classmethod
    def get_queryset(cls):
        return (
            Question.objects
            # prefetch answers which aren't spam to avoid extra queries when
            # iterating over them
            .prefetch_related(
                Prefetch(
                    "answers",
                    queryset=Answer.objects.filter(is_spam=False),
                    to_attr="es_question_answers_not_spam",
                )
            )
            # prefetch tags to avoid extra queries when iterating over them
            .prefetch_related("tags")
            # count votes in db to improve performance
            .annotate(es_question_num_votes=Count("votes"))
        )
class CompanyDocument(Document):
    """Search document for a company profile.

    Free-text fields are funnelled into ``wildcard``/``casestudy_wildcard``
    and keyword fields into ``keyword_wildcard`` via copy_to, giving three
    catch-all search targets.
    """
    wildcard = field.Text(analyzer=american_english_analyzer)
    casestudy_wildcard = field.Text(analyzer=american_english_analyzer)
    keyword_wildcard = field.Keyword()
    case_study_count = field.Integer()
    date_of_creation = field.Date(index=False)
    description = field.Text(
        copy_to='wildcard', analyzer=american_english_analyzer
    )
    has_description = field.Boolean()
    employees = field.Keyword(index=False, store=True)
    facebook_url = field.Keyword(index=False, store=True)
    pk = field.Integer(index=False)
    keywords = field.Text(copy_to='wildcard')
    linkedin_url = field.Keyword(index=False, store=True)
    logo = field.Keyword(index=False, store=True)
    has_single_sector = field.Boolean()
    modified = field.Date(index=False)
    ordering_name = field.Keyword()
    name = field.Text(copy_to=['wildcard', 'ordering_name'])
    number = field.Keyword(copy_to='keyword_wildcard')
    sectors = field.Keyword(multi=True, copy_to='keyword_wildcard', store=True)
    sectors_label = field.Keyword(
        multi=True, copy_to='keyword_wildcard', store=True
    )
    expertise_industries = field.Keyword(
        multi=True, copy_to='keyword_wildcard', store=True
    )
    expertise_regions = field.Keyword(
        multi=True, copy_to='keyword_wildcard', store=True
    )
    expertise_languages = field.Keyword(
        multi=True, copy_to='keyword_wildcard', store=True
    )
    expertise_countries = field.Keyword(
        multi=True, copy_to='keyword_wildcard', store=True
    )
    # Represents Dict as it's the primitive datatype for this field
    expertise_products_services = field.Object()
    expertise_products_services_labels = field.Keyword(
        multi=True, copy_to='keyword_wildcard', store=True
    )
    expertise_labels = field.Keyword(
        multi=True, copy_to='keyword_wildcard', store=True
    )
    slug = field.Keyword(copy_to='keyword_wildcard', store=True)
    summary = field.Text(
        copy_to='wildcard', analyzer=american_english_analyzer
    )
    twitter_url = field.Keyword(index=False, store=True)
    website = field.Keyword(copy_to='keyword_wildcard', store=True)
    supplier_case_studies = field.Nested(
        properties={
            'pk': field.Integer(index=False),
            'title': field.Text(copy_to='casestudy_wildcard'),
            'short_summary': field.Text(copy_to='casestudy_wildcard'),
            'description': field.Text(copy_to='casestudy_wildcard'),
            'sector': field.Keyword(copy_to='keyword_wildcard', store=True),
            'keywords': field.Text(copy_to='casestudy_wildcard'),
            'image_one_caption': field.Text(copy_to='casestudy_wildcard'),
            'image_two_caption': field.Text(copy_to='casestudy_wildcard'),
            'image_three_caption': field.Text(copy_to='casestudy_wildcard'),
            'testimonial': field.Text(copy_to='casestudy_wildcard'),
            'website': field.Keyword(copy_to='casestudy_wildcard', store=True),
            'slug': field.Keyword(copy_to='keyword_wildcard', store=True),
            'testimonial_name': field.Keyword(
                copy_to='casestudy_wildcard', store=True
            ),
            'testimonial_company': field.Text(copy_to='casestudy_wildcard'),
            'testimonial_job_title': field.Text(copy_to='casestudy_wildcard'),
        }
    )
    is_showcase_company = field.Boolean()
    is_published_investment_support_directory = field.Boolean()
    is_published_find_a_supplier = field.Boolean()

    class Meta:
        index = settings.ELASTICSEARCH_COMPANY_INDEX_ALIAS
class Mapping:
    """Field mapping declarations for the search index."""
    pk = field.Integer()
    title = field.String(analyzer="snowball", _boost=2.0)
    slug = field.String(index="not_analyzed")
    status = field.String(index="not_analyzed")
    thumbnail_override = ElasticsearchImageField()
class Inner(InnerDoc):
    # single integer sub-field; maps to {'inner_int': {'type': 'integer'}}
    inner_int = field.Integer()
def test_integer():
    """Integer fields coerce numeric strings, pass None through, and
    reject non-numeric input with ValueError."""
    int_field = field.Integer()
    assert int_field.deserialize('42') == 42
    assert int_field.deserialize(None) is None
    with pytest.raises(ValueError):
        int_field.deserialize('not_an_integer')
class Dump_elastic_model(Document):
    """Minimal document with two integer fields.

    NOTE(review): name kept as-is for compatibility, though PascalCase
    (``DumpElasticModel``) would be conventional.
    """
    numeric_1 = field.Integer()
    numeric_2 = field.Integer()
class Ssn_issued(InnerDoc):
    """Inner document recording where and when an SSN was issued."""
    state = field.Keyword()
    year = field.Integer()
    date = field.Date()
POS: "ADJ", 'OP': '*' }, { POS: 'NOUN', 'OP': "+" }]), ('pnp', [{ POS: "ADJ", "OP": "*" }, { POS: "PROPN", "OP": "+" }])] PD2ES_TYPES = { np.dtype('O'): field.Text(), np.dtype('int64'): field.Integer(), np.dtype('float64'): field.Double(), np.dtype('<M8[ns]'): field.Date(), np.dtype('bool'): field.Boolean() } def load_spacy_model(): nlp = spacy.load('en_core_web_lg') nlp.tokenizer.infix_finditer = re.compile(r'[~\-_]').finditer return nlp def make_matcher(nlp, patterns=PATTERNS): matcher = spacy.matcher.Matcher(nlp.vocab) for _id, pattern in PATTERNS:
class QuestionDocument(SumoDocument):
    """
    ES document for Questions. Every Question in DB gets a QuestionDocument in ES.

    Parent class to AnswerDocument, with most fields here prefixed with "question_".

    This document defines the question-specific fields (most of) which are
    de-normalized in the AnswerDocument.

    Since QuestionDocument and AnswerDocument are stored in the same index, ES sees
    QuestionDocuments and AnswerDocuments the same, just with some documents missing
    certain fields.

    Enables searching for AAQ threads as a unit.
    """

    question_id = field.Keyword()
    question_title = SumoLocaleAwareTextField()
    question_creator_id = field.Keyword()
    question_content = SumoLocaleAwareTextField(
        term_vector="with_positions_offsets")
    question_created = field.Date()
    question_updated = field.Date()
    question_updated_by_id = field.Keyword()
    question_has_solution = field.Boolean()
    question_is_locked = field.Boolean()
    question_is_archived = field.Boolean()
    question_is_spam = field.Boolean()
    question_marked_as_spam = field.Date()
    question_marked_as_spam_by_id = field.Keyword()
    question_product_id = field.Keyword()
    question_topic_id = field.Keyword()
    question_taken_by_id = field.Keyword()
    question_taken_until = field.Date()
    question_tag_ids = field.Keyword(multi=True)
    question_num_votes = field.Integer()

    # store answer content to optimise searching for AAQ threads as a unit
    answer_content = SumoLocaleAwareTextField(
        multi=True, term_vector="with_positions_offsets")

    locale = field.Keyword()

    class Index:
        name = config.QUESTION_INDEX_NAME
        using = config.DEFAULT_ES7_CONNECTION

    def prepare_question_tag_ids(self, instance):
        return [tag.id for tag in instance.tags.all()]

    def prepare_question_has_solution(self, instance):
        return instance.solution_id is not None

    def prepare_question_num_votes(self, instance):
        # Prefer the annotated count from get_queryset when available.
        if hasattr(instance, "es_question_num_votes"):
            return instance.es_question_num_votes
        return instance.num_votes

    def prepare_answer_content(self, instance):
        return [answer.content for answer in instance.answers.all()]

    def get_field_value(self, field, *args):
        # strip the "question_" prefix before resolving against the model
        if field.startswith("question_"):
            field = field[len("question_"):]
        return super().get_field_value(field, *args)

    @classmethod
    def get_model(cls):
        return Question

    @classmethod
    def get_queryset(cls):
        return (
            Question.objects
            .prefetch_related("answers")
            # prefetch tags to avoid extra queries when iterating over them
            .prefetch_related("tags")
            # count votes in db to improve performance
            .annotate(es_question_num_votes=Count("votes"))
        )