def map_layers(request, entitytypeid='all', get_centroids=False):
    """Return a GeoJSON FeatureCollection of map-layer features.

    Querystring parameters consumed:
        entityid -- comma-separated ids; when present, those documents are
                    fetched directly by id and returned immediately
        geom     -- name of a property to promote to the feature geometry
        limit    -- max number of hits (defaults to MAP_LAYER_FEATURE_LIMIT)
    When ``get_centroids`` is True only centroid coordinates are pulled from
    the index (field-restricted query); otherwise full documents are returned.
    """
    geom_param = request.GET.get('geom', None)
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    entityids = request.GET.get('entityid', '')
    geojson_collection = {"type": "FeatureCollection", "features": []}
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)
    args = {'index': 'maplayers'}
    if entitytypeid != 'all':
        args['doc_type'] = entitytypeid

    if entityids != '':
        # Direct id lookups bypass the query entirely.
        for entityid in entityids.split(','):
            geojson_collection['features'].append(
                se.search(index='maplayers', id=entityid)['_source'])
        return JSONResponse(geojson_collection)

    if get_centroids:
        # Centroid-only requests use a lighter, field-restricted query.
        args['fields'] = ['properties.centroid.coordinates', 'type', '_source.id']
        data = query.search(**args)
        geojson_collection['features'] = [{
            "geometry": {
                "type": "Point",
                "coordinates": item['fields']['properties.centroid.coordinates']
            },
            "type": "Feature",
            "id": item['_id']
        } for item in data['hits']['hits']]
    else:
        # Full data for each record; optionally swap in the requested geometry.
        data = query.search(**args)
        for item in data['hits']['hits']:
            if geom_param is not None:  # was `!= None`
                item['_source']['geometry'] = item['_source']['properties'][geom_param]
                item['_source']['properties'].pop('extent', None)
                item['_source']['properties'].pop(geom_param, None)
            else:
                item['_source']['properties'].pop('extent', None)
                item['_source']['properties'].pop('centroid', None)
            geojson_collection['features'].append(item['_source'])

    return JSONResponse(geojson_collection)
def get_related_resources(self, lang='en-US', limit=1000, start=0):
    """
    Returns an object that lists the related resources, the relationship
    types, and a reference to the current resource

    """
    ret = {
        'resource_instance': self,
        'resource_relationships': [],
        'related_resources': []
    }
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit, start=start)
    bool_filter = Bool()
    # A relation may reference this instance from either end.
    bool_filter.should(Terms(field='resourceinstanceidfrom', terms=self.resourceinstanceid))
    bool_filter.should(Terms(field='resourceinstanceidto', terms=self.resourceinstanceid))
    query.add_query(bool_filter)
    resource_relations = query.search(index='resource_relations', doc_type='all')
    ret['total'] = resource_relations['hits']['total']
    instanceids = set()
    for relation in resource_relations['hits']['hits']:
        relation['_source']['preflabel'] = get_preflabel_from_valueid(relation['_source']['relationshiptype'], lang)
        ret['resource_relationships'].append(relation['_source'])
        instanceids.add(relation['_source']['resourceinstanceidto'])
        instanceids.add(relation['_source']['resourceinstanceidfrom'])
    # BUGFIX: discard (not remove) our own id so a hit set that somehow
    # lacks it does not raise KeyError; also skip the mget entirely when
    # no other ids remain.
    instanceids.discard(str(self.resourceinstanceid))
    if len(instanceids) > 0:
        related_resources = se.search(index='resource', doc_type='_all', id=list(instanceids))
        if related_resources:
            for resource in related_resources['docs']:
                ret['related_resources'].append(resource['_source'])
    return ret
def get_restricted_instances(user, search_engine=None, allresources=False):
    """Return ids of resource instances the given user may not access.

    With ``allresources=True`` the ids come from the permission tables
    (union of group- and user-level "no_access" grants), regardless of the
    user. Otherwise the resources index is scrolled for documents whose
    permissions deny this specific user. ``search_engine`` is only needed
    for the indexed (per-user) path.
    """
    # Superusers are never restricted (unless every restricted id was asked for).
    if allresources is False and user.is_superuser is True:
        return []

    if allresources is True:
        # Collect object pks denied at group level...
        restricted_group_instances = {
            perm["object_pk"]
            for perm in GroupObjectPermission.objects.filter(permission__codename="no_access_to_resourceinstance").values("object_pk")
        }
        # ...and at individual user level, then union them.
        restricted_user_instances = {
            perm["object_pk"]
            for perm in UserObjectPermission.objects.filter(permission__codename="no_access_to_resourceinstance").values("object_pk")
        }
        all_restricted_instances = list(restricted_group_instances | restricted_user_instances)
        return all_restricted_instances
    else:
        # Indexed path: find documents that list this user under
        # permissions.users_with_no_access (nested field).
        terms = Terms(field="permissions.users_with_no_access", terms=[str(user.id)])
        query = Query(search_engine, start=0, limit=settings.SEARCH_RESULT_LIMIT)
        has_access = Bool()
        nested_term_filter = Nested(path="permissions", query=terms)
        has_access.must(nested_term_filter)
        query.add_query(has_access)
        results = query.search(index=RESOURCES_INDEX, scroll="1m")
        scroll_id = results["_scroll_id"]
        total = results["hits"]["total"]["value"]
        if total > settings.SEARCH_RESULT_LIMIT:
            # Page through the remaining hits with the scroll API,
            # accumulating them onto the first response.
            pages = total // settings.SEARCH_RESULT_LIMIT
            for page in range(pages):
                results_scrolled = query.se.es.scroll(scroll_id=scroll_id, scroll="1m")
                results["hits"]["hits"] += results_scrolled["hits"]["hits"]
        restricted_ids = [res["_id"] for res in results["hits"]["hits"]]
        return restricted_ids
def get_relations(resourceinstanceid, start, limit):
    """Fetch resource relations that name the given instance id on either end."""
    relation_query = Query(se, start=start, limit=limit)
    either_end = Bool()
    for end_field in ("resourceinstanceidfrom", "resourceinstanceidto"):
        either_end.should(Terms(field=end_field, terms=resourceinstanceid))
    relation_query.add_query(either_end)
    return relation_query.search(index="resource_relations")
def get_preflabel_from_conceptid(conceptid, lang):
    """Return the preferred label for a concept id.

    Preference order: exact language match, same language family,
    the site default language, then whichever label was seen last.
    """
    default = {"category": "", "conceptid": "", "language": "", "value": "", "type": "", "id": ""}
    fallback = None
    se = SearchEngineFactory().create()
    label_query = Query(se)
    label_query.add_filter(Terms(field='conceptid', terms=[conceptid]))
    label_query.add_query(Match(field='type', query='preflabel', type='phrase'))
    for hit in label_query.search(index='concept_labels')['hits']['hits']:
        label = hit['_source']
        default = label
        if label['language'] == lang:
            # Exact language match wins immediately.
            return label
        if label['language'].split('-')[0] == lang.split('-')[0]:
            fallback = label
        if label['language'] == settings.LANGUAGE_CODE and fallback is None:
            fallback = label
    return default if fallback is None else fallback
def find_overlapping(request):
    '''This function queries ES when called via Ajax when a new geometry is
    created in the Location tab. If pre-existing resources are found within
    the perimeter of the polygon (or the buffered zone around a
    point/line/polygon), an alert is raised.'''
    geomString = request.GET.get('geom', '')
    geom = GEOSGeometry(geomString, srid=4326)
    mindistance = settings.METER_RADIUS
    if not mindistance:
        mindistance = 1000  # if settings.METER_RADIUS isn't set, default to 1Km
    # Buffer in a metric projection (web mercator), then return to WGS84
    # for the geo-shape query.
    geom.transform(3857)
    buffered_geom = geom.buffer(mindistance)
    buffered_geom.transform(4326)
    # BUGFIX: removed a leftover Python 2 debug statement
    # (`print geom, buffered_geom`).
    se = SearchEngineFactory().create()
    query = Query(se)
    boolfilter = Bool()
    geoshape = GeoShape(field='geometries.value', type=buffered_geom.geom_type, coordinates=buffered_geom.coords)
    nested = Nested(path='geometries', query=geoshape)
    boolfilter.must(nested)
    query.add_filter(boolfilter)
    results = query.search(index='entity', doc_type='')
    overlaps = []
    for hit in results['hits']['hits']:
        overlaps.append({
            'id': hit['_id'],
            'type': hit['_type'],
            'primaryname': hit['_source']['primaryname']
        })
    return JSONResponse(overlaps)
def get_relations(resourceinstanceid, start, limit, resourceinstance_graphid=None):
    """Fetch relations touching the given instance id, optionally restricted
    to relations where either end belongs to the given graph."""
    relation_query = Query(se, start=start, limit=limit)
    instance_filter = Bool()
    for end_field in ("resourceinstanceidfrom", "resourceinstanceidto"):
        instance_filter.should(Terms(field=end_field, terms=resourceinstanceid))
    if resourceinstance_graphid:
        # Either side of the relation must belong to the requested graph.
        graph_filter = Bool()
        for graph_field in ("resourceinstancefrom_graphid", "resourceinstanceto_graphid"):
            graph_filter.should(Terms(field=graph_field, terms=resourceinstance_graphid))
        instance_filter.must(graph_filter)
    relation_query.add_query(instance_filter)
    return relation_query.search(index=RESOURCE_RELATIONS_INDEX)
def delete(self, *args, **kwargs):
    """Delete this tile, its child tiles, and its indexed term documents.

    Keyword args consumed here (popped before forwarding):
        request -- the originating HttpRequest, may be absent
        provisional_edit_log_details -- extra detail for the edit log
    Reviewers (or the owner of a provisional tile) delete for real and
    reindex the parent resource; any other user only records a provisional
    "delete" edit and re-saves the tile.
    """
    se = SearchEngineFactory().create()
    request = kwargs.pop("request", None)
    provisional_edit_log_details = kwargs.pop("provisional_edit_log_details", None)
    # Cascade to child tiles first.
    for tile in self.tiles:
        tile.delete(*args, request=request, **kwargs)
    try:
        user = request.user
        user_is_reviewer = user_is_resource_reviewer(user)
    except AttributeError:  # no user
        user = None
        user_is_reviewer = True
    if user_is_reviewer is True or self.user_owns_provisional(user):
        # Remove the term-index documents generated from this tile.
        query = Query(se)
        bool_query = Bool()
        bool_query.filter(Terms(field="tileid", terms=[self.tileid]))
        query.add_query(bool_query)
        results = query.search(index="terms")["hits"]["hits"]
        for result in results:
            se.delete(index="terms", id=result["_id"])
        self.__preDelete(request)
        # NOTE(review): request may be None on the no-user path, in which
        # case request.user below would raise AttributeError — confirm
        # callers always pass a request when deleting for real.
        self.save_edit(
            user=request.user,
            edit_type="tile delete",
            old_value=self.data,
            provisional_edit_log_details=provisional_edit_log_details
        )
        super(Tile, self).delete(*args, **kwargs)
        # Reindex the owning resource so search reflects the removal.
        resource = Resource.objects.get(resourceinstanceid=self.resourceinstance.resourceinstanceid)
        resource.index()
    else:
        # Non-reviewer without ownership: record a provisional delete only.
        self.apply_provisional_edit(user, data={}, action="delete")
        super(Tile, self).save(*args, **kwargs)
def get_preflabel_from_conceptid(conceptid, lang):
    """Look up the preferred label for a concept id, preferring the requested
    language, then the same language family, then the site default language,
    then the last label encountered."""
    chosen = None
    last_seen = {"category": "", "conceptid": "", "language": "", "value": "", "type": "", "id": ""}
    engine = SearchEngineFactory().create()
    label_query = Query(engine)
    label_query.add_filter(Terms(field='conceptid', terms=[conceptid]))
    label_query.add_query(Match(field='type', query='preflabel', type='phrase'))
    hits = label_query.search(index='concept_labels')['hits']['hits']
    for hit in hits:
        source = hit['_source']
        last_seen = source
        if source['language'] == lang:
            return source
        if source['language'].split('-')[0] == lang.split('-')[0]:
            chosen = source
        if source['language'] == settings.LANGUAGE_CODE and chosen is None:
            chosen = source
    return last_seen if chosen is None else chosen
def get_related_resources(resourceid, lang, limit=1000, start=0):
    """Return the relationships and related resource documents for a resource.

    Searches resource_relations for records naming ``resourceid`` on either
    side, attaches the preferred relationship label for ``lang``, then bulk
    fetches the related entity documents.
    """
    ret = {
        'resource_relationships': [],
        'related_resources': []
    }
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit, start=start)
    query.add_filter(Terms(field='entityid1', terms=resourceid).dsl, operator='or')
    query.add_filter(Terms(field='entityid2', terms=resourceid).dsl, operator='or')
    resource_relations = query.search(index='resource_relations', doc_type='all')
    ret['total'] = resource_relations['hits']['total']
    entityids = set()
    for relation in resource_relations['hits']['hits']:
        relation['_source']['preflabel'] = get_preflabel_from_valueid(relation['_source']['relationshiptype'], lang)
        ret['resource_relationships'].append(relation['_source'])
        entityids.add(relation['_source']['entityid1'])
        entityids.add(relation['_source']['entityid2'])
    # BUGFIX: discard (not remove) so an id set that somehow lacks
    # resourceid does not raise KeyError; skip the mget when nothing remains.
    entityids.discard(resourceid)
    if len(entityids) > 0:
        related_resources = se.search(index='entity', doc_type='_all', id=list(entityids))
        if related_resources:
            for resource in related_resources['docs']:
                ret['related_resources'].append(resource['_source'])
    return ret
def get_preflabel_from_conceptid(conceptid, lang):
    """Return the preferred label for a concept id.

    Preference order: exact language match, same language family, the site
    default language, then the last label seen.
    """
    ret = None
    default = {
        "category": "",
        "conceptid": "",
        "language": "",
        "value": "",
        "type": "",
        "id": ""
    }
    se = SearchEngineFactory().create()
    query = Query(se)
    terms = Terms(field='conceptid', terms=[conceptid])
    # Uncomment the following line only after having reindexed ElasticSearch
    # cause currently the Arabic labels are indexed as altLabels
    # match = Match(field='type', query='prefLabel', type='phrase')
    query.add_filter(terms)
    # Uncomment the following line only after having reindexed ElasticSearch
    # cause currently the Arabic labels are indexed as altLabels
    # query.add_query(match)
    preflabels = query.search(index='concept_labels')['hits']['hits']
    for preflabel in preflabels:
        default = preflabel['_source']
        # get the label in the preferred language, otherwise get the label
        # in the default language
        if preflabel['_source']['language'] == lang:
            return preflabel['_source']
        if preflabel['_source']['language'].split('-')[0] == lang.split('-')[0]:
            ret = preflabel['_source']
        # BUGFIX: this previously compared against `lang`, which is
        # unreachable (an exact match returns above); compare against the
        # site default language as the sibling implementations do.
        if preflabel['_source']['language'] == settings.LANGUAGE_CODE and ret is None:
            ret = preflabel['_source']
    return default if ret is None else ret
def get_preflabel_from_conceptid(conceptid, lang):
    """Return the preferred label for a concept id.

    Preference order: exact language match, same language family, the site
    default language, then the last label seen.
    """
    ret = None
    default = {
        "category": "",
        "conceptid": "",
        "language": "",
        "value": "",
        "type": "",
        "id": ""
    }
    se = SearchEngineFactory().create()
    query = Query(se)
    terms = Terms(field='conceptid', terms=[conceptid])
    # Uncomment the following line only after having reindexed ElasticSearch
    # cause currently the Arabic labels are indexed as altLabels
    # match = Match(field='type', query='prefLabel', type='phrase')
    query.add_filter(terms)
    # Uncomment the following line only after having reindexed ElasticSearch
    # cause currently the Arabic labels are indexed as altLabels
    # query.add_query(match)
    preflabels = query.search(index='concept_labels')['hits']['hits']
    for preflabel in preflabels:
        default = preflabel['_source']
        # get the label in the preferred language, otherwise get the label
        # in the default language
        if preflabel['_source']['language'] == lang:
            return preflabel['_source']
        if preflabel['_source']['language'].split('-')[0] == lang.split('-')[0]:
            ret = preflabel['_source']
        # BUGFIX: this previously compared against `lang`, which is
        # unreachable (an exact match returns above); compare against the
        # site default language as the sibling implementations do.
        if preflabel['_source']['language'] == settings.LANGUAGE_CODE and ret is None:
            ret = preflabel['_source']
    return default if ret is None else ret
def delete(self, *args, **kwargs):
    """Delete this tile, its child tiles, and its indexed term documents.

    Keyword args consumed here (popped before forwarding):
        request -- the originating HttpRequest, may be absent
        provisional_edit_log_details -- extra detail for the edit log
    Reviewers (or the owner of a provisional tile) delete for real and
    reindex the parent resource; other users only record a provisional
    "delete" edit and re-save the tile.
    """
    se = SearchEngineFactory().create()
    request = kwargs.pop('request', None)
    provisional_edit_log_details = kwargs.pop('provisional_edit_log_details', None)
    for tile in self.tiles:
        tile.delete(*args, request=request, **kwargs)
    try:
        user = request.user
        user_is_reviewer = request.user.groups.filter(name='Resource Reviewer').exists()
    except AttributeError:  # no user
        user = None
        # BUGFIX: user_is_reviewer was never assigned on this path, so the
        # check below raised UnboundLocalError whenever no request/user was
        # supplied; treat the no-user case as a reviewer delete (matches
        # the sibling implementation of this method).
        user_is_reviewer = True
    if user_is_reviewer is True or self.user_owns_provisional(user):
        # Remove the term-index documents generated from this tile.
        query = Query(se)
        bool_query = Bool()
        bool_query.filter(Terms(field='tileid', terms=[self.tileid]))
        query.add_query(bool_query)
        results = query.search(index='terms')['hits']['hits']
        for result in results:
            se.delete(index='terms', id=result['_id'])
        self.__preDelete(request)
        self.save_edit(
            # BUGFIX: was request.user, which raises when request is None;
            # `user` holds the same value whenever a request exists.
            user=user,
            edit_type='tile delete',
            old_value=self.data,
            provisional_edit_log_details=provisional_edit_log_details)
        super(Tile, self).delete(*args, **kwargs)
        resource = Resource.objects.get(resourceinstanceid=self.resourceinstance.resourceinstanceid)
        resource.index()
    else:
        self.apply_provisional_edit(user, data={}, action='delete')
        super(Tile, self).save(*args, **kwargs)
def search_terms(request):
    """Autocomplete endpoint for the search box.

    Queries the 'terms' and 'concepts' indexes for values matching the
    ``q`` querystring (prefix and fuzzy matches), aggregates the hits by
    raw value, and returns per-index lists of suggestion dicts. Provisional
    terms are hidden from users who are not Resource Reviewers.
    """
    lang = request.GET.get('lang', settings.LANGUAGE_CODE)
    se = SearchEngineFactory().create()
    searchString = request.GET.get('q', '')
    user_is_reviewer = request.user.groups.filter(name='Resource Reviewer').exists()
    i = 0  # running suggestion id, shared across both indexes
    ret = {}
    for index in ['terms', 'concepts']:
        # limit=0: only the aggregations matter, not the raw hits.
        query = Query(se, start=0, limit=0)
        boolquery = Bool()
        boolquery.should(Match(field='value', query=searchString.lower(), type='phrase_prefix'))
        boolquery.should(Match(field='value.folded', query=searchString.lower(), type='phrase_prefix'))
        boolquery.should(Match(field='value.folded', query=searchString.lower(), fuzziness='AUTO', prefix_length=settings.SEARCH_TERM_SENSITIVITY))
        if user_is_reviewer is False and index == 'terms':
            # Non-reviewers must not see provisional terms.
            boolquery.filter(Terms(field='provisional', terms=['false']))
        query.add_query(boolquery)
        # Bucket matches by raw value ordered by best score; nest concept
        # and nodegroup aggregations under each value bucket.
        base_agg = Aggregation(name='value_agg', type='terms', field='value.raw', size=settings.SEARCH_DROPDOWN_LENGTH, order={"max_score": "desc"})
        nodegroupid_agg = Aggregation(name='nodegroupid', type='terms', field='nodegroupid')
        top_concept_agg = Aggregation(name='top_concept', type='terms', field='top_concept')
        conceptid_agg = Aggregation(name='conceptid', type='terms', field='conceptid')
        max_score_agg = MaxAgg(name='max_score', script='_score')
        top_concept_agg.add_aggregation(conceptid_agg)
        base_agg.add_aggregation(max_score_agg)
        base_agg.add_aggregation(top_concept_agg)
        base_agg.add_aggregation(nodegroupid_agg)
        query.add_aggregation(base_agg)
        ret[index] = []
        results = query.search(index=index)
        for result in results['aggregations']['value_agg']['buckets']:
            if len(result['top_concept']['buckets']) > 0:
                # Value is tied to one or more concepts: emit a suggestion
                # per concept under each top concept.
                for top_concept in result['top_concept']['buckets']:
                    top_concept_id = top_concept['key']
                    top_concept_label = get_preflabel_from_conceptid(top_concept['key'], lang)['value']
                    for concept in top_concept['conceptid']['buckets']:
                        ret[index].append({
                            'type': 'concept',
                            'context': top_concept_id,
                            'context_label': top_concept_label,
                            'id': i,
                            'text': result['key'],
                            'value': concept['key']
                        })
                    i = i + 1
            else:
                # Free-text term suggestion (no associated concept).
                ret[index].append({
                    'type': 'term',
                    'context': '',
                    'context_label': get_resource_model_label(result),
                    'id': i,
                    'text': result['key'],
                    'value': result['key']
                })
                i = i + 1
    return JSONResponse(ret)
def get_auto_filter(request):
    """Build the automatic concept term filter keyed on the published label.

    Searches the term index for the configured PUBLISHED_LABEL, finds the
    matching status concept, and returns a non-inverted concept filter dict
    for it.
    """
    lang = request.GET.get('lang', settings.LANGUAGE_CODE)
    engine = SearchEngineFactory().create()
    published = settings.PUBLISHED_LABEL
    term_query = Query(engine, start=0, limit=settings.SEARCH_DROPDOWN_LENGTH)
    matcher = Bool()
    matcher.should(Match(field='term', query=published.lower(), type='phrase_prefix', fuzziness='AUTO'))
    matcher.should(Match(field='term.folded', query=published.lower(), type='phrase_prefix', fuzziness='AUTO'))
    matcher.should(Match(field='term.folded', query=published.lower(), fuzziness='AUTO'))
    term_query.add_query(matcher)
    hits = term_query.search(index='term', doc_type='value')
    matched_conceptid = ''
    matched_context = ''
    for hit in hits['hits']['hits']:
        source = hit['_source']
        prefLabel = get_preflabel_from_conceptid(source['context'], lang)
        source['options']['context_label'] = prefLabel['value']
        # Keep the concept whose context label is the status term and whose
        # term text is exactly the published label.
        if prefLabel['value'] == settings.EW_STATUS_TERM and source['term'] == published:
            matched_conceptid = source['options']['conceptid']
            matched_context = source['context']
    return {
        "inverted": False,
        "type": "concept",
        "text": published,
        "value": matched_conceptid,
        "context": matched_context,
        "context_label": settings.EW_STATUS_TERM,
        "id": published + matched_conceptid,
    }
def delete(self, user={}, note=''):
    """
    Deletes a single resource and any related indexed data

    """
    engine = SearchEngineFactory().create()

    # Drop every relationship record that references this resource.
    relations = self.get_related_resources(lang="en-US", start=0, limit=1000)
    for relationship in relations['resource_relationships']:
        models.ResourceXResource.objects.get(pk=relationship['resourcexid']).delete()

    # Remove the indexed term strings produced by this instance's tiles.
    term_query = Query(engine)
    instance_filter = Bool()
    instance_filter.filter(Terms(field='resourceinstanceid', terms=[self.resourceinstanceid]))
    term_query.add_query(instance_filter)
    for hit in term_query.search(index='strings', doc_type='term')['hits']['hits']:
        engine.delete(index='strings', doc_type='term', id=hit['_id'])

    # Remove the resource document, log the edit, then delete the row.
    engine.delete(index='resource', doc_type=str(self.graph_id), id=self.resourceinstanceid)
    self.save_edit(edit_type='delete')
    super(Resource, self).delete()
def get_related_resources(resourceid, lang, limit=1000, start=0):
    """Return the relationships and related resource documents for a resource.

    Searches resource_relations for records naming ``resourceid`` on either
    side, attaches the preferred relationship label for ``lang``, then bulk
    fetches the related entity documents.
    """
    ret = {'resource_relationships': [], 'related_resources': []}
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit, start=start)
    query.add_filter(Terms(field='entityid1', terms=resourceid).dsl, operator='or')
    query.add_filter(Terms(field='entityid2', terms=resourceid).dsl, operator='or')
    resource_relations = query.search(index='resource_relations', doc_type='all')
    ret['total'] = resource_relations['hits']['total']
    entityids = set()
    for relation in resource_relations['hits']['hits']:
        relation['_source']['preflabel'] = get_preflabel_from_valueid(
            relation['_source']['relationshiptype'], lang)
        ret['resource_relationships'].append(relation['_source'])
        entityids.add(relation['_source']['entityid1'])
        entityids.add(relation['_source']['entityid2'])
    # BUGFIX: discard (not remove) so an id set that somehow lacks
    # resourceid does not raise KeyError; skip the mget when nothing remains.
    entityids.discard(resourceid)
    if len(entityids) > 0:
        related_resources = se.search(index='entity', doc_type='_all', id=list(entityids))
        if related_resources:
            for resource in related_resources['docs']:
                ret['related_resources'].append(resource['_source'])
    return ret
def get_relations(resourceinstanceid, start, limit):
    """Return relations where the given instance appears as source or target."""
    q = Query(se, start=start, limit=limit)
    either_side = Bool()
    for fieldname in ('resourceinstanceidfrom', 'resourceinstanceidto'):
        either_side.should(Terms(field=fieldname, terms=resourceinstanceid))
    q.add_query(either_side)
    return q.search(index='resource_relations', doc_type='all')
def get_preflabel_from_conceptid(conceptid, lang):
    """Return the preferred label document for a concept id.

    Preference order: exact language match, same language family,
    the site default language, then the last label seen.
    """
    default = {"category": "", "conceptid": "", "language": "", "value": "", "type": "", "id": ""}
    best_partial = None
    label_query = Query(se)
    criteria = Bool()
    criteria.must(Match(field="type", query="prefLabel", type="phrase"))
    criteria.filter(Terms(field="conceptid", terms=[conceptid]))
    label_query.add_query(criteria)
    for hit in label_query.search(index=CONCEPTS_INDEX)["hits"]["hits"]:
        source = hit["_source"]
        default = source
        if source["language"] == lang:
            # Exact language match wins immediately.
            return source
        if source["language"].split("-")[0] == lang.split("-")[0]:
            best_partial = source
        if source["language"] == settings.LANGUAGE_CODE and best_partial is None:
            best_partial = source
    return best_partial if best_partial is not None else default
def arch_investigation_layer(request, boundtype=''):
    """Return a GeoJSON FeatureCollection of shovel-test geometries attached
    to ARCHAEOLOGICAL_ZONE.E53 entities.

    Honors the ``limit`` querystring parameter (defaults to
    MAP_LAYER_FEATURE_LIMIT). ``boundtype`` is currently unused but kept for
    interface compatibility.
    """
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    geojson_collection = {"type": "FeatureCollection", "features": []}
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)
    data = query.search(index='entity', doc_type='ARCHAEOLOGICAL_ZONE.E53')
    for item in data['hits']['hits']:
        for geom in item['_source']['geometries']:
            if geom['entitytypeid'] == 'SHOVEL_TEST_GEOMETRY.E47':
                # BUGFIX: removed a leftover Python 2 debug statement
                # (`print json.dumps(geom, indent=2)`).
                geojson_collection['features'].append({
                    'geometry': geom['value'],
                    'type': "Feature",
                    'id': item['_source']['entityid'],
                })
    return JSONResponse(geojson_collection)
def get_resource_bounds(node):
    """Return the geo-bounds aggregation over all indexed resources of the
    node's graph, or None when the index reports no bounds."""
    bounds_query = Query(se, start=0, limit=0)
    bounds_query.add_query(Bool())
    bounds_query.add_aggregation(GeoBoundsAgg(field='points.point', name='bounds'))
    results = bounds_query.search(index='resource', doc_type=[str(node.graph.pk)])
    agg = results['aggregations']['bounds']
    return agg['bounds'] if 'bounds' in agg else None
def search_terms(request):
    """Autocomplete endpoint for the search box.

    Queries the 'strings' index for values matching the ``q`` querystring
    (prefix and fuzzy matches), aggregates hits by raw value, and returns a
    flat list of concept/term suggestion dicts. Provisional values are
    hidden from users who are not Resource Reviewers.
    """
    lang = request.GET.get('lang', settings.LANGUAGE_CODE)
    se = SearchEngineFactory().create()
    searchString = request.GET.get('q', '')
    # limit=0: only the aggregations matter, not the raw hits.
    query = Query(se, start=0, limit=0)
    user_is_reviewer = request.user.groups.filter(name='Resource Reviewer').exists()
    boolquery = Bool()
    boolquery.should(Match(field='value', query=searchString.lower(), type='phrase_prefix', fuzziness='AUTO'))
    boolquery.should(Match(field='value.folded', query=searchString.lower(), type='phrase_prefix', fuzziness='AUTO'))
    boolquery.should(Match(field='value.folded', query=searchString.lower(), fuzziness='AUTO'))
    if user_is_reviewer is False:
        # Non-reviewers must not see provisional values.
        boolquery.filter(Terms(field='provisional', terms=['false']))
    query.add_query(boolquery)
    # Bucket matches by raw value ordered by best score; nest concept and
    # nodegroup aggregations under each value bucket.
    base_agg = Aggregation(name='value_agg', type='terms', field='value.raw', size=settings.SEARCH_DROPDOWN_LENGTH, order={"max_score": "desc"})
    nodegroupid_agg = Aggregation(name='nodegroupid', type='terms', field='nodegroupid')
    top_concept_agg = Aggregation(name='top_concept', type='terms', field='top_concept')
    conceptid_agg = Aggregation(name='conceptid', type='terms', field='conceptid')
    max_score_agg = MaxAgg(name='max_score', script='_score')
    top_concept_agg.add_aggregation(conceptid_agg)
    base_agg.add_aggregation(max_score_agg)
    base_agg.add_aggregation(top_concept_agg)
    base_agg.add_aggregation(nodegroupid_agg)
    query.add_aggregation(base_agg)
    # Fall back to an empty result shape if the search returns nothing.
    results = query.search(index='strings') or {'hits': {'hits':[]}}
    i = 0; ret = []
    for result in results['aggregations']['value_agg']['buckets']:
        if len(result['top_concept']['buckets']) > 0:
            # Value is tied to one or more concepts: emit a suggestion per
            # concept under each top concept.
            for top_concept in result['top_concept']['buckets']:
                top_concept_id = top_concept['key']
                top_concept_label = get_preflabel_from_conceptid(top_concept['key'], lang)['value']
                for concept in top_concept['conceptid']['buckets']:
                    ret.append({
                        'type': 'concept',
                        'context': top_concept_id,
                        'context_label': top_concept_label,
                        'id': i,
                        'text': result['key'],
                        'value': concept['key']
                    })
                i = i + 1
        else:
            # Free-text term suggestion (no associated concept).
            ret.append({
                'type': 'term',
                'context': '',
                'context_label': get_resource_model_label(result),
                'id': i,
                'text': result['key'],
                'value': result['key']
            })
            i = i + 1
    return JSONResponse(ret)
def delete(self, user={}, note=""):
    """
    Deletes a single resource and any related indexed data

    Deletion is only permitted when the model is active and the user is a
    reviewer (or the resource consists solely of provisional tiles).
    Returns True when the resource was deleted, False otherwise.
    """
    permit_deletion = False
    graph = models.GraphModel.objects.get(graphid=self.graph_id)
    if graph.isactive is False:
        message = _("This model is not yet active; unable to delete.")
        raise ModelInactiveError(message)
    if user != {}:
        user_is_reviewer = user_is_resource_reviewer(user)
        if user_is_reviewer is False:
            tiles = list(models.TileModel.objects.filter(resourceinstance=self))
            # A resource whose tiles carry no authoritative data is fully
            # provisional and may be deleted by its (non-reviewer) editor.
            resource_is_provisional = True if sum([len(t.data) for t in tiles]) == 0 else False
            if resource_is_provisional is True:
                permit_deletion = True
        else:
            permit_deletion = True
    else:
        permit_deletion = True
    if permit_deletion is True:
        related_resources = self.get_related_resources(lang="en-US", start=0, limit=1000, page=0)
        for rr in related_resources["resource_relationships"]:
            # delete any related resource entries, also reindex the resource
            # that references this resource that's being deleted
            try:
                resourceXresource = models.ResourceXResource.objects.get(pk=rr["resourcexid"])
                resource_to_reindex = (
                    resourceXresource.resourceinstanceidfrom_id
                    if resourceXresource.resourceinstanceidto_id == self.resourceinstanceid
                    else resourceXresource.resourceinstanceidto_id
                )
                resourceXresource.delete(deletedResourceId=self.resourceinstanceid)
                res = Resource.objects.get(pk=resource_to_reindex)
                res.load_tiles()
                res.index()
            except ObjectDoesNotExist:
                # Relation row already gone; just drop the orphaned index doc.
                se.delete(index=RESOURCE_RELATIONS_INDEX, id=rr["resourcexid"])
        # Remove the term-index documents generated from this instance.
        query = Query(se)
        bool_query = Bool()
        bool_query.filter(Terms(field="resourceinstanceid", terms=[self.resourceinstanceid]))
        query.add_query(bool_query)
        results = query.search(index=TERMS_INDEX)["hits"]["hits"]
        for result in results:
            se.delete(index=TERMS_INDEX, id=result["_id"])
        se.delete(index=RESOURCES_INDEX, id=self.resourceinstanceid)
        try:
            self.save_edit(edit_type="delete", user=user, note=self.displayname)
        except Exception:
            # BUGFIX: was a bare `except:` which also swallows SystemExit /
            # KeyboardInterrupt; edit-log failure deliberately does not
            # block the deletion itself.
            pass
        super(Resource, self).delete()

    return permit_deletion
def get_relations(resourceinstanceid, start, limit):
    """Fetch relation documents naming the given instance on either side."""
    relation_query = Query(se, limit=limit, start=start)
    side_filter = Bool()
    side_filter.should(Terms(field='resourceinstanceidfrom', terms=resourceinstanceid))
    side_filter.should(Terms(field='resourceinstanceidto', terms=resourceinstanceid))
    relation_query.add_query(side_filter)
    return relation_query.search(index='resource_relations', doc_type='all')
def search_terms(request):
    """Autocomplete endpoint: aggregate indexed strings matching ``q`` and
    return a flat list of concept/term suggestion dicts."""
    lang = request.GET.get('lang', settings.LANGUAGE_CODE)
    engine = SearchEngineFactory().create()
    lowered = request.GET.get('q', '').lower()
    query = Query(engine, start=0, limit=0)
    fuzzy = Bool()
    fuzzy.should(Match(field='value', query=lowered, type='phrase_prefix', fuzziness='AUTO'))
    fuzzy.should(Match(field='value.folded', query=lowered, type='phrase_prefix', fuzziness='AUTO'))
    fuzzy.should(Match(field='value.folded', query=lowered, fuzziness='AUTO'))
    query.add_query(fuzzy)
    # Bucket hits by raw value ordered by best score; nest concept and
    # nodegroup aggregations under each value bucket.
    value_agg = Aggregation(name='value_agg', type='terms', field='value.raw', size=settings.SEARCH_DROPDOWN_LENGTH, order={"max_score": "desc"})
    top_concept_agg = Aggregation(name='top_concept', type='terms', field='top_concept')
    top_concept_agg.add_aggregation(Aggregation(name='conceptid', type='terms', field='conceptid'))
    value_agg.add_aggregation(MaxAgg(name='max_score', script='_score'))
    value_agg.add_aggregation(top_concept_agg)
    value_agg.add_aggregation(Aggregation(name='nodegroupid', type='terms', field='nodegroupid'))
    query.add_aggregation(value_agg)
    results = query.search(index='strings') or {'hits': {'hits':[]}}
    suggestions = []
    counter = 0
    for bucket in results['aggregations']['value_agg']['buckets']:
        concept_buckets = bucket['top_concept']['buckets']
        if len(concept_buckets) > 0:
            # Concept-backed value: one suggestion per nested concept.
            for top_concept in concept_buckets:
                label = get_preflabel_from_conceptid(top_concept['key'], lang)['value']
                for concept in top_concept['conceptid']['buckets']:
                    suggestions.append({
                        'type': 'concept',
                        'context': top_concept['key'],
                        'context_label': label,
                        'id': counter,
                        'text': bucket['key'],
                        'value': concept['key']
                    })
                counter = counter + 1
        else:
            # Free-text term suggestion.
            suggestions.append({
                'type': 'term',
                'context': '',
                'context_label': '',
                'id': counter,
                'text': bucket['key'],
                'value': bucket['key']
            })
            counter = counter + 1
    return JSONResponse(suggestions)
def get_related_resources(resourceid, lang, limit=1000, start=0, allowedtypes=None, is_anon=False):
    """Return relationships and related resource documents for a resource.

    Related resources are restricted to entity types in ``allowedtypes``;
    for anonymous users, protected resources are also filtered out, and any
    relationship touching a filtered resource is dropped from the result.
    """
    # BUGFIX: avoid a mutable default argument (was `allowedtypes=[]`).
    if allowedtypes is None:
        allowedtypes = []
    ret = {
        'resource_relationships': [],
        'related_resources': []
    }
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit, start=start)
    query.add_filter(Terms(field='entityid1', terms=resourceid).dsl, operator='or')
    query.add_filter(Terms(field='entityid2', terms=resourceid).dsl, operator='or')
    resource_relations = query.search(index='resource_relations', doc_type="all")
    entityids = set()
    for relation in resource_relations['hits']['hits']:
        relation['_source']['preflabel'] = get_preflabel_from_valueid(relation['_source']['relationshiptype'], lang)
        ret['resource_relationships'].append(relation['_source'])
        entityids.add(relation['_source']['entityid1'])
        entityids.add(relation['_source']['entityid2'])
    if len(entityids) > 0:
        # discard (not remove): no KeyError if resourceid is absent
        entityids.discard(resourceid)
        # can't figure why passing allowed types to doc_type param doesn't work,
        # so filter is carried out later
        related_resources = se.search(index='entity', doc_type='_all', id=list(entityids))
        filtered_ids = []
        if related_resources:
            for resource in related_resources['docs']:
                if not resource['_type'] in allowedtypes:
                    filtered_ids.append(resource['_source']['entityid'])
                    continue
                if is_anon:
                    # filter out protected resources if user is anonymous
                    # (this is basically a subset of the get_protected_entityids
                    # below; they should be combined probably)
                    from search import get_protection_conceptids
                    protect_id = get_protection_conceptids(settings.PROTECTION_LEVEL_NODE)
                    conceptids = [d['conceptid'] for d in resource['_source']['domains']]
                    if protect_id in conceptids:
                        filtered_ids.append(resource['_source']['entityid'])
                        continue
                ret['related_resources'].append(resource['_source'])
        if len(filtered_ids) > 0:
            # remove all relationships that touch a filtered resource
            ret['resource_relationships'] = [
                rel for rel in ret['resource_relationships']
                if rel['entityid1'] not in filtered_ids and rel['entityid2'] not in filtered_ids
            ]
    # BUGFIX: always report a total (it was previously set only when
    # filtering removed at least one relationship).
    ret['total'] = len(ret['resource_relationships'])
    return ret
def map_layers(request, entitytypeid='all', get_centroids=False):
    """
    Serve indexed map-layer documents as a GeoJSON FeatureCollection.

    Query params: ``entityid`` short-circuits to direct lookups; ``geom``
    selects an alternate geometry property; ``get_centroids`` swaps the
    geometry for the stored centroid and strips presentation-only properties.
    Unauthenticated users only see records whose ewstatus is published.
    """
    geom_param = request.GET.get('geom', None)
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    entityids = request.GET.get('entityid', '')
    geojson_collection = {"type": "FeatureCollection", "features": []}
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)
    args = {'index': 'maplayers'}
    if entitytypeid != 'all':
        args['doc_type'] = entitytypeid
    if entityids != '':
        for entityid in entityids.split(','):
            geojson_collection['features'].append(
                se.search(index='maplayers', id=entityid)['_source'])
        return JSONResponse(geojson_collection)
    data = query.search(**args)
    if not data:
        return JSONResponse({})
    for item in data['hits']['hits']:
        # If the user is not authenticated, only show published records
        # (probably needs more work -- maybe they should only see their own?)
        # was: ``not request.user.username != 'anonymous'`` -- a double
        # negative equivalent to this direct comparison
        if request.user.username == 'anonymous':
            if item['_source']['properties']['ewstatus'] != settings.PUBLISHED_LABEL:
                continue
        if get_centroids:
            item['_source']['geometry'] = item['_source']['properties']['centroid']
            # strip everything the centroid view does not need
            for key in ('extent', 'elements', 'entitytypeid', 'constructions',
                        'centroid', 'ewstatus', 'address', 'designations',
                        'primaryname', 'resource_type'):
                item['_source']['properties'].pop(key, None)
        elif geom_param is not None:
            item['_source']['geometry'] = item['_source']['properties'][geom_param]
            item['_source']['properties'].pop('extent', None)
            item['_source']['properties'].pop(geom_param, None)
        else:
            item['_source']['properties'].pop('extent', None)
            item['_source']['properties'].pop('centroid', None)
        geojson_collection['features'].append(item['_source'])
    return JSONResponse(geojson_collection)
def get_resource_bounds(node):
    """Return the geo-bounds aggregation over 'points.point' for resources of
    the node's graph, or None when the index reports no bounds."""
    query = Query(se, start=0, limit=0)
    query.add_query(Bool())
    query.add_aggregation(GeoBoundsAgg(field='points.point', name='bounds'))
    results = query.search(index='resource', doc_type=[str(node.graph_id)])
    # dict.get is equivalent to the original "key in dict" conditional
    return results['aggregations']['bounds'].get('bounds')
def delete_index(self, resourceinstanceid=None):
    """
    Deletes all references to a resource from all indexes

    Keyword Arguments:
    resourceinstanceid -- the resource instance id to delete from related indexes, if supplied will use this over self.resourceinstanceid
    """
    if resourceinstanceid is None:
        resourceinstanceid = self.resourceinstanceid
    resourceinstanceid = str(resourceinstanceid)

    # delete any related terms
    query = Query(se)
    bool_query = Bool()
    bool_query.filter(
        Terms(field="resourceinstanceid", terms=[resourceinstanceid]))
    query.add_query(bool_query)
    query.delete(index=TERMS_INDEX)

    # delete any related resource index entries
    # (should-clauses: relations where this resource is either endpoint)
    query = Query(se)
    bool_query = Bool()
    bool_query.should(
        Terms(field="resourceinstanceidto", terms=[resourceinstanceid]))
    bool_query.should(
        Terms(field="resourceinstanceidfrom", terms=[resourceinstanceid]))
    query.add_query(bool_query)
    query.delete(index=RESOURCE_RELATIONS_INDEX)

    # reindex any related resources so they no longer point at this one
    query = Query(se)
    bool_query = Bool()
    bool_query.filter(
        Nested(path="ids", query=Terms(field="ids.id", terms=[resourceinstanceid])))
    query.add_query(bool_query)
    results = query.search(index=RESOURCES_INDEX)["hits"]["hits"]
    for result in results:
        try:
            res = Resource.objects.get(pk=result["_id"])
            res.load_tiles()
            res.index()
        except ObjectDoesNotExist:
            # related resource already gone from the database; nothing to reindex
            pass

    # delete resource index
    se.delete(index=RESOURCES_INDEX, id=resourceinstanceid)

    # delete resources from custom indexes
    for index in settings.ELASTICSEARCH_CUSTOM_INDEXES:
        es_index = import_class_from_string(index["module"])(index["name"])
        es_index.delete_resources(resources=self)
def map_layers(request, entitytypeid='all', get_centroids=False):
    """
    Serve indexed map-layer documents as a GeoJSON FeatureCollection.
    Anonymous users only see records whose ewstatus is published; the
    ``geom`` and ``get_centroids`` options shape the returned geometry.
    """
    data = []
    geom_param = request.GET.get('geom', None)
    bbox = request.GET.get('bbox', '')
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    entityids = request.GET.get('entityid', '')
    geojson_collection = {"type": "FeatureCollection", "features": []}
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)

    search_args = {'index': 'maplayers'}
    if entitytypeid != 'all':
        search_args['doc_type'] = entitytypeid

    # explicit entity ids bypass the search entirely
    if entityids != '':
        for entityid in entityids.split(','):
            doc = se.search(index='maplayers', id=entityid)['_source']
            geojson_collection['features'].append(doc)
        return JSONResponse(geojson_collection)

    data = query.search(**search_args)
    if not data:
        return JSONResponse({})

    for hit in data['hits']['hits']:
        source = hit['_source']
        props = source['properties']
        # hide unpublished records from anonymous users
        if request.user.username == 'anonymous' and props['ewstatus'] != settings.PUBLISHED_LABEL:
            continue
        if get_centroids:
            source['geometry'] = props['centroid']
            # keep only what the centroid view needs
            for key in ('extent', 'elements', 'entitytypeid', 'constructions',
                        'centroid', 'ewstatus', 'address', 'designations',
                        'primaryname', 'resource_type'):
                props.pop(key, None)
        elif geom_param is not None:
            source['geometry'] = props[geom_param]
            props.pop('extent', None)
            props.pop(geom_param, None)
        else:
            props.pop('extent', None)
            props.pop('centroid', None)
        geojson_collection['features'].append(source)
    return JSONResponse(geojson_collection)
def map_layers(request, entitytypeid='all', get_centroids=False):
    """
    Serve indexed map-layer documents as a GeoJSON FeatureCollection.
    Anonymous users get protected entity ids filtered out of the result.
    ``geom`` selects an alternate geometry property; ``get_centroids``
    replaces the geometry with the centroid and drops properties.
    """
    data = []
    geom_param = request.GET.get('geom', None)
    bbox = request.GET.get('bbox', '')
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    entityids = request.GET.get('entityid', '')
    geojson_collection = {"type": "FeatureCollection", "features": []}
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)
    args = {'index': 'maplayers'}
    if entitytypeid != 'all':
        args['doc_type'] = entitytypeid
    if entityids != '':
        for entityid in entityids.split(','):
            geojson_collection['features'].append(
                se.search(index='maplayers', id=entityid)['_source'])
        return JSONResponse(geojson_collection)
    data = query.search(**args)

    # if anonymous user, get list of protected entity ids to be excluded from map
    protected = []
    if request.user.username == 'anonymous':
        protected = get_protected_entityids()

    for item in data['hits']['hits']:
        # removed Python 2 debug ``print`` statements that dumped every hidden
        # record (including its full JSON) to stdout on each request
        if item['_id'] in protected:
            continue
        if get_centroids:
            item['_source']['geometry'] = item['_source']['properties']['centroid']
            item['_source'].pop('properties', None)
        elif geom_param is not None:
            item['_source']['geometry'] = item['_source']['properties'][geom_param]
            item['_source']['properties'].pop('extent', None)
            item['_source']['properties'].pop(geom_param, None)
        else:
            item['_source']['properties'].pop('extent', None)
            item['_source']['properties'].pop('centroid', None)
        geojson_collection['features'].append(item['_source'])
    return JSONResponse(geojson_collection)
def delete(self, user=None, note=''):
    """
    Deletes a single resource and any related indexed data.

    Keyword Arguments:
    user -- the user requesting deletion; non-reviewers may only delete
        provisional resources (resources whose tiles carry no data)
    note -- unused, kept for interface compatibility

    Raises ModelInactiveError when the resource's graph is not active.
    Returns True when the deletion was permitted and performed.
    """
    # ``user={}`` mutable default replaced with a None sentinel;
    # an explicit empty dict still means "no user supplied"
    if user is None:
        user = {}
    permit_deletion = False
    graph = models.GraphModel.objects.get(graphid=self.graph_id)
    if graph.isactive is False:
        message = _('This model is not yet active; unable to delete.')
        raise ModelInactiveError(message)
    if user != {}:
        user_is_reviewer = user.groups.filter(name='Resource Reviewer').exists()
        if user_is_reviewer is False:
            tiles = list(models.TileModel.objects.filter(resourceinstance=self))
            # provisional == no tile carries any data yet
            # (was ``True if sum(...) == 0 else False``)
            resource_is_provisional = sum(len(t.data) for t in tiles) == 0
            if resource_is_provisional is True:
                permit_deletion = True
        else:
            permit_deletion = True
    else:
        # no user context: internal/system call, allow
        permit_deletion = True

    if permit_deletion is True:
        se = SearchEngineFactory().create()
        # unlink relationships before removing the resource itself
        related_resources = self.get_related_resources(lang="en-US", start=0, limit=1000, page=0)
        for rr in related_resources['resource_relationships']:
            models.ResourceXResource.objects.get(pk=rr['resourcexid']).delete()
        # purge indexed terms pointing at this instance
        query = Query(se)
        bool_query = Bool()
        bool_query.filter(
            Terms(field='resourceinstanceid', terms=[self.resourceinstanceid]))
        query.add_query(bool_query)
        results = query.search(index='terms')['hits']['hits']
        for result in results:
            se.delete(index='terms', id=result['_id'])
        se.delete(index='resources', id=self.resourceinstanceid)
        self.save_edit(edit_type='delete', user=user, note=self.displayname)
        super(Resource, self).delete()

    return permit_deletion
def get_related_resources(resourceid, lang='en-US', limit=1000, start=0):
    """
    Return the relationship records where ``resourceid`` appears as entityid1.

    NOTE(review): unlike other variants in this module, this only filters on
    entityid1 -- relations where the resource is entityid2 are not returned;
    confirm that is intentional.
    """
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit, start=start)
    query.add_filter(Terms(field='entityid1', terms=resourceid).dsl, operator='or')
    hits = query.search(index='resource_relations', doc_type='all')['hits']
    ret = {'resource_relationships': [], 'related_resources': []}
    ret['total'] = hits['total']
    ret['resource_relationships'] = [hit['_source'] for hit in hits['hits']]
    return ret
def time_wheel_config(request):
    """
    Builds the hierarchical date aggregation backing the time-wheel UI:
    millennium -> century -> decade buckets spanning the min/max indexed
    dates, returned as a d3 hierarchy.  404s when no date range exists.
    """
    se = SearchEngineFactory().create()
    # first pass: just find the overall min/max of the 'dates' field
    query = Query(se, limit=0)
    query.add_aggregation(MinAgg(field='dates', format='y'))
    query.add_aggregation(MaxAgg(field='dates', format='y'))
    results = query.search(index='resource')
    if results is not None and results['aggregations']['min_dates']['value'] is not None and results['aggregations']['max_dates']['value'] is not None:
        min_date = int(results['aggregations']['min_dates']['value_as_string'])
        max_date = int(results['aggregations']['max_dates']['value_as_string'])
        # round min and max date to the nearest 1000 years
        # (negative years round away from zero so the range still covers them)
        min_date = math.ceil(math.fabs(min_date)/1000)*-1000 if min_date < 0 else math.floor(min_date/1000)*1000
        max_date = math.floor(math.fabs(max_date)/1000)*-1000 if max_date < 0 else math.ceil(max_date/1000)*1000
        # second pass: one nested date-range aggregation per millennium
        query = Query(se, limit=0)
        for millennium in range(int(min_date), int(max_date)+1000, 1000):
            min_millenium = millennium
            max_millenium = millennium + 1000
            millenium_agg = DateRangeAgg(name="Millennium (%s-%s)" % (min_millenium, max_millenium), field='dates', format='y', min_date=str(min_millenium), max_date=str(max_millenium))
            for century in range(min_millenium, max_millenium, 100):
                min_century = century
                max_century = century + 100
                century_aggregation = DateRangeAgg(name="Century (%s-%s)" % (min_century, max_century), field='dates', format='y', min_date=str(min_century), max_date=str(max_century))
                millenium_agg.add_aggregation(century_aggregation)
                for decade in range(min_century, max_century, 10):
                    min_decade = decade
                    max_decade = decade + 10
                    decade_aggregation = DateRangeAgg(name="Decade (%s-%s)" % (min_decade, max_decade), field='dates', format='y', min_date=str(min_decade), max_date=str(max_decade))
                    century_aggregation.add_aggregation(decade_aggregation)
            query.add_aggregation(millenium_agg)
        # convert the nested ES buckets into the d3 hierarchy the client expects
        root = d3Item(name='root')
        transformESAggToD3Hierarchy({'buckets': [query.search(index='resource')['aggregations']]}, root)
        return JSONResponse(root, indent=4)
    else:
        return HttpResponseNotFound(_('Error retrieving the time wheel config'))
def get_resource_bounds(node):
    """Return the geo-bounds of all indexed geometries belonging to the
    node's graph, or None when the aggregation produced no bounds."""
    query = Query(se, start=0, limit=0)
    query.add_query(Bool())
    query.add_aggregation(GeoBoundsAgg(field="points.point", name="bounds"))
    # restrict to resources of this node's graph
    query.add_query(Term(field="graph_id", term=str(node.graph.graphid)))
    results = query.search(index=RESOURCES_INDEX)
    agg = results["aggregations"]["bounds"]
    return agg["bounds"] if "bounds" in agg else None
def get_related_resources(resourceid, lang='en-US', limit=1000, start=0):
    """Collect resource_relations hits where ``resourceid`` is entityid1 and
    return them with the total hit count; related_resources stays empty."""
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit, start=start)
    query.add_filter(Terms(field='entityid1', terms=resourceid).dsl, operator='or')
    hits = query.search(index='resource_relations', doc_type='all')['hits']
    relationships = []
    for hit in hits['hits']:
        relationships.append(hit['_source'])
    return {
        'resource_relationships': relationships,
        'related_resources': [],
        'total': hits['total'],
    }
def get_search_range_contexts(request):
    """
    Build (and cache for 24 hours) the range-search context mapping:
    {context_key: {text_key: {'conceptid', 'context', 'valueid'}}} resolved
    from settings.RANGE_TERMS via the term index.
    """
    search_range_context = cache.get('search_range_contexts')
    if search_range_context is not None:
        # served straight from cache
        return search_range_context
    lang = request.GET.get('lang', request.LANGUAGE_CODE)
    se1 = SearchEngineFactory().create()
    context_label1 = '-'
    search_range_context = {}
    for search_term in settings.RANGE_TERMS:
        searchString1 = search_term['text']
        query1 = Query(se1, start=0, limit=settings.SEARCH_DROPDOWN_LENGTH)
        boolquery1 = Bool()
        boolquery1.should(Match(field='term', query=searchString1.lower(), type='phrase_prefix', fuzziness='AUTO'))
        boolquery1.should(Match(field='term.folded', query=searchString1.lower(), type='phrase_prefix', fuzziness='AUTO'))
        boolquery1.should(Match(field='term.folded', query=searchString1.lower(), fuzziness='AUTO'))
        query1.add_query(boolquery1)
        results1 = query1.search(index='term', doc_type='value')
        conceptid1 = ''
        context1 = ''
        for result1 in results1['hits']['hits']:
            prefLabel = get_preflabel_from_conceptid(result1['_source']['context'], lang)
            result1['_source']['options']['context_label'] = prefLabel['value']
            # keep the last hit whose context label and term both match
            if (prefLabel['value'] == search_term['context_label'] and result1['_source']['term'] == search_term['text']):
                conceptid1 = result1['_source']['options']['conceptid']
                context1 = result1['_source']['context']
                valueid1 = result1['_source']['ids'][0]
        result = {'conceptid': conceptid1, 'context': context1, 'valueid': valueid1}
        # ``<>`` (removed in Python 3, deprecated in Python 2) replaced by ``!=``
        if context_label1 != search_term['context_label']:
            # the context changed: start a fresh bucket; same-context terms
            # keep accumulating into the existing ``value`` dict
            value = {}
        value[search_term['text_key']] = result
        search_range_context[search_term['context_key']] = value
        context_label1 = search_term['context_label']
    cache.set('search_range_contexts', search_range_context, 86400)
    return search_range_context
def get_search_contexts(request):
    """
    Build (and cache for 24 hours) the search context mapping:
    {context_key: {text_key: {'conceptid', 'context'}}} resolved from
    settings.SEARCH_TERMS via the term index.
    """
    search_context = cache.get('search_contexts')
    if search_context is not None:
        # served straight from cache
        return search_context
    lang = request.GET.get('lang', settings.LANGUAGE_CODE)
    se1 = SearchEngineFactory().create()
    context_label1 = '-'
    search_context = {}
    for search_term in settings.SEARCH_TERMS:
        # removed Python 2 debug ``print`` statements that wrote every search
        # term and result to stdout on each cache miss
        searchString1 = search_term['text']
        query1 = Query(se1, start=0, limit=settings.SEARCH_DROPDOWN_LENGTH)
        boolquery1 = Bool()
        boolquery1.should(Match(field='term', query=searchString1.lower(), type='phrase_prefix', fuzziness='AUTO'))
        boolquery1.should(Match(field='term.folded', query=searchString1.lower(), type='phrase_prefix', fuzziness='AUTO'))
        boolquery1.should(Match(field='term.folded', query=searchString1.lower(), fuzziness='AUTO'))
        query1.add_query(boolquery1)
        results1 = query1.search(index='term', doc_type='value')
        conceptid1 = ''
        context1 = ''
        for result1 in results1['hits']['hits']:
            prefLabel = get_preflabel_from_conceptid(result1['_source']['context'], lang)
            result1['_source']['options']['context_label'] = prefLabel['value']
            # keep the last hit whose context label and term both match
            if (prefLabel['value'] == search_term['context_label'] and result1['_source']['term'] == search_term['text']):
                conceptid1 = result1['_source']['options']['conceptid']
                context1 = result1['_source']['context']
        result = {'conceptid': conceptid1, 'context': context1}
        # ``<>`` (removed in Python 3, deprecated in Python 2) replaced by ``!=``
        if context_label1 != search_term['context_label']:
            # the context changed: start a fresh bucket; same-context terms
            # keep accumulating into the existing ``value`` dict
            value = {}
        value[search_term['text_key']] = result
        search_context[search_term['context_key']] = value
        context_label1 = search_term['context_label']
    cache.set('search_contexts', search_context, 86400)
    return search_context
def get_indexed_concepts(se, conceptid, concept_value):
    """
    Searches for a conceptid from the database and confirms that the
    database concept value matches the indexed value.

    Returns 'passed', or a 'failed: ...' diagnostic string.
    """
    # fixed message: the missing trailing space made this render as
    # "failed: cannot find<conceptid>"
    result = 'failed: cannot find ' + conceptid
    query = Query(se, start=0, limit=100)
    phrase = Match(field='conceptid', query=conceptid, type='phrase_prefix')
    query.add_query(phrase)
    results = query.search(index='concept_labels')
    hits = results['hits']['hits']
    if hits:
        # only the first hit is checked, matching the original behavior
        source = hits[0]['_source']
        if conceptid == source['conceptid'] or concept_value == source['value']:
            result = 'passed'
        else:
            result = 'failed: concept value does not match'
    return result
def quick_query(self, rules, doc):
    """Apply the rule's access-level filter clause, then run the paramount
    query against the resources index and return the raw ES response."""
    access_level = rules["access_level"]
    if access_level == "attribute_filter":
        self.add_attribute_filter_clause(doc, rules["filter_config"])
    elif access_level == "geo_filter":
        self.add_geo_filter_clause(doc, rules["filter_config"]["geometry"])
    engine = SearchEngineFactory().create()
    query = Query(engine, start=0, limit=10000)
    query.include('graph_id')
    query.include('resourceinstanceid')
    query.add_query(self.paramount)
    ## doc_type is deprecated, must use a filter for graphid instead (i think)
    return query.search(index='resources', doc_type=doc)
def delete(self, *args, **kwargs):
    """
    Deletes this tile, its child tiles, and any term-index entries that
    reference it, then reindexes the parent resource.

    Keyword Arguments:
    request -- popped from kwargs and forwarded to child deletions and
        the pre-delete hook
    """
    se = SearchEngineFactory().create()
    request = kwargs.pop('request', None)
    # ``itervalues()`` is Python 2 only; ``values()`` iterates the same
    # lists and keeps this forward-compatible with Python 3
    for tiles in self.tiles.values():
        for tile in tiles:
            tile.delete(*args, request=request, **kwargs)
    # purge term-index documents pointing at this tile
    query = Query(se)
    bool_query = Bool()
    bool_query.filter(Terms(field='tileid', terms=[self.tileid]))
    query.add_query(bool_query)
    results = query.search(index='strings', doc_type='term')['hits']['hits']
    for result in results:
        se.delete(index='strings', doc_type='term', id=result['_id'])
    self.__preDelete(request)
    super(Tile, self).delete(*args, **kwargs)
    # reindex the owning resource so the removed tile disappears from search
    resource = Resource.objects.get(resourceinstanceid=self.resourceinstance.resourceinstanceid)
    resource.index()
def delete(self):
    """
    Deletes a single resource and any related indexed data
    """
    se = SearchEngineFactory().create()

    # unlink relationships first so no dangling ResourceXResource rows remain
    related = self.get_related_resources(lang="en-US", start=0, limit=15)
    for relationship in related['resource_relationships']:
        models.ResourceXResource.objects.get(pk=relationship['resourcexid']).delete()

    # purge indexed terms that point at this resource instance
    query = Query(se)
    bool_query = Bool()
    bool_query.filter(Terms(field='resourceinstanceid', terms=[self.resourceinstanceid]))
    query.add_query(bool_query)
    term_hits = query.search(index='strings', doc_type='term')['hits']['hits']
    for hit in term_hits:
        se.delete(index='strings', doc_type='term', id=hit['_id'])

    # remove the resource document itself, then the database row
    se.delete(index='resource', doc_type=str(self.graph_id), id=self.resourceinstanceid)
    super(Resource, self).delete()
def get_related_resource_ids(resourceids, lang, limit=1000, start=0):
    """Return the set of entity ids related to any id in ``resourceids``
    (only the "other half" of each relation, never the queried ids)."""
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit, start=start)
    query.add_filter(Terms(field='entityid1', terms=resourceids).dsl, operator='or')
    query.add_filter(Terms(field='entityid2', terms=resourceids).dsl, operator='or')
    relations = query.search(index='resource_relations', doc_type='all')
    related = set()
    for hit in relations['hits']['hits']:
        src = hit['_source']
        # keep the endpoint opposite each queried id
        if src['entityid1'] in resourceids:
            related.add(src['entityid2'])
        if src['entityid2'] in resourceids:
            related.add(src['entityid1'])
    return related
def delete(self, *args, **kwargs):
    """
    Deletes this tile, its child tiles, and any term-index entries that
    reference it, records a 'tile delete' edit, then reindexes the parent
    resource.

    Keyword Arguments:
    request -- popped from kwargs; forwarded to child deletions and hooks.
        NOTE(review): ``request.user`` below will raise if request is None --
        confirm all callers supply a request.
    """
    se = SearchEngineFactory().create()
    request = kwargs.pop('request', None)
    # ``itervalues()`` is Python 2 only; ``values()`` iterates the same
    # lists and keeps this forward-compatible with Python 3
    for tiles in self.tiles.values():
        for tile in tiles:
            tile.delete(*args, request=request, **kwargs)
    # purge term-index documents pointing at this tile
    query = Query(se)
    bool_query = Bool()
    bool_query.filter(Terms(field='tileid', terms=[self.tileid]))
    query.add_query(bool_query)
    results = query.search(index='strings', doc_type='term')['hits']['hits']
    for result in results:
        se.delete(index='strings', doc_type='term', id=result['_id'])
    self.__preDelete(request)
    # audit trail: keep the tile's data in the edit log before it is gone
    self.save_edit(user=request.user, edit_type='tile delete', old_value=self.data)
    super(Tile, self).delete(*args, **kwargs)
    # reindex the owning resource so the removed tile disappears from search
    resource = Resource.objects.get(resourceinstanceid=self.resourceinstance.resourceinstanceid)
    resource.index()
def get_preflabel_from_conceptid(conceptid, lang):
    """
    Return the preferred label document for ``conceptid``.

    Preference order: exact ``lang`` match (returned immediately) >
    same base language > site default language > last prefLabel seen.
    An empty placeholder is returned when no labels are indexed.
    """
    ret = None
    # placeholder returned only when no prefLabel documents exist at all
    default = {"category": "", "conceptid": "", "language": "", "value": "", "type": "", "id": ""}
    se = SearchEngineFactory().create()
    query = Query(se)
    terms = Terms(field="conceptid", terms=[conceptid])
    match = Match(field="type", query="preflabel", type="phrase")
    query.add_filter(terms)
    query.add_query(match)
    preflabels = query.search(index="concept_labels")["hits"]["hits"]
    for preflabel in preflabels:
        # ``default`` tracks the most recently seen label as a last resort
        default = preflabel["_source"]
        # get the label in the preferred language, otherwise get the label in the default language
        if preflabel["_source"]["language"] == lang:
            return preflabel["_source"]
        # base-language match (e.g. 'en' vs 'en-US') beats the site default
        if preflabel["_source"]["language"].split("-")[0] == lang.split("-")[0]:
            ret = preflabel["_source"]
        if preflabel["_source"]["language"] == settings.LANGUAGE_CODE and ret == None:
            ret = preflabel["_source"]
    return default if ret == None else ret
def get_related_resources(self, lang='en-US', limit=1000, start=0):
    """
    Returns an object that lists the related resources, the relationship
    types, and a reference to the current resource
    """
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit, start=start)
    either_endpoint = Bool()
    either_endpoint.should(
        Terms(field='resourceinstanceidfrom', terms=self.resourceinstanceid))
    either_endpoint.should(
        Terms(field='resourceinstanceidto', terms=self.resourceinstanceid))
    query.add_query(either_endpoint)
    relations = query.search(index='resource_relations', doc_type='all')

    ret = {
        'resource_instance': self,
        'resource_relationships': [],
        'related_resources': [],
        'total': relations['hits']['total'],
    }
    instanceids = set()
    for relation in relations['hits']['hits']:
        source = relation['_source']
        # annotate each relation with a human-readable relationship label
        source['preflabel'] = get_preflabel_from_valueid(source['relationshiptype'], lang)
        ret['resource_relationships'].append(source)
        instanceids.add(source['resourceinstanceidto'])
        instanceids.add(source['resourceinstanceidfrom'])
    if len(instanceids) > 0:
        # drop this resource itself; only the *other* endpoints are "related"
        instanceids.remove(str(self.resourceinstanceid))
    related_resources = se.search(index='resource', doc_type='_all', id=list(instanceids))
    if related_resources:
        for doc in related_resources['docs']:
            ret['related_resources'].append(doc['_source'])
    return ret
def get_min_max_extended_dates():
    """Return {'val__min', 'val__max'} years from the nested extendeddates
    aggregation, or None/None when the search yields nothing."""
    se = SearchEngineFactory().create()
    query = Query(se)
    # nested aggregation assembled by hand on the private dsl --
    # Query exposes no wrapper for this shape
    query._dsl['aggs'] = {
        "extendeddates": {
            "nested": {"path": "extendeddates"},
            "aggs": {
                "min_date": {"min": {"field": "extendeddates.value"}},
                "max_date": {"max": {"field": "extendeddates.value"}},
            },
        }
    }
    results = query.search(index='entity', doc_type='')
    if not results:
        return {'val__min': None, 'val__max': None}
    nested = results['aggregations']['extendeddates']
    return {
        'val__min': get_year_from_int(nested['min_date']['value']),
        'val__max': get_year_from_int(nested['max_date']['value']),
    }
def map_layers(request, entitytypeid='all', get_centroids=False):
    """
    Serve indexed map-layer documents as a GeoJSON FeatureCollection.
    ``entityid`` short-circuits to direct lookups; ``geom`` selects an
    alternate geometry property; ``get_centroids`` swaps the geometry for
    the stored centroid and drops all properties.
    """
    data = []
    geom_param = request.GET.get('geom', None)
    bbox = request.GET.get('bbox', '')
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    entityids = request.GET.get('entityid', '')
    collection = {"type": "FeatureCollection", "features": []}
    features = collection['features']
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)
    search_args = {'index': 'maplayers'}
    if entitytypeid != 'all':
        search_args['doc_type'] = entitytypeid
    if entityids != '':
        for entityid in entityids.split(','):
            features.append(se.search(index='maplayers', id=entityid)['_source'])
        return JSONResponse(collection)
    data = query.search(**search_args)
    for hit in data['hits']['hits']:
        source = hit['_source']
        props = source['properties']
        if get_centroids:
            source['geometry'] = props['centroid']
            source.pop('properties', None)
        elif geom_param is not None:
            source['geometry'] = props[geom_param]
            props.pop('extent', None)
            props.pop(geom_param, None)
        else:
            props.pop('extent', None)
            props.pop('centroid', None)
        features.append(source)
    return JSONResponse(collection)
def map_layers(request, entitytypeid='all', get_centroids=False):
    """
    Serve indexed map-layer documents as a GeoJSON FeatureCollection;
    see the query parameters ``entityid``, ``geom``, ``limit`` and the
    ``get_centroids`` flag for the supported geometry variants.
    """
    data = []
    geom_param = request.GET.get('geom', None)
    bbox = request.GET.get('bbox', '')
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    entityids = request.GET.get('entityid', '')
    geojson_collection = {"type": "FeatureCollection", "features": []}
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)

    search_kwargs = {'index': 'maplayers'}
    if entitytypeid != 'all':
        search_kwargs['doc_type'] = entitytypeid

    # direct id lookup bypasses the search query entirely
    if entityids != '':
        docs = [se.search(index='maplayers', id=eid)['_source']
                for eid in entityids.split(',')]
        geojson_collection['features'].extend(docs)
        return JSONResponse(geojson_collection)

    data = query.search(**search_kwargs)
    for item in data['hits']['hits']:
        src = item['_source']
        if get_centroids:
            # replace geometry with the centroid and drop all properties
            src['geometry'] = src['properties']['centroid']
            src.pop('properties', None)
        elif geom_param is not None:
            src['geometry'] = src['properties'][geom_param]
            src['properties'].pop('extent', None)
            src['properties'].pop(geom_param, None)
        else:
            src['properties'].pop('extent', None)
            src['properties'].pop('centroid', None)
        geojson_collection['features'].append(src)
    return JSONResponse(geojson_collection)
def polygon_layers(request, entitytypeid='all'):
    """
    Return every geometry of each indexed map-layer document as its own
    GeoJSON Feature; features named "Circulation" are appended last so
    they render on top of the others.
    """
    data = []
    geom_param = request.GET.get('geom', None)
    bbox = request.GET.get('bbox', '')
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    entityids = request.GET.get('entityid', '')
    geojson_collection = {"type": "FeatureCollection", "features": []}
    circulation_features = []
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)
    search_args = {'index': 'maplayers'}
    if entitytypeid != 'all':
        search_args['doc_type'] = entitytypeid
    data = query.search(**search_args)
    for item in data['hits']['hits']:
        source = item['_source']
        for shape in source['geometry']['geometries']:
            feature = {
                "geometry": shape,
                "type": "Feature",
                "id": source['id']
            }
            if source['properties']['primaryname'] == "Circulation":
                circulation_features.append(feature)
            else:
                geojson_collection['features'].append(feature)
    # circulation features go last so they draw above everything else
    geojson_collection['features'].extend(circulation_features)
    return JSONResponse(geojson_collection)
def prepare_documents_for_search_index(self):
    """
    Generates a list of specialized resource based documents to support resource search
    """
    # Arches_hip base documents, then augmented per-document below
    documents = super(Resource, self).prepare_documents_for_search_index()
    for document in documents:
        document['date_groups'] = []
        for nodes in self.get_nodes('BEGINNING_OF_EXISTENCE.E63', keys=['value']):
            document['date_groups'].append({
                'conceptid': nodes['BEGINNING_OF_EXISTENCE_TYPE_E55__value'],
                'value': nodes['START_DATE_OF_EXISTENCE_E49__value']
            })
        for nodes in self.get_nodes('END_OF_EXISTENCE.E64', keys=['value']):
            document['date_groups'].append({
                'conceptid': nodes['END_OF_EXISTENCE_TYPE_E55__value'],
                'value': nodes['END_DATE_OF_EXISTENCE_E49__value']
            })

        # Measurement values are stored keyed by the language-neutral
        # conceptid (label-based ids differ per language).  The resolution
        # logic was duplicated verbatim for grave and object measurements;
        # it now lives in _resolve_measurement_conceptid below.
        for nodes in self.get_nodes('GRAVE_MEASUREMENT_TYPE.E55', keys=['value', 'label']):
            data_type = self._resolve_measurement_conceptid(
                nodes['GRAVE_MEASUREMENT_TYPE_E55__label'],
                nodes['GRAVE_MEASUREMENT_TYPE_E55__value'])
            document['value_' + data_type] = float(nodes['VALUE_OF_MEASUREMENT_E60__value'])
        for nodes in self.get_nodes('OBJECT_MEASUREMENT_TYPE.E55', keys=['value', 'label']):
            data_type = self._resolve_measurement_conceptid(
                nodes['OBJECT_MEASUREMENT_TYPE_E55__label'],
                nodes['OBJECT_MEASUREMENT_TYPE_E55__value'])
            document['value_' + data_type] = float(nodes['VALUE_OF_MEASUREMENT_E60__value'])

        # membership test replaces the original chained ``or`` comparisons
        if self.entitytypeid in ('HERITAGE_RESOURCE.E18', 'SITE.E18', 'GRAVE.E18', 'OBJECT.E18'):
            document['searchType'] = self.get_current_type()

        # extract lon/lat from a "POINT(lon lat)" WKT value, when present
        # (get_nodes called once instead of twice as before)
        spatial_nodes = self.get_nodes('SPATIAL_COORDINATES_GEOMETRY.E47', keys=['value'])
        if spatial_nodes:
            point = spatial_nodes[0]['SPATIAL_COORDINATES_GEOMETRY_E47__value']
            if not isinstance(point, basestring):
                point = str(point)
            if point.find('POINT') >= 0:
                lon = point[6:point.find(' ', 7)]
                lat = point[point.find(' ', 7) + 1:point.find(')')]
                document['longitude'] = lon
                document['latitude'] = lat
    return documents

def _resolve_measurement_conceptid(self, label, valueid):
    """Resolve a measurement-type value id to its language-neutral conceptid
    via the term index; falls back to ``valueid`` when no hit matches."""
    se = SearchEngineFactory().create()
    query = Query(se, start=0, limit=settings.SEARCH_DROPDOWN_LENGTH)
    boolquery = Bool()
    boolquery.should(Match(field='term', query=label.lower(), type='phrase_prefix', fuzziness='AUTO'))
    boolquery.should(Match(field='term.folded', query=label.lower(), type='phrase_prefix', fuzziness='AUTO'))
    boolquery.should(Match(field='term.folded', query=label.lower(), fuzziness='AUTO'))
    query.add_query(boolquery)
    results = query.search(index='term', doc_type='value')
    data_type = valueid
    for result in results['hits']['hits']:
        # last matching hit wins, as in the original inline loops
        if valueid == result['_source']['ids'][0]:
            data_type = result['_source']['options']['conceptid']
    return data_type
def search(request):
    """Search concept labels matching the 'q' request parameter.

    Runs a phrase-prefix match against the 'strings' index and annotates
    each hit with the name of the scheme (top concept) it belongs to,
    caching scheme-name lookups so each scheme is resolved at most once.
    If 'removechildren' is supplied, that concept and all of its
    descendants are excluded from the results.
    """
    se = SearchEngineFactory().create()
    searchString = request.GET['q']
    removechildren = request.GET.get('removechildren', None)
    query = Query(se, start=0, limit=100)
    phrase = Match(field='value', query=searchString.lower(), type='phrase_prefix')
    query.add_query(phrase)
    results = query.search(index='strings', doc_type='concept')

    # Concepts to exclude: the 'removechildren' concept plus every
    # descendant.  Use a set for O(1) membership tests in the loop below
    # (the child list can be large).
    excluded_ids = set()
    if removechildren is not None:
        excluded_ids = {concept[0] for concept in Concept().get_child_concepts(removechildren, columns="conceptidto::text")}
        excluded_ids.add(removechildren)

    newresults = []
    cached_scheme_names = {}
    for result in results['hits']['hits']:
        if result['_source']['conceptid'] not in excluded_ids:
            # first look to see if we've already retrieved the top concept name
            # else look up the top concept name with ES and cache the result
            top_concept = result['_source']['top_concept']
            if top_concept in cached_scheme_names:
                result['in_scheme_name'] = cached_scheme_names[top_concept]
            else:
                query = Query(se, start=0, limit=100)
                phrase = Match(field='conceptid', query=top_concept, type='phrase')
                query.add_query(phrase)
                scheme = query.search(index='strings', doc_type='concept')
                for label in scheme['hits']['hits']:
                    if label['_source']['type'] == 'prefLabel':
                        cached_scheme_names[top_concept] = label['_source']['value']
                        result['in_scheme_name'] = label['_source']['value']
            newresults.append(result)

    results['hits']['hits'] = newresults
    return JSONResponse(results)
def search(request):
    """Search concept labels matching the 'q' request parameter.

    Runs a phrase-prefix match against the 'concept_labels' index and
    annotates each hit with its scheme name (looked up via the hit's
    '_type', which is matched against 'conceptid' below), caching scheme
    lookups so each scheme is resolved at most once.  If 'removechildren'
    is supplied, that concept and all of its subconcepts are excluded.
    """
    se = SearchEngineFactory().create()
    searchString = request.GET["q"]
    removechildren = request.GET.get("removechildren", None)
    query = Query(se, start=0, limit=100)
    phrase = Match(field="value", query=searchString.lower(), type="phrase_prefix")
    query.add_query(phrase)
    results = query.search(index="concept_labels")

    # Concepts to exclude: 'removechildren' and every subconcept beneath it.
    # Use a set for O(1) membership tests in the filter loop below.
    ids = set()
    if removechildren is not None:
        concepts = Concept().get(id=removechildren, include_subconcepts=True, include=None)

        def get_children(concept):
            ids.add(concept.id)

        concepts.traverse(get_children)

    newresults = []
    cached_scheme_names = {}
    for result in results["hits"]["hits"]:
        if result["_source"]["conceptid"] not in ids:
            # first look to see if we've already retrieved the scheme name
            # else look up the scheme name with ES and cache the result
            if result["_type"] in cached_scheme_names:
                result["in_scheme_name"] = cached_scheme_names[result["_type"]]
            else:
                query = Query(se, start=0, limit=100)
                phrase = Match(field="conceptid", query=result["_type"], type="phrase")
                query.add_query(phrase)
                scheme = query.search(index="concept_labels")
                for label in scheme["hits"]["hits"]:
                    if label["_source"]["type"] == "prefLabel":
                        cached_scheme_names[result["_type"]] = label["_source"]["value"]
                        result["in_scheme_name"] = label["_source"]["value"]
            newresults.append(result)

    results["hits"]["hits"] = newresults
    return JSONResponse(results)
def map_layers(request, entitytypeid='all', get_centroids=False):
    """Return a GeoJSON FeatureCollection of map-layer features.

    Two response paths:
      * 'entityid' supplied (GET or POST): fetch each named document from
        the 'maplayers' index, localize any concept uuids found in its
        properties, and return the features sorted by primary name.
      * otherwise: run a bulk query against 'maplayers' (optionally limited
        to one doc_type), filtered by the search type inferred from the
        HTTP referer; depending on parameters, return centroids only, a
        named geometry property, or the full feature.
    """
    lang = request.GET.get('lang', request.LANGUAGE_CODE)
    # normalize bare 'en' to the locale code used by the concept labels
    if lang == 'en':
        lang = 'en-US'
    data = []
    # name of the property to promote to the feature's geometry (optional)
    geom_param = request.GET.get('geom', None)
    print 'map_layers: ' + entitytypeid
    #print request.method
    bbox = request.GET.get('bbox', '')  # NOTE(review): read but never used below
    limit = request.GET.get('limit', settings.MAP_LAYER_FEATURE_LIMIT)
    if request.method == 'GET':
        entityids = request.GET.get('entityid', '')
    elif request.method == 'POST':
        entityids = request.POST.get('entityid', '')
    #print entityids
    geojson_collection = { "type": "FeatureCollection", "features": [] }
    #print request.META
    # Decide which search page the request came from; the referer determines
    # the search type (and possibly the doc_type) used in the bulk query.
    url = request.META.get('HTTP_REFERER')
    searchType = 'Search'
    if not url:
        # no referer to scope the query by: return an empty collection
        return JSONResponse(geojson_collection)
    if url.find('searchType')>0:
        parsed = urlparse.urlparse(url)
        searchType = urlparse.parse_qs(parsed.query)['searchType'][0]
    else:
        if url.find('search_sites')>0:
            searchType = 'Site'
            entitytypeid = 'SITE.E18'
        elif url.find('search_graves')>0:
            searchType = 'Grave'
            entitytypeid = 'GRAVE.E18'
        elif url.find('search_objects')>0:
            searchType = 'Object'
            entitytypeid = 'OBJECT.E18'
    #print searchType
    se = SearchEngineFactory().create()
    query = Query(se, limit=limit)
    args = { 'index': 'maplayers' }
    if entitytypeid != 'all':
        args['doc_type'] = entitytypeid
    if entityids != '':
        # Explicit id fetch: look up each requested document individually.
        for entityid in entityids.split(','):
            item = se.search(index='maplayers', id=entityid)
            #print item
            # Translations: localize concept uuids referenced in the properties
            #print 'Result_item'
            #print item['_source']['properties']
            concept_label_ids = set()
            uuid_regex = re.compile('[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
            # gather together all uuid's referenced in the resource graph
            def crawl(items):
                for item in items:
                    if isinstance(item, dict):
                        for key in item:
                            if isinstance(item[key], list):
                                crawl(item[key])
                            else:
                                if isinstance(item[key], basestring) and uuid_regex.match(item[key]):
                                    concept_label_ids.add(item[key])
            crawl([item['_source']['properties']])
            # get all the concept labels from the uuid's
            concept_labels = se.search(index='concept_labels', id=list(concept_label_ids))
            # convert all labels to their localized prefLabel
            temp = {}
            if concept_labels != None:
                for concept_label in concept_labels['docs']:
                    #temp[concept_label['_id']] = concept_label
                    if concept_label['found']:
                        # the resource graph already referenced the preferred label in the desired language
                        if concept_label['_source']['type'] == 'prefLabel' and concept_label['_source']['language'] == lang:
                            temp[concept_label['_id']] = concept_label['_source']
                        else:
                            # the resource graph referenced a non-preferred label or a label not in our target language, so we need to get the right label
                            temp[concept_label['_id']] = get_preflabel_from_conceptid(concept_label['_source']['conceptid'], lang)
            # replace the uuid's in the resource graph with their preferred and localized label
            def crawl_again(items):
                for item in items:
                    if isinstance(item, dict):
                        for key in item:
                            if isinstance(item[key], list):
                                crawl_again(item[key])
                            else:
                                if isinstance(item[key], basestring) and uuid_regex.match(item[key]):
                                    try:
                                        item[key] = temp[item[key]]['value']
                                    except:
                                        # NOTE(review): deliberate best-effort — a uuid with no
                                        # resolved label is left in place; bare except also hides
                                        # other errors, consider narrowing to KeyError
                                        pass
            crawl_again([item['_source']['properties']])
            #print 'crawl_again'
            #print item['_source']['properties']
            geojson_collection['features'].append(item['_source'])
            #geojson_collection['features'].append(se.search(index='maplayers', id=entityid)['_source'])
            # Attempt at fetching images: they can be retrieved, but displaying
            # them afterwards is a problem, so we do not show them here
            #related_resources = get_related_resources(entityid, lang, start=0, limit=15)
            #if related_resources['related_resources']:
            #    thumbnails = {'thumbnail': [] }
            #    for entity in related_resources['related_resources'][0]['child_entities']:
            #        #print entity
            #        if entity['entitytypeid']=='THUMBNAIL.E62':
            #            thumbnails['thumbnail'].append(entity['value'])
            #    item['_source']['properties']['thumbnails'] = thumbnails
            #print item['_source']['properties']
        # sort the fetched features alphabetically by their primary name
        geojson_collection['features'] = sorted(geojson_collection['features'], key=lambda k: (k['properties']['primaryname'].lower()))
        return JSONResponse(geojson_collection)
    data = query.search(**args)
    for item in data['hits']['hits']:
        # If we are not on the general search, only include matching resource types
        if (searchType != 'Search'):
            #print item
            #print item['_source']['properties']['searchType']
            if (item['_source']['properties']['searchType'] != searchType):
                continue
            #print 'Je'
        if get_centroids:
            # centroid-only response: drop all other properties entirely
            item['_source']['geometry'] = item['_source']['properties']['centroid']
            item['_source'].pop('properties', None)
        elif geom_param != None:
            # promote the requested property to the geometry, strip bulky fields
            item['_source']['geometry'] = item['_source']['properties'][geom_param]
            item['_source']['properties'].pop('extent', None)
            item['_source']['properties'].pop(geom_param, None)
        else:
            item['_source']['properties'].pop('extent', None)
            item['_source']['properties'].pop('centroid', None)
        geojson_collection['features'].append(item['_source'])
    print 'St. zapisov: '
    print len(data['hits']['hits'])
    return JSONResponse(geojson_collection)
def time_wheel_config(request):
    """Build the time-wheel hierarchy (millennia > centuries > decades).

    First queries the 'resource' index for the overall min/max date, rounds
    that span outward to whole millennia, then issues a single aggregation
    query with one filter bucket per millennium/century/decade and converts
    the result into a d3 hierarchy.  Returns 404 when no date range exists.
    """
    se = SearchEngineFactory().create()
    # limit=0: only the aggregations are needed, not the hits themselves
    query = Query(se, limit=0)
    nested_agg = NestedAgg(path='dates', name='min_max_agg')
    nested_agg.add_aggregation(MinAgg(field='dates.date'))
    nested_agg.add_aggregation(MaxAgg(field='dates.date'))
    query.add_aggregation(nested_agg)
    results = query.search(index='resource')
    if results is not None and results['aggregations']['min_max_agg']['min_dates.date']['value'] is not None and results['aggregations']['min_max_agg']['max_dates.date']['value'] is not None:
        # assumes dates.date is stored scaled by 10000 (cf. SortableDate) — TODO confirm
        min_date = int(results['aggregations']['min_max_agg']['min_dates.date']['value'])/10000
        max_date = int(results['aggregations']['min_max_agg']['max_dates.date']['value'])/10000
        # round min and max date to the nearest 1000 years
        # (rounded away from zero so the whole data range stays covered)
        min_date = math.ceil(math.fabs(min_date)/1000)*-1000 if min_date < 0 else math.floor(min_date/1000)*1000
        max_date = math.floor(math.fabs(max_date)/1000)*-1000 if max_date < 0 else math.ceil(max_date/1000)*1000
        query = Query(se, limit=0)
        # maps bucket name -> [lower, upper] year; consumed by appendDateRanges below
        range_lookup = {}

        def gen_range_agg(gte=None, lte=None, permitted_nodegroups=None):
            # Build a query matching resources whose nested 'dates' or
            # 'date_ranges' intersect [gte, lte], optionally restricted to
            # the permitted nodegroups.
            date_query = Bool()
            date_query.filter(Range(field='dates.date', gte=gte, lte=lte, relation='intersects'))
            if permitted_nodegroups:
                date_query.filter(Terms(field='dates.nodegroup_id', terms=permitted_nodegroups))
            date_ranges_query = Bool()
            date_ranges_query.filter(Range(field='date_ranges.date_range', gte=gte, lte=lte, relation='intersects'))
            if permitted_nodegroups:
                date_ranges_query.filter(Terms(field='date_ranges.nodegroup_id', terms=permitted_nodegroups))
            wrapper_query = Bool()
            wrapper_query.should(Nested(path='date_ranges', query=date_ranges_query))
            wrapper_query.should(Nested(path='dates', query=date_query))
            return wrapper_query

        for millennium in range(int(min_date), int(max_date)+1000, 1000):
            min_millenium = millennium
            max_millenium = millennium + 1000
            millenium_name = "Millennium (%s - %s)"%(min_millenium, max_millenium)
            # the permission filter is applied only at the millennium level;
            # century/decade buckets nest inside it and inherit the restriction
            mill_boolquery = gen_range_agg(gte=SortableDate(min_millenium).as_float()-1, lte=SortableDate(max_millenium).as_float(), permitted_nodegroups=get_permitted_nodegroups(request.user))
            millenium_agg = FiltersAgg(name=millenium_name)
            millenium_agg.add_filter(mill_boolquery)
            range_lookup[millenium_name] = [min_millenium, max_millenium]
            for century in range(min_millenium, max_millenium, 100):
                min_century = century
                max_century = century + 100
                century_name = "Century (%s - %s)"%(min_century, max_century)
                cent_boolquery = gen_range_agg(gte=SortableDate(min_century).as_float()-1, lte=SortableDate(max_century).as_float())
                century_agg = FiltersAgg(name=century_name)
                century_agg.add_filter(cent_boolquery)
                millenium_agg.add_aggregation(century_agg)
                range_lookup[century_name] = [min_century, max_century]
                for decade in range(min_century, max_century, 10):
                    min_decade = decade
                    max_decade = decade + 10
                    decade_name = "Decade (%s - %s)"%(min_decade, max_decade)
                    dec_boolquery = gen_range_agg(gte=SortableDate(min_decade).as_float()-1, lte=SortableDate(max_decade).as_float())
                    decade_agg = FiltersAgg(name=decade_name)
                    decade_agg.add_filter(dec_boolquery)
                    century_agg.add_aggregation(decade_agg)
                    range_lookup[decade_name] = [min_decade, max_decade]
            query.add_aggregation(millenium_agg)
        root = d3Item(name='root')
        # wrap the aggregation response so it looks like one top-level bucket
        results = {'buckets':[query.search(index='resource')['aggregations']]}
        results_with_ranges = appendDateRanges(results, range_lookup)
        transformESAggToD3Hierarchy(results_with_ranges, root)
        return JSONResponse(root, indent=4)
    else:
        return HttpResponseNotFound(_('Error retrieving the time wheel config'))