def search(self):
    """Execute the query as assembled."""
    # log.info("Search index: %s", self.get_index())
    result = search_safe(index=self.get_index(), body=self.get_body())
    log.info("Took: %sms", result.get('took'))
    # log.info("%s", pformat(result.get('profile')))
    return result
def get_collection_stats(collection_id):
    """Compute some statistics on the content of a collection."""
    key = cache.key('cstats', collection_id)
    data = cache.get_complex(key)
    if data is not None:
        return data

    log.info("Generating collection stats: %s", collection_id)
    query = {
        'size': 0,
        'query': {
            'bool': {
                'filter': [{'term': {'collection_id': collection_id}}]
            }
        },
        'aggs': {
            'schemata': {'terms': {'field': 'schema', 'size': 1000}},
            'countries': {'terms': {'field': 'countries', 'size': 500}},
            'languages': {'terms': {'field': 'languages', 'size': 10}},
        }
    }
    result = search_safe(index=entities_read_index(), body=query)
    aggregations = result.get('aggregations', {})
    data = {'count': result['hits']['total']}
    for facet in ['schemata', 'countries', 'languages']:
        data[facet] = {}
        for bucket in aggregations[facet]['buckets']:
            data[facet][bucket['key']] = bucket['doc_count']
    # Randomise the cache expiry (3 to 12 hours) so that the stats for all
    # collections do not need to be regenerated at the same time.
    expire = randint(3600 * 3, 3600 * 12)
    cache.set_complex(key, data, expire=expire)
    return data
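
# A minimal usage sketch, with a hypothetical collection id: the returned
# mapping keys each facet value to a document count, so rendering a facet
# is a plain loop over the cached dictionary.
def print_collection_stats_sketch(collection_id=42):
    stats = get_collection_stats(collection_id)
    print('Total entities:', stats['count'])
    for schema, count in stats['schemata'].items():
        print(schema, count)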
def _index_updates(collection, entities):
    """Look up existing index documents and generate an updated form.

    This is necessary to make the index accumulative, i.e. if an entity
    or link gets indexed twice with different field values, the different
    field values are added up into a single record. This avoids
    overwriting the document and losing field values. An alternative
    solution would be to implement this in Groovy on the ES side.
    """
    if not len(entities):
        return
    common = {
        'collection_id': collection.id,
        'bulk': True,
        'roles': collection.roles,
        'updated_at': datetime.utcnow()
    }
    query = {
        'query': {'ids': {'values': list(entities.keys())}},
        '_source': ['schema', 'properties', 'created_at']
    }
    result = search_safe(index=entity_index(), body=query)
    for doc in result.get('hits').get('hits', []):
        entity_id = doc['_id']
        entity = entities.get(entity_id)
        existing = doc.get('_source')
        # Merge the new entity data into the existing index document,
        # keeping the original creation timestamp.
        combined = model.merge(existing, entity)
        combined['created_at'] = existing.get('created_at')
        entities[entity_id] = combined
    for doc_id, entity in entities.items():
        entity.pop('id', None)
        entity.update(common)
        schema = model.get(entity.get('schema'))
        entity = finalize_index(entity, schema, [])
        # pprint(entity)
        yield {
            '_id': doc_id,
            '_index': entity_index(),
            '_type': 'doc',
            '_source': entity
        }
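
# A sketch of how the generated actions might be consumed, assuming `es` is
# an elasticsearch.Elasticsearch client available in this module: the
# generator yields plain dicts in the shape expected by the standard bulk
# helper from the elasticsearch library.
from elasticsearch.helpers import bulk

def index_bulk_sketch(collection, entities):
    # stats_only=True returns (success_count, error_count) rather than the
    # full per-item responses.
    return bulk(es, _index_updates(collection, entities), stats_only=True)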
def xref_item(proxy):
    """Cross-reference an entity or document, given as an indexed document."""
    query = match_query(proxy)
    if query == none_query():
        return
    query = {
        'query': query,
        'size': 100,
        '_source': {'includes': ['schema', 'properties', 'collection_id']}
    }
    result = search_safe(index=entities_index(), body=query)
    hits = result.get('hits').get('hits')
    for hit in hits:
        hit = unpack_result(hit)
        if hit is not None:
            other = model.get_proxy(hit)
            score = compare(model, proxy, other)
            yield score, hit.get('collection_id'), other
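
# A hedged example of consuming the scored generator above, e.g. to keep
# only the single best match for an entity; `proxy` is assumed to be a
# followthemoney EntityProxy, as in xref_item() itself.
def best_xref_match(proxy):
    matches = list(xref_item(proxy))
    if not matches:
        return None
    # Tuples are (score, collection_id, other); pick the highest score.
    return max(matches, key=lambda match: match[0])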
def _xref_item(item, collection_id=None):
    """Cross-reference an entity or document, given as an indexed document."""
    name = item.get('name') or item.get('title')
    query = entity_query(item, collection_id=collection_id)
    if 'match_none' in query:
        return
    query = {
        'query': query,
        'size': 10,
        '_source': ['collection_id', 'name'],
    }
    result = search_safe(index=entities_index(), body=query)
    results = result.get('hits').get('hits')
    entity_id, document_id = None, None
    if Document.SCHEMA in item.get('schemata'):
        document_id = item.get('id')
    else:
        entity_id = item.get('id')
    # Drop the existing matches for this item before writing fresh ones.
    dq = db.session.query(Match)
    dq = dq.filter(Match.entity_id == entity_id)
    dq = dq.filter(Match.document_id == document_id)
    if collection_id is not None:
        dq = dq.filter(Match.match_collection_id == collection_id)
    dq.delete()
    for result in results:
        source = result.get('_source', {})
        log.info("Xref [%.1f]: %s <=> %s",
                 result.get('_score'), name, source.get('name'))
        obj = Match()
        obj.entity_id = entity_id
        obj.document_id = document_id
        obj.collection_id = item.get('collection_id')
        obj.match_id = result.get('_id')
        obj.match_collection_id = source.get('collection_id')
        obj.score = result.get('_score')
        db.session.add(obj)
    db.session.commit()
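
# A short follow-up sketch: read back the Match rows written above for a
# given document, best matches first; `document_id` is supplied by a
# hypothetical caller.
def matches_for_document(document_id):
    return (db.session.query(Match)
            .filter(Match.document_id == document_id)
            .order_by(Match.score.desc())
            .all())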
def check_alert(alert_id):
    """Run the query for a single alert and notify the alert's owner of
    any matching entities."""
    alert = Alert.by_id(alert_id)
    if alert is None or alert.role is None:
        return
    if not alert.role.is_alertable:
        return
    authz = Authz.from_role(alert.role)
    query = alert_query(alert, authz)
    result = search_safe(index=entities_index(), body=query)
    for doc in result.get('hits').get('hits', []):
        entity = unpack_result(doc)
        if entity is None:
            continue
        log.info('Alert [%s]: %s', alert.query, entity.get('name'))
        params = {'alert': alert, 'role': alert.role, 'entity': entity}
        publish(Events.MATCH_ALERT,
                actor_id=entity.get('uploader_id'),
                params=params)
    alert.update()
    db.session.commit()
    db.session.close()
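
# A sketch for running every stored alert, using the SQLAlchemy session and
# Alert model referenced above; a real deployment would more likely hand
# each check_alert() call to a task queue than loop inline.
def check_all_alerts_sketch():
    for (alert_id,) in db.session.query(Alert.id).all():
        check_alert(alert_id)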
def iter_entities_by_ids(ids, authz=None):
    """Iterate over unpacked entities based on a search for the given
    entity IDs."""
    for i in range(0, len(ids), MAX_PAGE):
        chunk = ids[i:i + MAX_PAGE]
        if not len(chunk):
            return
        query = bool_query()
        query['bool']['filter'].append({'ids': {'values': chunk}})
        if authz is not None:
            query['bool']['filter'].append(authz_query(authz))
        includes = ['schema', 'properties', 'collection_id', 'created_at']
        query = {
            'query': query,
            '_source': {'includes': includes},
            'size': min(MAX_PAGE, len(chunk) * 2)
        }
        result = search_safe(index=entity_index(),
                             body=query,
                             request_cache=False)
        for doc in result.get('hits', {}).get('hits', []):
            entity = unpack_result(doc)
            if entity is not None:
                yield entity
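
# A small usage sketch: materialise the iterator into an id-keyed lookup
# table. This assumes unpack_result() exposes the document id under 'id'.
def entities_by_id_map(ids, authz=None):
    return {e.get('id'): e for e in iter_entities_by_ids(ids, authz=authz)}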
def entities_by_ids(ids, authz=None, schemata=None):
    """Iterate over unpacked entities based on a search for the given
    entity IDs."""
    for i in range(0, len(ids), MAX_PAGE):
        chunk = ids[i:i + MAX_PAGE]
        if not len(chunk):
            return
        query = bool_query()
        query['bool']['filter'].append({'ids': {'values': chunk}})
        if authz is not None:
            query['bool']['filter'].append(authz_query(authz))
        query = {
            'query': query,
            '_source': {'excludes': ['text']},
            'size': min(MAX_PAGE, len(chunk))
        }
        index = entities_read_index(schema=schemata)
        result = search_safe(index=index, body=query, ignore=[404])
        for doc in result.get('hits', {}).get('hits', []):
            entity = unpack_result(doc)
            if entity is not None:
                yield entity
def index_collection(collection):
    """Index a collection."""
    if collection.deleted_at is not None:
        return delete_collection(collection.id)

    data = {
        'foreign_id': collection.foreign_id,
        'created_at': collection.created_at,
        'updated_at': collection.updated_at,
        'label': collection.label,
        'kind': collection.kind,
        'summary': collection.summary,
        'category': Collection.DEFAULT,
        'publisher': collection.publisher,
        'publisher_url': collection.publisher_url,
        'info_url': collection.info_url,
        'data_url': collection.data_url,
        'casefile': collection.casefile,
        'roles': collection.roles,
        'schemata': {},
        'team': []
    }
    texts = [v for v in data.values() if isinstance(v, str)]

    if collection.category in Collection.CATEGORIES:
        data['category'] = collection.category

    if collection.creator is not None:
        data['creator'] = {
            'id': collection.creator.id,
            'type': collection.creator.type,
            'name': collection.creator.name
        }
        texts.append(collection.creator.name)

    for role in collection.team:
        data['team'].append({
            'id': role.id,
            'type': role.type,
            'name': role.name
        })
        texts.append(role.name)

    # Compute some statistics on the content of a collection.
    query = {
        'size': 0,
        'query': {
            'bool': {
                'filter': [
                    {'term': {'collection_id': collection.id}},
                    {'term': {'schemata': Entity.THING}}
                ]
            }
        },
        'aggs': {
            'schema': {'terms': {'field': 'schema', 'size': 1000}},
            'countries': {'terms': {'field': 'countries', 'size': 500}},
            'languages': {'terms': {'field': 'languages', 'size': 100}},
        }
    }
    result = search_safe(index=entities_index(), body=query)
    aggregations = result.get('aggregations')
    data['count'] = result['hits']['total']

    # Expose entity counts by schema.
    for schema in aggregations['schema']['buckets']:
        data['schemata'][schema['key']] = schema['doc_count']

    # If no countries or languages are given, take the most common from
    # the data.
    countries = collection.countries
    if countries is None or not len(countries):
        countries = aggregations['countries']['buckets']
        countries = [c['key'] for c in countries]
    data['countries'] = exactitude.countries.normalize_set(countries)

    languages = collection.languages
    if languages is None or not len(languages):
        languages = aggregations['languages']['buckets']
        languages = [c['key'] for c in languages]
    data['languages'] = exactitude.languages.normalize_set(languages)

    texts.extend([normalize(t, ascii=True) for t in texts])
    data['text'] = index_form(texts)
    data = index_safe(collections_index(), collection.id, data)
    refresh_index(index=collections_index())
    return data
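
# A hedged reindexing sketch: walk every collection in the database and
# rebuild its index document. Collection is the SQLAlchemy model used
# above; a production variant would batch or defer the refresh calls.
def reindex_all_collections_sketch():
    for collection in db.session.query(Collection).all():
        index_collection(collection)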