def _process_entity(entity, sync=False):
    """Perform pre-index processing on an entity, including running the NLP pipeline."""
    analyze_entity(entity)
    refresh_entity(entity.id, sync=sync)
    # log.debug("Index: %r", entity)
    return entity
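# A minimal usage sketch, not part of the original module: the helper above is
# applied per entity, so a bulk re-process would loop over an iterable of
# entities. The function name and signature below are assumptions for
# illustration only.
def _reprocess_entities(entities, sync=False):
    # Run analysis and the cache refresh for each entity in turn.
    for entity in entities:
        _process_entity(entity, sync=sync)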
def ingest(document_id, file_path=None, refresh=False):
    """Process a given document by extracting its contents.
    This may include creating or updating child documents."""
    document = Document.by_id(document_id)
    if document is None:
        log.error("Could not find document: %s", document_id)
        return

    # Work path will be used by storagelayer to cache a local
    # copy of data from an S3-based archive, and by ingestors
    # to perform processing and generate intermediary files.
    work_path = mkdtemp(prefix="aleph.ingest.")
    if file_path is None:
        file_path = archive.load_file(document.content_hash,
                                      file_name=document.safe_file_name,
                                      temp_path=work_path)

    try:
        manager = get_manager()
        result = DocumentResult(manager, document, file_path=file_path)
        manager.ingest(file_path, result=result, work_path=work_path)
        document.status = Document.STATUS_SUCCESS
        log.debug('Ingested [%s:%s]: %s',
                  document.id, document.schema, document.name)

        if document.collection.casefile and not refresh:
            params = {'collection': document.collection,
                      'document': document}
            publish(Events.INGEST_DOCUMENT,
                    actor_id=document.uploader_id,
                    params=params)

        db.session.commit()
    except Exception:
        db.session.rollback()
        # Re-fetch the document: the rollback invalidates the session-bound
        # instance loaded above.
        document = Document.by_id(document_id)
        log.exception("Ingest failed [%s]: %s", document.id, document.name)
        document.status = Document.STATUS_FAIL
        db.session.commit()
    finally:
        # Removing the temp_path given to storagelayer makes it redundant
        # to also call cleanup on the archive.
        remove_directory(work_path)

    extract_document_tags(document)
    # delete_entity(document.id, exclude=document.schema)
    index_document(document)
    refresh_entity(document)
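# A minimal usage sketch, not part of the original module: re-running ingest
# for a document that is already in the archive. With file_path=None the
# function loads the blob from the archive by content hash, and refresh=True
# suppresses the INGEST_DOCUMENT notification. The wrapper name below is
# hypothetical.
def _reingest_document(document_id):
    ingest(document_id, file_path=None, refresh=True)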
def save_entityset_item(entityset, collection, entity_id, **data):
    """Change the association between an entity and an entityset. In the case
    of a profile, this may require re-indexing of the entity to update the
    associated profile_id.
    """
    item = EntitySetItem.save(entityset, entity_id,
                              collection_id=collection.id, **data)
    if entityset.type == EntitySet.PROFILE and entityset.collection_id == collection.id:
        from aleph.logic.profiles import profile_fragments

        aggregator = get_aggregator(collection)
        profile_fragments(collection, aggregator, entity_id=entity_id)
        index_aggregator(collection, aggregator, entity_ids=[entity_id])
        refresh_entity(collection, entity_id)
    refresh_entityset(entityset.id)
    return item
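# A minimal usage sketch, not part of the original module: attaching an entity
# to a profile-type entityset. Extra keyword arguments (for example a
# judgement value) are passed through to EntitySetItem.save; exactly which
# fields it accepts is an assumption here.
def _add_entity_to_profile(profile, collection, entity_id, **data):
    return save_entityset_item(profile, collection, entity_id, **data)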
def process_document(document):
    """Perform post-ingest tasks like analysis and indexing."""
    extract_document_tags(document)
    refresh_entity(document)
    index_document(document)
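# A minimal usage sketch, not part of the original module: post-ingest
# processing applied in bulk, e.g. after upgrading an analyzer. Note the
# ordering inside process_document: tags are extracted and the cache is
# refreshed before the document is indexed. The loop shape below is an
# assumption for illustration.
def _reprocess_documents(documents):
    for document in documents:
        process_document(document)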
def delete_document(document, deleted_at=None, sync=False):
    # Invalidate the cached entity view before removing the document itself.
    refresh_entity(document, sync=sync)
    _delete_document(document, deleted_at=deleted_at, sync=sync)
def update_document(document, shallow=False, sync=False):
    # These are operations that should be executed after each
    # write to a document or its metadata.
    refresh_entity(document, sync=sync)
    return index.index_document(document, shallow=shallow, sync=sync)
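# A minimal usage sketch, not part of the original module: a metadata write
# followed by the refresh-and-reindex step that update_document enforces.
# The settable title attribute and the commit placement are assumptions.
def _rename_document(document, title):
    document.title = title
    db.session.commit()
    return update_document(document, shallow=True, sync=True)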