# Assumed imports for this snippet; the remaining helpers (load_document, save_document,
# predict_entities, predict_relations, ...) and the PREDICT_* constants are defined
# elsewhere in the module. current_user is assumed to come from Flask-Login.
import json

import bioc
from flask import request, Response
from flask_login import current_user


def predict():
    """Run the requested prediction jobs for a document and respond with the updated annotations."""
    data = request.get_json()
    task_id = data['task_id']
    jobs = data.get('jobs', [PREDICT_ENTITIES])
    document_id = data['document_id']
    user_id = data.get('user_id', current_user.get_id())
    current_prediction_user = prediction_user_for_user(user_id)
    prediction_user_doc_id = load_user_doc_id(document_id, current_prediction_user)
    delete_user_document(prediction_user_doc_id)
    # json.loads() would fail on None, so only parse the state if the client sent one
    current_state = data.get('current_state')
    document_data = json.loads(current_state) if current_state else None
    if document_data is None:
        document_data = load_document(document_id, user_id)
    else:
        # the current state has to be saved first in order to disambiguate the ids of the annotations
        user_doc_id = load_user_doc_id(document_id, current_user.get_id())
        successful = save_document(document_data, user_doc_id, document_id,
                                   current_user.get_id(), task_id)
        if not successful:
            return "Could not save the document", 500
    if PREDICT_ENTITIES in jobs:
        # register a user document for the prediction user before running entity prediction
        cursor = get_connection().cursor()
        cursor.execute('INSERT INTO "LTN_DEVELOP"."USER_DOCUMENTS" '
                       'VALUES (?, ?, ?, 0, current_timestamp, current_timestamp)',
                       (prediction_user_doc_id, current_prediction_user, document_id))
        cursor.close()
        get_connection().commit()
        predict_entities(document_id, task_id, prediction_user_doc_id)
    if PREDICT_RELATIONS in jobs:
        if PREDICT_ENTITIES not in jobs:
            # relations are predicted on the user's existing entities, so persist them first
            save_document(document_data, prediction_user_doc_id, document_id,
                          current_prediction_user, task_id, False)
        predicted_pairs = predict_relations(prediction_user_doc_id, task_id)
        if PREDICT_ENTITIES not in jobs:
            remove_entities_without_relations(predicted_pairs, document_data,
                                              prediction_user_doc_id)
    document_data = load_document(document_id, current_user.get_id(), True)
    return respond_with(document_data)
def export_document(document_id, users):
    """Collect each user's annotations for the document into one BioC collection and return it as an XML attachment."""
    bcollection = bioc.BioCCollection()
    for user_id in users:
        document = load_document(document_id, user_id)
        bdocument = create_bioc_document_from_document_json(document)
        bcollection.add_document(bdocument)
    result = bcollection.tobioc()
    response = Response(result, mimetype='text/xml')
    response.headers["Content-Disposition"] = "attachment; filename=" + document_id + ".xml"
    return response
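# --- Hedged usage sketch (not part of the original module) ---------------------
# The route decorators for predict() and export_document() are not shown in this
# snippet, so the endpoint URL and server address below are assumptions for
# illustration only. The payload keys mirror what predict() reads from
# request.get_json(): 'task_id' and 'document_id' are required, while 'jobs',
# 'user_id' and 'current_state' are optional (omitting 'jobs' makes the server
# fall back to entity prediction only).
def _example_predict_request():
    # Minimal client-side sketch using the 'requests' library; values are hypothetical.
    import requests

    payload = {
        'task_id': 1,                       # hypothetical task id
        'document_id': 'example-doc-id',    # hypothetical document id
        # 'jobs': [...],                    # optional: PREDICT_* constants defined server-side
        # 'current_state': json.dumps(...), # optional: the editor's unsaved annotations as JSON
    }
    response = requests.post('http://localhost:5000/predict', json=payload)  # assumed URL
    return response.json()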