def get_revisions(endpoint, pid_value):
    """Get revisions of given record.

    Returns a JSON list, newest revision first, where each entry carries the
    revision timestamp, revision id, the email of the user who made the
    change (``"system"`` when no user is attached to the transaction), the
    transaction id and the record uuid.

    Raises ``EditorGetRevisionError`` (chained to the root cause) on any
    failure.
    """
    try:
        Transaction = transaction_class(RecordMetadata)
        pid_type = PidStoreBase.get_pid_type_from_endpoint(endpoint)
        record = InspireRecord.get_record_by_pid_value(
            pid_value, pid_type, original_record=True
        )
        # Records in private collections must not leak their history.
        if not check_permissions_for_private_collection_read(
            record.get("_collections", [])
        ):
            return jsonify(message="Unauthorized", code=403), 403
        revisions = []
        for revision in reversed(record.revisions):
            transaction_id = revision.model.transaction_id
            user = (
                Transaction.query.filter(Transaction.id == transaction_id)
                .one()
                .user
            )
            user_email = user.email if user else "system"
            revisions.append(
                {
                    "updated": revision.updated,
                    "revision_id": revision.revision_id,
                    "user_email": user_email,
                    "transaction_id": transaction_id,
                    "rec_uuid": record.id,
                }
            )
        return jsonify(revisions)
    except Exception as exc:
        # Chain the original exception so the traceback is not lost.
        raise EditorGetRevisionError from exc
def authorlist_text():
    """Run authorlist on a piece of text."""
    try:
        return jsonify(authorlist(request.json["text"]))
    except Exception as err:
        message = " / ".join(err.args)
        return jsonify(status=400, message=message), 400
def put(self, pid_value):
    """Updates existing record in db"""
    data = self.load_data_from_request()
    try:
        pid, _ = pid_value.data
        record = JobsRecord.get_record_by_pid_value(pid.pid_value)
        if not self.user_can_edit(record):
            error = {"message": "You are not allowed to edit this Job opening"}
            return jsonify(error), 403
    except PIDDoesNotExistError:
        abort(404)
    self.raise_if_user_can_not_modify_status(data, record)
    builder = self.get_builder_with_updated_record(data, record)
    data = self.get_valid_record_data_from_builder(builder)
    record.update(data)
    db.session.commit()
    # Catalogers and superusers edit directly; everyone else gets an RT ticket.
    if not is_superuser_or_cataloger_logged_in():
        self.create_ticket(record, "rt/update_job.html")
    return jsonify({"pid_value": record["control_number"]})
def upload():
    """Upload file to S3."""
    if "file" not in request.files:
        return jsonify(success=False, message="File key is missing."), 400
    file_data = request.files["file"]
    filename = file_data.filename
    mime_type = file_data.mimetype
    _, extension = splitext(filename)
    allowed_extensions = current_app.config["EDITOR_UPLOAD_ALLOWED_EXTENSIONS"]
    if extension not in allowed_extensions:
        message = f"File extension '{extension}' is not supported."
        return jsonify(success=False, message=message), 400
    # The object key is a hash of the content, so identical uploads share a key.
    key = hash_data(file_data.read())
    bucket = current_app.config.get("S3_EDITOR_BUCKET")
    file_data.seek(0)  # rewind: read() above consumed the stream
    current_s3_instance.upload_file(
        file_data,
        key,
        filename,
        mime_type,
        current_app.config["S3_FILE_ACL"],
        bucket,
    )
    return jsonify({"path": current_s3_instance.get_s3_url(key, bucket)}), 200
def refextract_url():
    """Run refextract on a URL.

    Uses the external refextract service when the feature flag is enabled,
    otherwise falls back to the in-process extractor.  The extracted
    references are deduplicated, mapped to the schema and matched against
    existing records.
    """
    if current_app.config.get("FEATURE_FLAG_ENABLE_REFEXTRACT_SERVICE"):
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        data = {
            "journal_kb_data": create_journal_dict(),
            "url": request.json["url"],
        }
        response = requests.post(
            f"{current_app.config['REFEXTRACT_SERVICE_URL']}/extract_references_from_url",
            headers=headers,
            data=orjson.dumps(data),
        )
        if response.status_code != 200:
            # BUG FIX: the status code must be the second element of the
            # returned tuple; the original passed 500 as an argument to
            # jsonify(), which made the error path respond with HTTP 200.
            return jsonify({"message": "Can not extract references"}), 500
        extracted_references = response.json()["extracted_references"]
    else:
        extracted_references = extract_references_from_url(
            request.json["url"],
            override_kbs_files={"journals": create_journal_dict()},
            reference_format="{title},{volume},{page}",
        )
    deduplicated_extracted_references = dedupe_list(extracted_references)
    references = map_refextract_to_schema(deduplicated_extracted_references)
    match_result = match_references(references)
    return jsonify(match_result.get("matched_references"))
def literature_export_to_cds(args):
    """Trigger the CDS export task for the given literature records."""
    recids = args["literature_recids"]
    try:
        export_papers_to_cds(recids)
    except Exception:
        LOGGER.exception("Cannot start 'export_to_cds' task.")
        return jsonify({"message": "Internal Error"}), 500
    return jsonify({"message": "Success"}), 200
def literature_assign_conferences_view(args):
    """Queue the task that assigns papers to a conference."""
    conference_recid = args["conference_recid"]
    paper_recids = args["literature_recids"]
    try:
        assign_paper_to_conference.delay(paper_recids, conference_recid)
    except Exception:
        LOGGER.exception("Cannot start 'assign_paper_to_conference' task.")
        return jsonify({"message": "Internal Error"}), 500
    return jsonify({"message": "Success"}), 200
def local_login():
    """Log a user in with email/password credentials.

    Returns the serialized current user on success, otherwise a 422 with a
    deliberately generic message so the response does not reveal whether the
    email or the password was wrong.
    """
    body = request.get_json()
    email = body.get("email")
    password = body.get("password")
    # Robustness fix: missing fields used to raise KeyError (HTTP 500);
    # treat them the same as invalid credentials.
    if not email or not password:
        return jsonify({"message": "Email or password is incorrect"}), 422
    user = User.query.filter_by(email=email).one_or_none()
    if user and verify_password(password, user.password):
        login_user(user)
        data_current_user = get_current_user_data()
        return jsonify(data_current_user), 200
    return jsonify({"message": "Email or password is incorrect"}), 422
def sign_up_user():
    """Sign up the current user through the invenio-oauthclient ORCID handler."""
    try:
        current_oauthclient.signup_handlers["orcid"]["view"]()
    except IntegrityError:
        # invenio-oauthclient doesn't handle properly the case of duplicate
        # emails, and it's raising a sqlalchemy ``IntegrityError``.
        return jsonify({"message": "Email already exists.", "code": 400}), 400
    except Exception:
        # Log the unexpected failure instead of swallowing it silently,
        # so the root cause is visible in the logs.
        LOGGER.exception("Cannot create user.")
        return jsonify({"message": "Cannot create user.", "code": 400}), 400
    else:
        data_current_user = get_current_user_data()
        return jsonify(data_current_user), 200
def author_assign_view():
    """Assign papers from one author to another, or to a new stub author."""
    body = request.get_json()
    to_author_recid = body.get("to_author_recid")
    from_author_recid = body["from_author_recid"]
    literature_recids = body["literature_recids"]
    creating_stub = to_author_recid is None
    if creating_stub:
        stub_author_id = assign_to_new_stub_author(from_author_recid, literature_recids)
    else:
        assign_to_author(from_author_recid, to_author_recid, literature_recids)
    db.session.commit()
    if creating_stub:
        return jsonify({"stub_author_id": stub_author_id}), 200
    return jsonify({"message": "Success"}), 200
def get(self, pid_value):
    """Serialize and return a single author record, 404 when missing."""
    try:
        record = AuthorsRecord.get_record_by_pid_value(pid_value)
        if not can_user_edit_author_record(record):
            error = {"message": "You are not allowed to edit this author"}
            return jsonify(error), 403
    except PIDDoesNotExistError:
        abort(404)
    return jsonify({"data": author_v1.dump(record)})
def remove_editor_lock(endpoint, pid_value):
    """Release the editor soft lock held by the current user on a record."""
    etag = request.headers["ETag"]
    record = request.json
    if not check_permissions_for_private_collection_read_write(
        record.get("_collections", [])
    ):
        return jsonify(message="Unauthorized", code=403), 403
    etag_digits = re.findall(r"\d+", etag)
    if not etag_digits:
        return jsonify(message="Incorrect Etag passed", code=400), 400
    # The lock was taken for the version the editor would write next,
    # hence the +1 over the version embedded in the ETag.
    next_version = int(etag_digits[0]) + 1
    EditorSoftLock(
        recid=pid_value,
        record_version=next_version,
        user_email=current_user.email,
    ).remove_lock()
    return jsonify(success=True)
def set_orcid_push_setting():
    """Enable or disable ORCID push for the current user.

    When push gets enabled, queues a task that pushes the account's
    literature to ORCID.  Raises ``ValueError`` if the remote account does
    not hold exactly one token.
    """
    orcid_account = get_current_user_remote_orcid_account()
    allow_push = request.json["value"]
    orcid_account.extra_data["allow_push"] = allow_push
    db.session.add(orcid_account)
    db.session.commit()
    if allow_push is True:
        orcid = orcid_account.extra_data["orcid"]
        tokens = orcid_account.remote_tokens
        if len(tokens) != 1:
            raise ValueError(
                f"One token per remote account is expected, but found {len(tokens)} for {orcid}"
            )
        push_account_literature_to_orcid.apply_async(
            kwargs={"orcid": orcid, "token": tokens[0].access_token}
        )
    return jsonify({"message": "Successfully changed orcid push setting"}), 200
def get_tickets_for_record(endpoint, pid_value):
    """View to get rt ticket belongs to given record"""
    record_tickets = tickets.get_tickets_by_recid(pid_value)
    simplified = [_simplify_ticket_response(ticket) for ticket in record_tickets]
    return jsonify(simplified)
def get_record_and_schema(endpoint, pid_value):
    """Return a record plus its JSON schema, taking an editor soft lock."""
    pid_type = PidStoreBase.get_pid_type_from_endpoint(endpoint)
    record = InspireRecord.get_record_by_pid_value(
        pid_value, pid_type, original_record=True
    )
    if not check_permissions_for_private_collection_read(
        record.get("_collections", [])
    ):
        return jsonify(message="Unauthorized", code=403), 403
    soft_lock = EditorSoftLock(
        recid=record["control_number"],
        record_version=record.model.version_id,
        user_email=current_user.email,
    )
    lock_payload = soft_lock.prepare_editor_lock_api_payload()
    soft_lock.add_lock()
    # ``payload`` avoids shadowing the stdlib ``json`` module name.
    payload = {
        "record": {"metadata": record},
        "schema": load_schema(record["$schema"]),
    }
    payload.update(lock_payload)
    response = make_response(payload)
    set_headers_for_record_caching_and_concurrency(response, record)
    return response
def delete(self, args):
    """Delete the workflow record source matching a record uuid and source.

    Returns 404 when no matching row exists, 200 after a successful delete.
    """
    record_uuid = args.get("record_uuid")
    source = args.get("source")
    result = WorkflowsRecordSources.query.filter_by(
        record_uuid=str(record_uuid), source=source.lower()
    ).one_or_none()
    if not result:
        return (
            jsonify({"message": "No record found for given record_uuid and source!"}),
            404,
        )
    db.session.delete(result)
    db.session.commit()
    # Typo fix in the response message: "succesfully" -> "successfully".
    return jsonify({"message": "Record successfully deleted"}), 200
def query_parser():
    """Parse the ``q`` query-string parameter and return the parse result."""
    try:
        query = request.values.get("q", "", type=str)
        return jsonify(parse_query(query))
    except Exception:
        abort(400)
def post(self):
    """Adds new experiment record"""
    payload = self.load_data_from_request()
    record = ExperimentsRecord(data=payload).create(payload)
    db.session.commit()
    control_number = record["control_number"]
    return jsonify({"control_number": control_number}), 201
def author_assign_view(args):
    """Assign papers to an author, opening an RT ticket when curator
    attention is needed (already-claimed or name-mismatched papers)."""
    to_author_recid = args.get("to_author_recid")
    from_author_recid = args["from_author_recid"]
    literature_recids = args.get("literature_recids", [])
    claimed_recids = args.get("papers_ids_already_claimed")
    unclaimable_recids = args.get("papers_ids_not_matching_name")
    if not (literature_recids or claimed_recids or unclaimable_recids):
        return (
            jsonify(
                success=False,
                message="None of required fields was passed",
            ),
            400,
        )
    if claimed_recids:
        from_author = AuthorsRecord.get_record_by_pid_value(from_author_recid)
        # Claims against a stub author can be moved directly, no ticket needed.
        if from_author.get("stub"):
            literature_recids = literature_recids + claimed_recids
            claimed_recids = []
    if claimed_recids or unclaimable_recids:
        create_rt_ticket_for_claiming_action.delay(
            from_author_recid,
            to_author_recid,
            claimed_recids,
            unclaimable_recids,
        )
        return jsonify({"message": "Success", "created_rt_ticket": True}), 200
    creating_stub = to_author_recid is None
    if creating_stub:
        stub_author_id = assign_to_new_stub_author(from_author_recid, literature_recids)
    else:
        assign_to_author(from_author_recid, to_author_recid, literature_recids)
    db.session.commit()
    if creating_stub:
        return jsonify({"stub_author_id": stub_author_id}), 200
    return jsonify({"message": "Success"}), 200
def post(self):
    """Adds new Institution"""
    payload = self.load_data_from_request()
    record = InstitutionsRecord(data=payload).create(payload)
    db.session.commit()
    control_number = record["control_number"]
    return jsonify({"control_number": control_number}), 201
def get(self, pid_value):
    """Fetch and serialize a single seminar record, 404 when missing."""
    try:
        record = SeminarsRecord.get_record_by_pid_value(pid_value)
    except PIDDoesNotExistError:
        abort(404)
    return jsonify({"data": seminar_serializer_v1.dump(record)})
def revert_to_revision(endpoint, pid_value):
    """Revert given record to given revision.

    Reads ``revision_id`` from the request body.  Raises
    ``EditorRevertToRevisionError`` (chained to the root cause) on any
    failure.
    """
    try:
        pid_type = PidStoreBase.get_pid_type_from_endpoint(endpoint)
        record = InspireRecord.get_record_by_pid_value(
            pid_value, pid_type, original_record=True
        )
        if not check_permissions_for_private_collection_read_write(
            record.get("_collections", [])
        ):
            return jsonify(message="Unauthorized", code=403), 403
        revision_id = request.json["revision_id"]
        record.revert(revision_id)
        db.session.commit()
        return jsonify(success=True)
    except Exception as exc:
        # Chain the original exception so the root cause stays in the traceback.
        raise EditorRevertToRevisionError from exc
def post(self):
    """Adds new job record"""
    payload = self.load_data_from_request()
    builder = self.get_builder_with_new_record(payload)
    valid_data = self.get_valid_record_data_from_builder(builder)
    record = JobsRecord.create(valid_data)
    db.session.commit()
    # Every new job submission opens an RT ticket for curation.
    self.create_ticket(record, "rt/new_job.html")
    return jsonify({"pid_value": record["control_number"]}), 201
def refextract_url():
    """Run refextract on a URL."""
    raw_references = extract_references_from_url(
        request.json["url"],
        override_kbs_files={"journals": create_journal_dict()},
        reference_format="{title},{volume},{page}",
    )
    mapped_references = map_refextract_to_schema(raw_references)
    match_result = match_references(mapped_references)
    return jsonify(match_result.get("matched_references"))
def put(self, pid_value):
    """Update an existing seminar record."""
    try:
        record = SeminarsRecord.get_record_by_pid_value(pid_value)
        if not can_user_edit_record(record):
            error = {"message": "You are not allowed to edit this seminar"}
            return jsonify(error), 403
    except PIDDoesNotExistError:
        abort(404)
    payload = self.load_data_from_request()
    record.update(self.get_updated_record_data(payload, record))
    db.session.commit()
    # Catalogers and superusers edit directly; everyone else gets an RT ticket.
    if not is_superuser_or_cataloger_logged_in():
        self.create_ticket(record, "rt/update_seminar.html")
    return jsonify({"pid_value": record["control_number"]})
def put(self, pid_value):
    """Update an author record, optionally starting an update workflow."""
    try:
        record = AuthorsRecord.get_record_by_pid_value(pid_value)
        # check if we need to check the orcid in the acquisition source or the one in ids
        if not can_user_edit_author_record(record):
            error = {"message": "You are not allowed to edit this author"}
            return jsonify(error), 403
    except PIDDoesNotExistError:
        abort(404)
    payload = self.load_data_from_request()
    self.update_author_record(payload, record)
    db.session.commit()
    if current_app.config.get("FEATURE_FLAG_ENABLE_WORKFLOW_ON_AUTHOR_UPDATE"):
        self.start_workflow_for_submission(record)
    return jsonify({"pid_value": record["control_number"]})
def query_parser():
    """Parse the ``q`` parameter; 404 when the endpoint is feature-flagged off."""
    endpoint_enabled = current_app.config.get(
        "FEATURE_FLAG_ENABLE_QUERY_PARSER_ENDPOINT", True
    )
    if not endpoint_enabled:
        abort(404)
    try:
        query = request.values.get("q", "", type=str)
        return jsonify(parse_query(query))
    except Exception:
        abort(400)
def create_rt_ticket(endpoint, pid_value):
    """View to create an rt ticket"""
    payload = request.json
    ticket_id = tickets.create_ticket(
        payload["queue"],
        current_user.email,
        payload.get("description"),
        payload.get("subject"),
        pid_value,
        Owner=payload.get("owner"),
    )
    # The tickets client signals failure by returning -1 instead of raising.
    if ticket_id == -1:
        return jsonify(success=False), 500
    return jsonify(
        success=True,
        data={
            "id": str(ticket_id),
            "link": tickets.get_rt_link_for_ticket(ticket_id),
        },
    )
def disambiguate():
    """Kick off the signature-disambiguation task for the posted clusters."""
    raw = request.get_json()
    parsed = DisambiguateSignaturesSchema().load(raw)
    if parsed.errors:
        LOGGER.info("Validation error.", user=raw, errors=parsed.errors)
        return (
            jsonify({"message": "Validation Error.", "errors": parsed.errors}),
            400,
        )
    clusters = parsed.data["clusters"]
    if not current_app.config["FEATURE_FLAG_ENABLE_DISAMBIGUATION"]:
        LOGGER.info("Disambiguation is disabled.")
        return jsonify({"message": "Disambiguation feature is disabled."}), 200
    task = disambiguate_signatures.apply_async(
        kwargs={"clusters": clusters}, queue="disambiguation"
    )
    LOGGER.info("Disambiguation task started.", task_uuid=task.id)
    return jsonify({"message": "Disambiguation started."}), 200
def import_article_view(identifier):
    """Import an article by identifier, mapping domain errors to HTTP codes.

    409: article already exists; 404: not found at the source; 500: parsing
    failed; 400: unrecognized identifier; 502: upstream request failed.
    """
    try:
        article = import_article(identifier)
        return jsonify({"data": literature_v1.dump(article)})
    except ExistingArticleError as e:
        message, recid = e.args
        return jsonify(message=str(message), recid=str(recid)), 409
    except ImportArticleNotFoundError as e:
        LOGGER.exception("Exception in import_article_view", exception=e)
        return jsonify(message=str(e)), 404
    except ImportParsingError as e:
        LOGGER.exception("Exception in import_article_view", exception=e)
        return jsonify(message="The article has an invalid format."), 500
    except UnknownImportIdentifierError:
        return jsonify(message=f"{identifier} is not a recognized identifier."), 400
    except RequestException as e:
        LOGGER.exception("Exception in import_article_view", exception=e)
        return (
            jsonify(
                message="There was an error when importing metadata. Please try again later or fill the form manually."
            ),
            502,
        )