def work_get(work_id=''):
    """
        Get a work

        swagger_from_file: api_doc/work_get.yml
    """
    result = persistence.get_work(work_id)

    if result:
        thedata = json.loads(result.get('wtf_json'))
        resp = make_response(json.dumps(thedata, indent=4), 200)
        resp.headers['Content-Type'] = 'application/json'
        return resp
    else:
        resp = make_response('NOT FOUND: work resource \'%s\' not found!' % work_id, 404)
        resp.headers['Content-Type'] = 'text/plain'
        return resp
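
# A minimal client-side sketch for the GET endpoint above. The route
# '/api/work/<work_id>', the base URL and the use of the `requests` package
# are assumptions for illustration only; adjust them to the actual app setup.
def _example_work_get(work_id):
    import requests
    resp = requests.get('http://localhost:5000/api/work/%s' % work_id)
    if resp.status_code == 200:
        return resp.json()  # the stored wtf_json record
    return None             # 404: work resource not found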
def work_put(work_id=''):
    """
        Update an existing work

        swagger_from_file: api_doc/work_put.yml
    """
    if request.headers.get('Content-Type') == 'application/json':
        if is_token_valid(request.headers.get('Authorization')):
            result = persistence.get_work(work_id)
            if result:
                original_work = json.loads(result.get('wtf_json'))
                addition_work = ''
                try:
                    addition_work = json.loads(request.data.decode("utf-8"))
                    # print(addition_work)
                except:
                    pass
                if addition_work:
                    if addition_work.get('id') and addition_work.get('id') != original_work.get('id'):
                        return make_response(
                            'Conflict: The ID of the additional data already exists as "same_as"! Please check your data!',
                            409)
                    else:
                        # init merger "work"
                        with open('conf/works_merger.schema.json') as data_file:
                            schema_works_merger = json.load(data_file)

                        merger = Merger(schema_works_merger)

                        # merge it!
                        merged_work = merger.merge(original_work, addition_work)
                        # print(json.dumps(merged_work, indent=4))

                        # load it!
                        form = display_vocabularies.PUBTYPE2FORM.get(merged_work.get('pubtype')).from_json(merged_work)
                        form.changed.data = timestamp()

                        # store it
                        rel = str2bool(request.args.get('rel', 'true'))
                        new_id, message = persistence.record2solr(form, action='update', relitems=rel)

                        response_json = {
                            "message": message,
                            "work": merged_work
                        }

                        return make_response(json.dumps(response_json, indent=4), 200)
                else:
                    # load it!
                    form = display_vocabularies.PUBTYPE2FORM.get(original_work.get('pubtype')).from_json(original_work)
                    form.changed.data = timestamp()

                    # store it
                    rel = str2bool(request.args.get('rel', 'true'))
                    new_id, message = persistence.record2solr(form, action='update', relitems=rel)

                    response_json = {
                        "message": message,
                        "work": original_work
                    }

                    return make_response(json.dumps(response_json, indent=4), 200)
            else:
                return make_response('work resource \'%s\' not found!' % work_id, 404)
        else:
            return make_response('Unauthorized', 401)
    else:
        return make_response('Bad request: invalid accept header!', 400)
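
# A hedged sketch of updating a work via the PUT endpoint above. The route,
# base URL and token value are assumptions; the request must send
# Content-Type: application/json plus a valid Authorization header, and the
# body is merged into the stored record via the works_merger schema.
def _example_work_put(work_id, addition, token):
    import json
    import requests
    resp = requests.put(
        'http://localhost:5000/api/work/%s' % work_id,
        params={'rel': 'true'},  # also re-index related items
        headers={'Content-Type': 'application/json', 'Authorization': token},
        data=json.dumps(addition))
    return resp.status_code, resp.text  # 200 with merged record, else 401/404/409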
def work_post():
    """
        Create a new work

        swagger_from_file: api_doc/work_post.yml
    """
    if request.headers.get('Content-Type') == 'application/json':
        if request.headers.get('Authorization'):
            if is_token_valid(request.headers.get('Authorization')):
                thedata = request.data.decode("utf-8")
                result = persistence.get_work(json.loads(thedata).get('id'))
                rel = str2bool(request.args.get('rel', 'true'))
                if result:
                    # TODO if force=true and existing id not equals posted id: add data
                    force = str2bool(request.args.get('force', 'false'))
                    rewrite = str2bool(request.args.get('rewrite', 'false'))
                    if (force and json.loads(thedata).get('id') != json.loads(result.get('wtf_json')).get('id')) or rewrite:
                        form = display_vocabularies.PUBTYPE2FORM.get(
                            json.loads(thedata).get('pubtype')).from_json(json.loads(thedata))
                        form.created.data = timestamp()
                        form.changed.data = timestamp()

                        new_id, message = persistence.record2solr(form, action='create', relitems=rel)
                        message.append('record forced: %s' % json.loads(thedata).get('id'))

                        result = persistence.get_work(new_id)

                        if result:
                            response_json = {
                                "message": message,
                                "work": json.loads(result.get('wtf_json'))
                            }
                            return make_response(json.dumps(response_json, indent=4), 201)
                        else:
                            response_json = {
                                "message": "failed! record not indexed!",
                                "work": json.loads(thedata)
                            }
                            return make_response(json.dumps(response_json, indent=4), 500)
                    else:
                        return make_response('Bad request: work already exist!', 400)
                else:
                    form = display_vocabularies.PUBTYPE2FORM.get(
                        json.loads(thedata).get('pubtype')).from_json(json.loads(thedata))
                    form.created.data = timestamp()
                    form.changed.data = timestamp()

                    new_id, message = persistence.record2solr(form, action='create', relitems=rel)

                    result = persistence.get_work(new_id)

                    if result:
                        response_json = {
                            "message": message,
                            "work": json.loads(result.get('wtf_json'))
                        }
                        return make_response(json.dumps(response_json, indent=4), 201)
                    else:
                        response_json = {
                            "message": "failed! record not indexed!",
                            "work": json.loads(thedata)
                        }
                        return make_response(json.dumps(response_json, indent=4), 500)
            else:
                return make_response('Unauthorized', 401)
        else:
            return make_response('Unauthorized', 401)
    else:
        return make_response('Bad request: invalid accept header!', 400)
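
# A hedged sketch of creating a work via the POST endpoint above. Route,
# base URL and token are assumptions. 'force' and 'rewrite' mirror the query
# parameters evaluated in work_post(); a 201 response carries the indexed
# record, a 400 signals that a record with this id already exists.
def _example_work_post(new_work, token, force=False, rewrite=False):
    import json
    import requests
    resp = requests.post(
        'http://localhost:5000/api/work',
        params={'rel': 'true',
                'force': str(force).lower(),
                'rewrite': str(rewrite).lower()},
        headers={'Content-Type': 'application/json', 'Authorization': token},
        data=json.dumps(new_work))
    return resp.status_code, resp.text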
def export_openapc(year=''):
    '''
        Getting a bibliography

        swagger_from_file: api_doc/export_openapc.yml
    '''
    if theme(request.access_route) == 'dortmund':
        affiliation = 'tudo'
        affiliation_str = 'TU Dortmund'
    elif theme(request.access_route) == 'bochum':
        affiliation = 'rubi'
        affiliation_str = 'Ruhr-Universität Bochum'
    else:
        affiliation = ''
        affiliation_str = ''

    if affiliation:
        csv = '"institution";"period";"euro";"doi";"is_hybrid";"publisher";"journal_full_title";"issn";"url";"local_id"\n'

        oa_solr = Solr(host=secrets.SOLR_HOST, port=secrets.SOLR_PORT,
                       application=secrets.SOLR_APP, core='hb2',
                       handler='query', query='oa_funds:true', facet='false', rows=100000,
                       fquery=['%s:true' % affiliation, 'fdate:%s' % year])
        oa_solr.request()
        results = oa_solr.results

        if len(results) > 0:
            for record in results:
                thedata = json.loads(record.get('wtf_json'))

                doi = record.get('doi')[0]
                is_hybrid = False
                if record.get('is_hybrid'):
                    is_hybrid = record.get('is_hybrid')
                publisher = ''
                journal_title = ''
                issn = ''
                url = ''

                if not doi:
                    journal_title = ''
                    if record.get('is_part_of_id'):
                        if record.get('is_part_of_id')[0]:
                            host = persistence.get_work(record.get('is_part_of_id')[0])
                            if host:
                                record = json.loads(host.get('wtf_json'))
                                # print(json.dumps(record, indent=4))
                                journal_title = record.get('title')
                                if record.get('fsubseries'):
                                    journal_title = record.get('fsubseries')
                                publisher = ''
                                if record.get('publisher'):
                                    publisher = record.get('publisher')
                                issn = ''
                                if record.get('ISSN'):
                                    for entry in record.get('ISSN'):
                                        if entry:
                                            issn = entry
                                            break

                    url = ''
                    if thedata.get('uri'):
                        for uri in thedata.get('uri'):
                            url = uri
                            break

                csv += '"%s";%s;%s;"%s";"%s";"%s";"%s";"%s";"%s";"%s"\n' % (
                    affiliation_str,
                    year,
                    0.00,
                    doi,
                    is_hybrid,
                    publisher,
                    journal_title,
                    issn,
                    url,
                    record.get('id')
                )

            resp = make_response(csv, 200)
            resp.headers['Content-Type'] = 'text/csv; charset=utf-8'
            return resp
        else:
            return make_response('No results', 404)
    else:
        return make_response('No affiliation parameter set. Please contact the administrator!', 400)
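
# A hedged sketch of consuming the OpenAPC export above. The route
# '/export/openapc/<year>' and the base URL are assumptions; the response is
# semicolon-separated CSV with a fixed header row, so csv.DictReader with
# delimiter=';' is enough to parse it.
def _example_export_openapc(year):
    import csv
    import io
    import requests
    resp = requests.get('http://localhost:5000/export/openapc/%s' % year)
    if resp.status_code != 200:
        return []
    reader = csv.DictReader(io.StringIO(resp.text), delimiter=';')
    return [row for row in reader]  # one dict per OA-funded record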
def export_oa_report(year=''):
    '''
        Getting a bibliography

        swagger_from_file: api_doc/export_oa_report.yml
    '''
    pubtype = request.args.get('pubtype', 'ArticleJournal')

    if theme(request.access_route) == 'dortmund':
        affiliation = 'tudo'
        affiliation_str = 'TU Dortmund'
    elif theme(request.access_route) == 'bochum':
        affiliation = 'rubi'
        affiliation_str = 'Ruhr-Universität Bochum'
    else:
        affiliation = ''
        affiliation_str = ''

    if affiliation:
        csv = '"AU";"TI";"SO";"DT";"RP";"EM";"OI";"PU";"ISSN";"E-ISSN";"DOI";"OA";"RP TUDO";"Fak"\n'

        # TODO search for all publications of the given year
        oa_solr = Solr(host=secrets.SOLR_HOST, port=secrets.SOLR_PORT,
                       application=secrets.SOLR_APP, core='hb2',
                       handler='query', query='*:*', facet='false', rows=100000,
                       fquery=['%s:true' % affiliation, 'fdate:%s' % year, 'pubtype:%s' % pubtype])
        oa_solr.request()
        results = oa_solr.results

        if results:
            for record in results:
                thedata = json.loads(record.get('wtf_json'))

                author = ''
                corresponding_author = ''
                corresponding_affiliation = ''
                faks = ''
                for person in thedata.get('person'):
                    if 'aut' in person.get('role'):
                        author += person.get('name') + ';'
                        if person.get('corresponding_author'):
                            corresponding_author = person.get('name')
                            if person.get('tudo'):
                                corresponding_affiliation = True
                                if person.get('gnd'):
                                    tudo = persistence.get_person(person.get('gnd'))
                                    # print(person.get('gnd'))
                                    if tudo:
                                        if tudo.get('affiliation_id'):
                                            faks = ''
                                            for entry in tudo.get('affiliation_id'):
                                                affil = persistence.get_orga(entry)
                                                fak = ''
                                                if affil:
                                                    has_parent = False
                                                    fak = affil.get('pref_label')
                                                    if affil.get('parent_id'):
                                                        has_parent = True
                                                        fak = '%s / %s' % (affil.get('parent_label'), affil.get('pref_label'))
                                                    while has_parent:
                                                        affil = persistence.get_orga(affil.get('parent_id'))
                                                        if affil.get('parent_id'):
                                                            has_parent = True
                                                            fak = '%s / %s' % (affil.get('parent_label'), affil.get('pref_label'))
                                                        else:
                                                            has_parent = False
                                                else:
                                                    fak = 'LinkError: Person %s' % person.get('gnd')
                                                faks += fak + ';'
                                            faks = faks[:-1]

                author = author[:-1]

                publisher = ''
                journal_title = ''
                issn = ''

                if record.get('is_part_of_id'):
                    if record.get('is_part_of_id')[0]:
                        host = persistence.get_work(record.get('is_part_of_id')[0])
                        if host:
                            record = json.loads(host.get('wtf_json'))
                            # print(json.dumps(record, indent=4))
                            journal_title = record.get('title')
                            if record.get('fsubseries'):
                                journal_title = record.get('fsubseries')
                            publisher = ''
                            if record.get('publisher'):
                                publisher = record.get('publisher')
                            issn = ''
                            if record.get('ISSN'):
                                for entry in record.get('ISSN'):
                                    if entry:
                                        issn = entry
                                        break

                csv += '"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s"\n' % (
                    author,
                    thedata.get('title'),
                    journal_title,
                    'article',
                    corresponding_author,
                    '',
                    '',
                    publisher,
                    issn,
                    '',
                    thedata.get('DOI')[0],
                    thedata.get('oa_funded'),
                    corresponding_affiliation,
                    faks,
                )

        resp = make_response(csv, 200)
        resp.headers['Content-Type'] = 'text/csv; charset=utf-8'
        return resp
    else:
        return make_response('No affiliation parameter set. Please contact the administrator!', 400)
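
# A hedged sketch of fetching the OA report above and writing it to disk.
# The route '/export/oa_report/<year>', the base URL and the default target
# filename are assumptions; 'pubtype' mirrors the query parameter read in
# export_oa_report().
def _example_export_oa_report(year, pubtype='ArticleJournal', target='oa_report.csv'):
    import requests
    resp = requests.get(
        'http://localhost:5000/export/oa_report/%s' % year,
        params={'pubtype': pubtype})
    if resp.status_code == 200:
        with open(target, 'w', encoding='utf-8') as fh:
            fh.write(resp.text)
        return target
    return None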