def process_single(tarball, sdir=CFG_TMPDIR, xtract_text=False, \
                   upload_plots=True, force=False, squash=False):
    '''
    Processes one tarball end-to-end: untar it, convert the images found,
    extract captions from the TeX sources, then write (and optionally
    upload) the resulting MARCXML.

    @param: tarball (string): the absolute location of the tarball we wish
        to process
    @param: sdir (string): where we should put all the intermediate files for
        the processing.  if you're uploading, this directory should be one of
        the ones specified in CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS, else the
        upload won't work
    @param: xtract_text (boolean): true iff you want to run pdftotext on the
        pdf versions of the tarfiles.  this programme assumes that the pdfs
        are named the same as the tarballs but with a .pdf extension.
    @param: upload_plots (boolean): true iff you want to bibupload the plots
        extracted by this process
    @param: force (boolean): recreate the output MARCXML file even when one
        already exists
    @param: squash (boolean): if true, append into the shared SQUASHED_FILE
        in sdir instead of writing a per-record file

    @return: None
    '''
    sub_dir, refno = get_defaults(tarball, sdir)

    if not squash:
        marc_name = os.path.join(sub_dir, refno + '.xml')
    else:
        marc_name = os.path.join(sdir, SQUASHED_FILE)

    # Truncate any stale per-record output so we start from a clean file;
    # the squashed file is shared across records, so never truncate it here.
    if (force or not os.path.exists(marc_name)) and not squash:
        open(marc_name, 'w').close()

    if xtract_text:
        extract_text(tarball)

    image_list, tex_files = untar(tarball, sub_dir)
    # Fix: was `tex_files == [] or tex_files == None` — use truthiness
    # instead of equality/identity-by-== comparisons (PEP 8).
    if not tex_files:
        write_message(os.path.split(tarball)[-1] + ' is not a tarball')
        # Fix: pass the path as a shell argument instead of concatenating it
        # into the command string — consistent with the parameterized
        # run_shell_command('rm -r %s', (sub_dir,)) form used elsewhere in
        # this file, and safe against shell metacharacters in the path.
        run_shell_command('rm -r %s', (sub_dir,))
        return

    converted_image_list = convert_images(image_list)

    # Seed with a blank entry; extract_captions results are appended per
    # TeX file found in the tarball.
    images_and_captions_and_labels = [['', '', []]]
    for tex_file in tex_files:
        images_and_captions_and_labels.extend(
            extract_captions(tex_file, sub_dir, converted_image_list))

    marc_name = create_MARC(images_and_captions_and_labels, tex_files[0],
                            refno, converted_image_list, marc_name)
    # Fix: was `marc_name != None` — compare to None with `is not`.
    if marc_name is not None and not squash:
        write_message('generated ' + marc_name)
        if upload_plots:
            upload_to_site(marc_name)

    clean_up(image_list)
    write_message('work complete on ' + os.path.split(tarball)[-1])
def process_single(tarball, sdir=CFG_TMPDIR, xtract_text=False, \
                   upload_plots=False, force=False, squash="", \
                   yes_i_know=False, refno_url="", \
                   clean=False):
    """
    Processes one tarball end-to-end: untar it, convert the images found,
    extract captions and contexts from the TeX sources, then write (and
    optionally upload) the resulting MARCXML.

    @param: tarball (string): the absolute location of the tarball we wish
        to process
    @param: sdir (string): where we should put all the intermediate files for
        the processing.  if you're uploading, this directory should be one of
        the ones specified in CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS, else the
        upload won't work
    @param: xtract_text (boolean): true iff you want to run pdftotext on the
        pdf versions of the tarfiles.  this programme assumes that the pdfs
        are named the same as the tarballs but with a .pdf extension.
    @param: upload_plots (boolean): true iff you want to bibupload the plots
        extracted by this process
    @param: force (boolean): force creation of new xml file
    @param: squash: write MARCXML output into a specified 'squash' file
        instead of single files.
    @param: yes_i_know: if True, no user interaction if upload_plots is True
    @param: refno_url: URL to the invenio-instance to query for refno.
    @param: clean: if True, everything except the original tarball, plots and
        context-files will be removed

    @return: marc_name(string): path to generated marcxml file
    """
    sub_dir, refno = get_defaults(tarball, sdir, refno_url)

    if not squash:
        marc_name = os.path.join(sub_dir, '%s.xml' % (refno,))
        # Start a fresh MARCXML document (collection element is closed when
        # the record's XML is appended further down).
        if (force or not os.path.exists(marc_name)):
            marc_fd = open(marc_name, 'w')
            marc_fd.write('<?xml version="1.0" encoding="UTF-8"?>\n<collection>\n')
            marc_fd.close()
    else:
        # Squash mode: all records share one output file, opened in append
        # mode below; its header/footer are managed by the caller.
        marc_name = squash

    if xtract_text:
        extract_text(tarball)

    try:
        extracted_files_list, image_list, tex_files = untar(tarball, sub_dir)
    except Timeout:
        write_message('Timeout during tarball extraction on %s' % (tarball,))
        return

    # Fix: was `tex_files == [] or tex_files == None` — use truthiness
    # instead of equality comparisons against [] and None (PEP 8).
    if not tex_files:
        write_message('%s is not a tarball' % (os.path.split(tarball)[-1],))
        run_shell_command('rm -r %s', (sub_dir,))
        return

    converted_image_list = convert_images(image_list)
    write_message('converted %d of %d images found for %s' % (len(converted_image_list), \
                                                              len(image_list), \
                                                              os.path.basename(tarball)))
    extracted_image_data = []
    for tex_file in tex_files:
        # Extract images, captions and labels
        partly_extracted_image_data = extract_captions(tex_file, sub_dir, \
                                                       converted_image_list)
        if partly_extracted_image_data:
            # Add proper filepaths and do various cleaning
            cleaned_image_data = prepare_image_data(partly_extracted_image_data, \
                                                    tex_file, converted_image_list)
            # Using prev. extracted info, get contexts for each image found
            extracted_image_data.extend((extract_context(tex_file, cleaned_image_data)))

    extracted_image_data = remove_dups(extracted_image_data)

    if not extracted_image_data:
        write_message('No plots detected in %s' % (refno,))
    else:
        # Without a refno_url there is nothing to resolve the refno against,
        # so the MARC is generated without one.
        if refno_url == "":
            refno = None
        create_contextfiles(extracted_image_data)
        marc_xml = create_MARC(extracted_image_data, tarball, refno)
        if not squash:
            marc_xml += "\n</collection>"
        # Fix: was `marc_name != None` — compare to None with `is not`.
        if marc_name is not None:
            marc_fd = open(marc_name, 'a')
            marc_fd.write('%s\n' % (marc_xml,))
            marc_fd.close()
            if not squash:
                write_message('generated %s' % (marc_name,))
                if upload_plots:
                    upload_to_site(marc_name, yes_i_know)

    if clean:
        clean_up(extracted_files_list, image_list)

    write_message('work complete on %s' % (os.path.split(tarball)[-1],))
    return marc_name
def apply_filter(rec):
    """
    Filters the record to be compatible within Inspire.

    Rewrites/normalizes the MARC fields of a harvested CDS record in place
    (collections, language, identifiers, dates, authors, keywords,
    experiments, journals and attached files) so it can be ingested by
    Inspire.

    Parameters:
     * rec - dictionary: BibRecord structure

    Returns: dictionary, BibRecord structure (the same, mutated `rec`)
    """
    # Move recid from 001 to 035 if not hidden
    cds_id = rec['001'][0][3]
    if not 'hidden' in [x.lower() for x in record_get_field_values(rec, "980",
                                                                   code="a")]:
        record_add_field(rec, '035', subfields=[('9', 'CDS'), ('a', cds_id)])

    # Clear control fields
    record_strip_controlfields(rec)

    # Clear other uninteresting fields
    interesting_fields = ["024", "041", "035", "037", "088", "100",
                          "110", "111", "242", "245", "246", "260",
                          "269", "300", "502", "650", "653", "693",
                          "700", "710", "773", "856", "520", "500",
                          "980"]
    # NOTE: Python 2 dict.keys() returns a list snapshot, so deleting
    # fields while iterating is safe here.
    for tag in rec.keys():
        if tag not in interesting_fields:
            record_delete_fields(rec, tag)

    # 980 Determine Collections
    collections = set([])
    for value in record_get_field_values(rec, '980', code='a'):
        if 'NOTE' in value.upper():
            collections.add('NOTE')
        if 'THESIS' in value.upper():
            collections.add('THESIS')
        if 'CONFERENCEPAPER' in value.upper():
            collections.add('ConferencePaper')

    if is_published(rec):
        collections.add("PUBLISHED")
        collections.add("CITEABLE")

    if not 'NOTE' in collections:
        # TODO: Move this to a KB
        # Report-number prefixes that mark a record as a NOTE.
        kb = ['ATLAS-CONF-', 'CMS-PAS-', 'ATL-', 'CMS-DP-',
              'ALICE-INT-', 'LHCb-PUB-']
        values = record_get_field_values(rec, "088", code='a')
        for val, rep in product(values, kb):
            if val.startswith(rep):
                collections.add('NOTE')
                break

    # 980 Arxiv tag
    if record_get_field_values(rec, '035', filter_subfield_code="a",
                               filter_subfield_value="arXiv"):
        collections.add("arXiv")

    # 980 HEP && CORE
    collections.add('HEP')
    collections.add('CORE')

    # 980 Conference Note
    if not 'ConferencePaper' in collections:
        for value in record_get_field_values(rec, '962', code='n'):
            # A trailing two-digit year in 962__n marks a conference paper.
            if value[-2:].isdigit():
                collections.add('ConferencePaper')
                break

    # Old 980s are dropped; the computed `collections` set is re-added
    # as fresh 980 fields at the end of this function.
    record_delete_fields(rec, "980")

    intnote = record_get_field_values(rec, '690',
                                      filter_subfield_code="a",
                                      filter_subfield_value='INTNOTE')
    if intnote:
        val_088 = record_get_field_values(rec, '088',
                                          filter_subfield_code="a")
        for val in val_088:
            if 'CMS' in val:
                url = ('http://weblib.cern.ch/abstract?CERN-CMS' +
                       val.split('CMS', 1)[-1])
                record_add_field(rec, '856', ind1='4', subfields=[('u', url)])

    # 041 Language: translate language codes, drop plain English.
    languages = get_languages()
    language_fields = record_get_field_instances(rec, '041')
    record_delete_fields(rec, "041")
    for field in language_fields:
        subs = field_get_subfields(field)
        if 'a' in subs:
            if "eng" in subs['a']:
                continue
            new_value = translate_config(subs['a'][0], languages)
            new_subs = [('a', new_value)]
            record_add_field(rec, "041", subfields=new_subs)

    # 035 Externals: drop forbidden system identifiers, move SPIRES
    # identifiers to 970.
    scn_035_fields = record_get_field_instances(rec, '035')
    forbidden_values = ["cercer", "inspire", "xx",
                        "cern annual report", "cmscms", "wai01"]
    for field in scn_035_fields:
        subs = field_get_subfields(field)
        if '9' in subs:
            if not 'a' in subs:
                continue
            for sub in subs['9']:
                if sub.lower() in forbidden_values:
                    break
            else:
                # No forbidden values (We did not "break")
                suffixes = [s.lower() for s in subs['9']]
                if 'spires' in suffixes:
                    new_subs = [('a', 'SPIRES-%s' % subs['a'][0])]
                    record_add_field(rec, '970', subfields=new_subs)
                    continue
        if 'a' in subs:
            for sub in subs['a']:
                if sub.lower() in forbidden_values:
                    record_delete_field(rec, tag="035",
                                        field_position_global=field[4])

    # 088: P0/CM-P0 report numbers go to 595 (CERN note); all non-SIS
    # values are copied to 037, then 088 is removed.
    rep_088_fields = record_get_field_instances(rec, '088')
    for field in rep_088_fields:
        subs = field_get_subfields(field)
        if '9' in subs:
            for val in subs['9']:
                if val.startswith('P0') or val.startswith('CM-P0'):
                    sf = [('9', 'CERN'), ('b', val)]
                    record_add_field(rec, '595', subfields=sf)
        for key, val in field[0]:
            if key in ['a', '9'] and not val.startswith('SIS-'):
                record_add_field(rec, '037', subfields=[('a', val)])
    record_delete_fields(rec, "088")

    # 037 Externals also...
    # arXiv identifiers get a '9' source subfield plus the 695 category
    # as 'c'; SIS-prefixed identifiers are dropped.
    rep_037_fields = record_get_field_instances(rec, '037')
    for field in rep_037_fields:
        subs = field_get_subfields(field)
        if 'a' in subs:
            for value in subs['a']:
                if 'arXiv' in value:
                    new_subs = [('a', value), ('9', 'arXiv')]
                    for fld in record_get_field_instances(rec, '695'):
                        for key, val in field_get_subfield_instances(fld):
                            if key == 'a':
                                new_subs.append(('c', val))
                                break
                    nf = create_field(subfields=new_subs)
                    record_replace_field(rec, '037', nf, field[4])
        for key, val in field[0]:
            if key in ['a', '9'] and val.startswith('SIS-'):
                record_delete_field(rec, '037',
                                    field_position_global=field[4])

    # 242 -> 246 (translated title)
    for field in record_get_field_instances(rec, '242'):
        record_add_field(rec, '246', subfields=field[0])
    record_delete_fields(rec, '242')

    # 269 Date normalization
    for field in record_get_field_instances(rec, '269'):
        for idx, (key, value) in enumerate(field[0]):
            if key == "c":
                field[0][idx] = ("c", convert_date_to_iso(value))
                # NOTE(review): 260 is dropped as soon as any 269__c is
                # normalized — confirm this precedence is intended.
                record_delete_fields(rec, "260")

    # For non-theses, surviving 260 fields become 269.
    if not 'THESIS' in collections:
        for field in record_get_field_instances(rec, '260'):
            record_add_field(rec, '269', subfields=field[0])
        record_delete_fields(rec, '260')

    # 300 page number: keep only digits/dashes; drop "mult." / " p" fields.
    for field in record_get_field_instances(rec, '300'):
        for idx, (key, value) in enumerate(field[0]):
            if key == 'a':
                if "mult." not in value and value != " p":
                    field[0][idx] = ('a', re.sub(r'[^\d-]+', '', value))
                else:
                    record_delete_field(rec, '300',
                                        field_position_global=field[4])
                    break

    # 100 & 700 punctuate author names
    # Only names without authority identifiers ($i/$j, or placeholder
    # XX/YY values) are re-punctuated.
    author_names = record_get_field_instances(rec, '100')
    author_names.extend(record_get_field_instances(rec, '700'))
    for field in author_names:
        subs = field_get_subfields(field)
        if not 'i' in subs or 'XX' in subs['i']:
            if not 'j' in subs or 'YY' in subs['j']:
                for idx, (key, value) in enumerate(field[0]):
                    if key == 'a':
                        field[0][idx] = ('a', punctuate_authorname(value))

    # 700 -> 701 Thesis supervisors
    if 'THESIS' in collections:
        for field in record_get_field_instances(rec, '700'):
            record_add_field(rec, '701', subfields=field[0])
        record_delete_fields(rec, '700')

    # 501 move subfields
    # (Shift 502 subfields one code down: a->b, b->c, c->d.)
    fields_501 = record_get_field_instances(rec, '502')
    for idx, field in enumerate(fields_501):
        new_subs = []
        for key, value in field[0]:
            if key == 'a':
                new_subs.append(('b', value))
            elif key == 'b':
                new_subs.append(('c', value))
            elif key == 'c':
                new_subs.append(('d', value))
            else:
                new_subs.append((key, value))
        fields_501[idx] = field_swap_subfields(field, new_subs)

    # 650 Translate Categories
    categories = get_categories()
    category_fields = record_get_field_instances(rec, '650',
                                                 ind1='1', ind2='7')
    record_delete_fields(rec, "650")
    for field in category_fields:
        for idx, (key, value) in enumerate(field[0]):
            if key == 'a':
                new_value = translate_config(value, categories)
                if new_value != value:
                    # Translation found -> INSPIRE scheme
                    new_subs = [('2', 'INSPIRE'), ('a', new_value)]
                else:
                    # No translation -> keep original CERN (SzGeCERN) term
                    new_subs = [('2', 'SzGeCERN'), ('a', value)]
                record_add_field(rec, "650", ind1="1", ind2="7",
                                 subfields=new_subs)
                break

    # 653 Free Keywords: tag each keyword with its 'author' source.
    for field in record_get_field_instances(rec, '653', ind1='1'):
        subs = field_get_subfields(field)
        new_subs = []
        if 'a' in subs:
            for val in subs['a']:
                new_subs.extend([('9', 'author'), ('a', val)])
        new_field = create_field(subfields=new_subs, ind1='1')
        record_replace_field(rec, '653', new_field,
                             field_position_global=field[4])

    experiments = get_experiments()
    # 693 Remove if 'not applicable'
    for field in record_get_field_instances(rec, '693'):
        subs = field_get_subfields(field)
        all_subs = subs.get('a', []) + subs.get('e', [])
        if 'not applicable' in [x.lower() for x in all_subs]:
            record_delete_field(rec, '693',
                                field_position_global=field[4])
        new_subs = []
        experiment_a = ""
        experiment_e = ""
        # Python 2 dict iteration; accelerator ($a) and experiment ($e)
        # are combined into one "<a>---<e>" key for translation.
        for (key, value) in subs.iteritems():
            if key == 'a':
                experiment_a = value[0]
                new_subs.append((key, value[0]))
            elif key == 'e':
                experiment_e = value[0]
        experiment = "%s---%s" % (experiment_a.replace(" ", "-"),
                                  experiment_e)
        translated_experiments = translate_config(experiment, experiments)
        new_subs.append(("e", translated_experiments))
        record_delete_field(rec, tag="693",
                            field_position_global=field[4])
        record_add_field(rec, "693", subfields=new_subs)

    # 710 Collaboration: drop $5 subfields and 'CERN. Geneva' values.
    for field in record_get_field_instances(rec, '710'):
        subs = field_get_subfield_instances(field)
        # NOTE(review): popping by index while enumerating a copy shifts
        # later indices — with multiple removable subfields this can pop
        # the wrong element; verify inputs never have more than one.
        for idx, (key, value) in enumerate(subs[:]):
            if key == '5':
                subs.pop(idx)
            elif value.startswith('CERN. Geneva'):
                subs.pop(idx)
        if len(subs) == 0:
            record_delete_field(rec, '710',
                                field_position_global=field[4])

    # 773 journal translations
    journals = get_journals()
    for field in record_get_field_instances(rec, '773'):
        subs = field_get_subfield_instances(field)
        new_subs = []
        for idx, (key, value) in enumerate(subs):
            if key == 'p':
                new_subs.append((key, translate_config(value, journals)))
            else:
                new_subs.append((key, value))
        record_delete_field(rec, tag="773",
                            field_position_global=field[4])
        record_add_field(rec, "773", subfields=new_subs)

    # FFT (856) Dealing with graphs
    figure_counter = 0
    for field in record_get_field_instances(rec, '856', ind1='4'):
        subs = field_get_subfields(field)
        newsubs = []
        remove = False

        if 'z' in subs:
            is_figure = [s for s in subs['z'] if "figure" in s.lower()]
            if is_figure and 'u' in subs:
                is_subformat = [s for s in subs['u']
                                if "subformat" in s.lower()]
                if not is_subformat:
                    url = subs['u'][0]
                    if url.endswith(".pdf"):
                        # We try to convert
                        fd, local_url = mkstemp(suffix=os.path.basename(url),
                                                dir=CFG_TMPSHAREDDIR)
                        os.close(fd)
                        _print("Downloading %s into %s" % (url, local_url),
                               verbose=5)
                        plotfile = ""
                        try:
                            plotfile = download_url(url=url,
                                                    download_to_file=local_url,
                                                    timeout=30.0)
                        except InvenioFileDownloadError:
                            _print("Download failed while attempting to reach %s. Skipping.." % (url,))
                            remove = True
                        if plotfile:
                            converted = convert_images([plotfile])
                            if converted:
                                url = converted.pop()
                                _print("Successfully converted %s to %s" % (local_url, url),
                                       verbose=5)
                            else:
                                _print("Conversion failed on %s" % (local_url,))
                                url = None
                                remove = True
                    if url:
                        newsubs.append(('a', url))
                        newsubs.append(('t', 'Plot'))
                        figure_counter += 1
                        if 'y' in subs:
                            newsubs.append(('d', "%05d %s" % (figure_counter, subs['y'][0])))
                            newsubs.append(('n', subs['y'][0]))
                        else:
                            # Get basename without extension.
                            name = os.path.basename(os.path.splitext(subs['u'][0])[0])
                            newsubs.append(('d', "%05d %s" % (figure_counter, name)))
                            newsubs.append(('n', name))

        # Not a figure: treat any PDF URL as the public fulltext.
        if not newsubs and 'u' in subs:
            is_fulltext = [s for s in subs['u'] if ".pdf" in s]
            if is_fulltext:
                newsubs = [('t', 'INSPIRE-PUBLIC'), ('a', subs['u'][0])]

        # Still nothing: try zip archives of plots (PNG extraction).
        if not newsubs and 'u' in subs:
            remove = True
            is_zipfile = [s for s in subs['u'] if ".zip" in s]
            if is_zipfile:
                url = is_zipfile[0]
                local_url = os.path.join(CFG_TMPSHAREDDIR,
                                         os.path.basename(url))
                _print("Downloading %s into %s" % (url, local_url),
                       verbose=5)
                zipped_archive = ""
                try:
                    zipped_archive = download_url(url=is_zipfile[0],
                                                  download_to_file=local_url,
                                                  timeout=30.0)
                except InvenioFileDownloadError:
                    _print("Download failed while attempting to reach %s. Skipping.." % (is_zipfile[0],))
                    remove = True
                if zipped_archive:
                    unzipped_archive = unzip(zipped_archive)
                    list_of_pngs = locate("*.png", unzipped_archive)
                    for png in list_of_pngs:
                        # Skip editor/OS metadata paths.
                        if "_vti_" in png or "__MACOSX" in png:
                            continue
                        figure_counter += 1
                        plotsubs = []
                        plotsubs.append(('a', png))
                        caption = '%05d %s' % (figure_counter,
                                               os.path.basename(png))
                        plotsubs.append(('d', caption))
                        plotsubs.append(('t', 'Plot'))
                        record_add_field(rec, 'FFT', subfields=plotsubs)

        # Remove 856 links pointing back at CERN servers or ps.gz files.
        if not remove and not newsubs and 'u' in subs:
            urls = ('http://cdsweb.cern.ch', 'http://cms.cern.ch',
                    'http://cmsdoc.cern.ch', 'http://documents.cern.ch',
                    'http://preprints.cern.ch', 'http://cds.cern.ch')
            for val in subs['u']:
                if any(url in val for url in urls):
                    remove = True
                    break
                if val.endswith('ps.gz'):
                    remove = True

        if newsubs:
            record_add_field(rec, 'FFT', subfields=newsubs)
            remove = True

        if remove:
            record_delete_field(rec, '856', ind1='4',
                                field_position_global=field[4])

    # 500 - Preliminary results
    if "THESIS" not in collections:
        subs = [('a', "Preliminary results")]
        record_add_field(rec, "500", subfields=subs)

    # Re-add the computed collections as 980 fields.
    for collection in collections:
        record_add_field(rec, '980', subfields=[('a', collection)])

    return rec
def apply_filter(rec):
    """
    Filters the record to be compatible within Inspire.

    Rewrites/normalizes the MARC fields of a harvested CDS record in place
    (collections, language, identifiers, dates, authors, keywords,
    experiments, journals and attached files) so it can be ingested by
    Inspire.

    Parameters:
     * rec - dictionary: BibRecord structure

    Returns: dictionary, BibRecord structure (the same, mutated `rec`)
    """
    # Move recid from 001 to 035 if not hidden
    cds_id = rec['001'][0][3]
    if not 'hidden' in [x.lower() for x in record_get_field_values(rec, "980",
                                                                   code="a")]:
        record_add_field(rec, '035', subfields=[('9', 'CDS'), ('a', cds_id)])

    # Clear control fields
    record_strip_controlfields(rec)

    # Clear other uninteresting fields
    interesting_fields = ["024", "041", "035", "037", "088", "100",
                          "110", "111", "242", "245", "246", "260",
                          "269", "300", "502", "650", "653", "693",
                          "700", "710", "773", "856", "520", "500",
                          "980"]
    # NOTE: Python 2 dict.keys() returns a list snapshot, so deleting
    # fields while iterating is safe here.
    for tag in rec.keys():
        if tag not in interesting_fields:
            record_delete_fields(rec, tag)

    # 980 Determine Collections
    collections = set([])
    for value in record_get_field_values(rec, '980', code='a'):
        if 'NOTE' in value.upper():
            collections.add('NOTE')
        if 'THESIS' in value.upper():
            collections.add('THESIS')
        if 'CONFERENCEPAPER' in value.upper():
            collections.add('ConferencePaper')

    if is_published(rec):
        collections.add("PUBLISHED")
        collections.add("CITEABLE")

    if not 'NOTE' in collections:
        # TODO: Move this to a KB
        # Report-number prefixes that mark a record as a NOTE.
        kb = ['ATLAS-CONF-', 'CMS-PAS-', 'ATL-', 'CMS-DP-',
              'ALICE-INT-', 'LHCb-PUB-']
        values = record_get_field_values(rec, "088", code='a')
        for val, rep in product(values, kb):
            if val.startswith(rep):
                collections.add('NOTE')
                break

    # 980 Arxiv tag
    if record_get_field_values(rec, '035', filter_subfield_code="a",
                               filter_subfield_value="arXiv"):
        collections.add("arXiv")

    # 980 HEP && CORE
    collections.add('HEP')
    collections.add('CORE')

    # 980 Conference Note
    if not 'ConferencePaper' in collections:
        for value in record_get_field_values(rec, '962', code='n'):
            # A trailing two-digit year in 962__n marks a conference paper.
            if value[-2:].isdigit():
                collections.add('ConferencePaper')
                break

    # Old 980s are dropped; the computed `collections` set is re-added
    # as fresh 980 fields at the end of this function.
    record_delete_fields(rec, "980")

    intnote = record_get_field_values(rec, '690',
                                      filter_subfield_code="a",
                                      filter_subfield_value='INTNOTE')
    if intnote:
        val_088 = record_get_field_values(rec, '088',
                                          filter_subfield_code="a")
        for val in val_088:
            if 'CMS' in val:
                url = ('http://weblib.cern.ch/abstract?CERN-CMS' +
                       val.split('CMS', 1)[-1])
                record_add_field(rec, '856', ind1='4', subfields=[('u', url)])

    # 041 Language: translate language codes, drop plain English.
    languages = get_languages()
    language_fields = record_get_field_instances(rec, '041')
    record_delete_fields(rec, "041")
    for field in language_fields:
        subs = field_get_subfields(field)
        if 'a' in subs:
            if "eng" in subs['a']:
                continue
            new_value = translate_config(subs['a'][0], languages)
            new_subs = [('a', new_value)]
            record_add_field(rec, "041", subfields=new_subs)

    # 035 Externals: drop forbidden system identifiers, move SPIRES
    # identifiers to 970.
    scn_035_fields = record_get_field_instances(rec, '035')
    forbidden_values = ["cercer", "inspire", "xx",
                        "cern annual report", "cmscms", "wai01"]
    for field in scn_035_fields:
        subs = field_get_subfields(field)
        if '9' in subs:
            if not 'a' in subs:
                continue
            for sub in subs['9']:
                if sub.lower() in forbidden_values:
                    break
            else:
                # No forbidden values (We did not "break")
                suffixes = [s.lower() for s in subs['9']]
                if 'spires' in suffixes:
                    new_subs = [('a', 'SPIRES-%s' % subs['a'][0])]
                    record_add_field(rec, '970', subfields=new_subs)
                    continue
        if 'a' in subs:
            for sub in subs['a']:
                if sub.lower() in forbidden_values:
                    record_delete_field(rec, tag="035",
                                        field_position_global=field[4])

    # 088: P0/CM-P0 report numbers go to 595 (CERN note); all non-SIS
    # values are copied to 037, then 088 is removed.
    rep_088_fields = record_get_field_instances(rec, '088')
    for field in rep_088_fields:
        subs = field_get_subfields(field)
        if '9' in subs:
            for val in subs['9']:
                if val.startswith('P0') or val.startswith('CM-P0'):
                    sf = [('9', 'CERN'), ('b', val)]
                    record_add_field(rec, '595', subfields=sf)
        for key, val in field[0]:
            if key in ['a', '9'] and not val.startswith('SIS-'):
                record_add_field(rec, '037', subfields=[('a', val)])
    record_delete_fields(rec, "088")

    # 037 Externals also...
    # arXiv identifiers get a '9' source subfield plus the 695 category
    # as 'c'; SIS-prefixed identifiers are dropped.
    rep_037_fields = record_get_field_instances(rec, '037')
    for field in rep_037_fields:
        subs = field_get_subfields(field)
        if 'a' in subs:
            for value in subs['a']:
                if 'arXiv' in value:
                    new_subs = [('a', value), ('9', 'arXiv')]
                    for fld in record_get_field_instances(rec, '695'):
                        for key, val in field_get_subfield_instances(fld):
                            if key == 'a':
                                new_subs.append(('c', val))
                                break
                    nf = create_field(subfields=new_subs)
                    record_replace_field(rec, '037', nf, field[4])
        for key, val in field[0]:
            if key in ['a', '9'] and val.startswith('SIS-'):
                record_delete_field(rec, '037',
                                    field_position_global=field[4])

    # 242 -> 246 (translated title)
    for field in record_get_field_instances(rec, '242'):
        record_add_field(rec, '246', subfields=field[0])
    record_delete_fields(rec, '242')

    # 269 Date normalization
    for field in record_get_field_instances(rec, '269'):
        for idx, (key, value) in enumerate(field[0]):
            if key == "c":
                field[0][idx] = ("c", convert_date_to_iso(value))
                # NOTE(review): 260 is dropped as soon as any 269__c is
                # normalized — confirm this precedence is intended.
                record_delete_fields(rec, "260")

    # For non-theses, surviving 260 fields become 269.
    if not 'THESIS' in collections:
        for field in record_get_field_instances(rec, '260'):
            record_add_field(rec, '269', subfields=field[0])
        record_delete_fields(rec, '260')

    # 300 page number: keep only digits/dashes; drop "mult." / " p" fields.
    for field in record_get_field_instances(rec, '300'):
        for idx, (key, value) in enumerate(field[0]):
            if key == 'a':
                if "mult." not in value and value != " p":
                    field[0][idx] = ('a', re.sub(r'[^\d-]+', '', value))
                else:
                    record_delete_field(rec, '300',
                                        field_position_global=field[4])
                    break

    # 100 & 700 punctuate author names
    # Only names without authority identifiers ($i/$j, or placeholder
    # XX/YY values) are re-punctuated.
    author_names = record_get_field_instances(rec, '100')
    author_names.extend(record_get_field_instances(rec, '700'))
    for field in author_names:
        subs = field_get_subfields(field)
        if not 'i' in subs or 'XX' in subs['i']:
            if not 'j' in subs or 'YY' in subs['j']:
                for idx, (key, value) in enumerate(field[0]):
                    if key == 'a':
                        field[0][idx] = ('a', punctuate_authorname(value))

    # 700 -> 701 Thesis supervisors
    if 'THESIS' in collections:
        for field in record_get_field_instances(rec, '700'):
            record_add_field(rec, '701', subfields=field[0])
        record_delete_fields(rec, '700')

    # 501 move subfields
    # (Shift 502 subfields one code down: a->b, b->c, c->d.)
    fields_501 = record_get_field_instances(rec, '502')
    for idx, field in enumerate(fields_501):
        new_subs = []
        for key, value in field[0]:
            if key == 'a':
                new_subs.append(('b', value))
            elif key == 'b':
                new_subs.append(('c', value))
            elif key == 'c':
                new_subs.append(('d', value))
            else:
                new_subs.append((key, value))
        fields_501[idx] = field_swap_subfields(field, new_subs)

    # 650 Translate Categories
    categories = get_categories()
    category_fields = record_get_field_instances(rec, '650',
                                                 ind1='1', ind2='7')
    record_delete_fields(rec, "650")
    for field in category_fields:
        for idx, (key, value) in enumerate(field[0]):
            if key == 'a':
                new_value = translate_config(value, categories)
                if new_value != value:
                    # Translation found -> INSPIRE scheme
                    new_subs = [('2', 'INSPIRE'), ('a', new_value)]
                else:
                    # No translation -> keep original CERN (SzGeCERN) term
                    new_subs = [('2', 'SzGeCERN'), ('a', value)]
                record_add_field(rec, "650", ind1="1", ind2="7",
                                 subfields=new_subs)
                break

    # 653 Free Keywords: tag each keyword with its 'author' source.
    for field in record_get_field_instances(rec, '653', ind1='1'):
        subs = field_get_subfields(field)
        new_subs = []
        if 'a' in subs:
            for val in subs['a']:
                new_subs.extend([('9', 'author'), ('a', val)])
        new_field = create_field(subfields=new_subs, ind1='1')
        record_replace_field(rec, '653', new_field,
                             field_position_global=field[4])

    experiments = get_experiments()
    # 693 Remove if 'not applicable'
    for field in record_get_field_instances(rec, '693'):
        subs = field_get_subfields(field)
        all_subs = subs.get('a', []) + subs.get('e', [])
        if 'not applicable' in [x.lower() for x in all_subs]:
            record_delete_field(rec, '693',
                                field_position_global=field[4])
        new_subs = []
        experiment_a = ""
        experiment_e = ""
        # Python 2 dict iteration; accelerator ($a) and experiment ($e)
        # are combined into one "<a>---<e>" key for translation.
        for (key, value) in subs.iteritems():
            if key == 'a':
                experiment_a = value[0]
                new_subs.append((key, value[0]))
            elif key == 'e':
                experiment_e = value[0]
        experiment = "%s---%s" % (experiment_a.replace(" ", "-"),
                                  experiment_e)
        translated_experiments = translate_config(experiment, experiments)
        new_subs.append(("e", translated_experiments))
        record_delete_field(rec, tag="693",
                            field_position_global=field[4])
        record_add_field(rec, "693", subfields=new_subs)

    # 710 Collaboration: drop $5 subfields and 'CERN. Geneva' values.
    for field in record_get_field_instances(rec, '710'):
        subs = field_get_subfield_instances(field)
        # NOTE(review): popping by index while enumerating a copy shifts
        # later indices — with multiple removable subfields this can pop
        # the wrong element; verify inputs never have more than one.
        for idx, (key, value) in enumerate(subs[:]):
            if key == '5':
                subs.pop(idx)
            elif value.startswith('CERN. Geneva'):
                subs.pop(idx)
        if len(subs) == 0:
            record_delete_field(rec, '710',
                                field_position_global=field[4])

    # 773 journal translations
    journals = get_journals()
    for field in record_get_field_instances(rec, '773'):
        subs = field_get_subfield_instances(field)
        new_subs = []
        for idx, (key, value) in enumerate(subs):
            if key == 'p':
                new_subs.append((key, translate_config(value, journals)))
            else:
                new_subs.append((key, value))
        record_delete_field(rec, tag="773",
                            field_position_global=field[4])
        record_add_field(rec, "773", subfields=new_subs)

    # FFT (856) Dealing with graphs
    figure_counter = 0
    for field in record_get_field_instances(rec, '856', ind1='4'):
        subs = field_get_subfields(field)
        newsubs = []
        remove = False

        if 'z' in subs:
            is_figure = [s for s in subs['z'] if "figure" in s.lower()]
            if is_figure and 'u' in subs:
                is_subformat = [s for s in subs['u']
                                if "subformat" in s.lower()]
                if not is_subformat:
                    url = subs['u'][0]
                    if url.endswith(".pdf"):
                        # We try to convert
                        fd, local_url = mkstemp(suffix=os.path.basename(url),
                                                dir=CFG_TMPSHAREDDIR)
                        os.close(fd)
                        _print("Downloading %s into %s" % (url, local_url),
                               verbose=5)
                        plotfile = ""
                        try:
                            plotfile = download_url(url=url,
                                                    download_to_file=local_url,
                                                    timeout=30.0)
                        except InvenioFileDownloadError:
                            _print("Download failed while attempting to reach %s. Skipping.." % (url,))
                            remove = True
                        if plotfile:
                            converted = convert_images([plotfile])
                            if converted:
                                url = converted.pop()
                                _print("Successfully converted %s to %s" % (local_url, url),
                                       verbose=5)
                            else:
                                _print("Conversion failed on %s" % (local_url,))
                                url = None
                                remove = True
                    if url:
                        newsubs.append(('a', url))
                        newsubs.append(('t', 'Plot'))
                        figure_counter += 1
                        if 'y' in subs:
                            newsubs.append(('d', "%05d %s" % (figure_counter, subs['y'][0])))
                            newsubs.append(('n', subs['y'][0]))
                        else:
                            # Get basename without extension.
                            name = os.path.basename(os.path.splitext(subs['u'][0])[0])
                            newsubs.append(('d', "%05d %s" % (figure_counter, name)))
                            newsubs.append(('n', name))

        # Not a figure: treat a PDF URL as the public fulltext (but skip
        # PDF/A subformats, unlike the earlier revision of this function).
        if not newsubs and 'u' in subs:
            is_fulltext = [s for s in subs['u']
                           if ".pdf" in s and not "subformat=pdfa" in s]
            if is_fulltext:
                newsubs = [('t', 'INSPIRE-PUBLIC'), ('a', subs['u'][0])]

        # Still nothing: try zip archives of plots (PNG extraction).
        if not newsubs and 'u' in subs:
            remove = True
            is_zipfile = [s for s in subs['u'] if ".zip" in s]
            if is_zipfile:
                url = is_zipfile[0]
                local_url = os.path.join(CFG_TMPSHAREDDIR,
                                         os.path.basename(url))
                _print("Downloading %s into %s" % (url, local_url),
                       verbose=5)
                zipped_archive = ""
                try:
                    zipped_archive = download_url(url=is_zipfile[0],
                                                  download_to_file=local_url,
                                                  timeout=30.0)
                except InvenioFileDownloadError:
                    _print("Download failed while attempting to reach %s. Skipping.." % (is_zipfile[0],))
                    remove = True
                if zipped_archive:
                    unzipped_archive = unzip(zipped_archive)
                    list_of_pngs = locate("*.png", unzipped_archive)
                    for png in list_of_pngs:
                        # Skip editor/OS metadata paths.
                        if "_vti_" in png or "__MACOSX" in png:
                            continue
                        figure_counter += 1
                        plotsubs = []
                        plotsubs.append(('a', png))
                        caption = '%05d %s' % (figure_counter,
                                               os.path.basename(png))
                        plotsubs.append(('d', caption))
                        plotsubs.append(('t', 'Plot'))
                        record_add_field(rec, 'FFT', subfields=plotsubs)

        # Remove 856 links pointing back at CERN servers or ps.gz files.
        if not remove and not newsubs and 'u' in subs:
            urls = ('http://cdsweb.cern.ch', 'http://cms.cern.ch',
                    'http://cmsdoc.cern.ch', 'http://documents.cern.ch',
                    'http://preprints.cern.ch', 'http://cds.cern.ch')
            for val in subs['u']:
                if any(url in val for url in urls):
                    remove = True
                    break
                if val.endswith('ps.gz'):
                    remove = True

        if newsubs:
            record_add_field(rec, 'FFT', subfields=newsubs)
            remove = True

        if remove:
            record_delete_field(rec, '856', ind1='4',
                                field_position_global=field[4])

    # 500 - Preliminary results
    if "THESIS" not in collections:
        subs = [('a', "Preliminary results")]
        record_add_field(rec, "500", subfields=subs)

    # Re-add the computed collections as 980 fields.
    for collection in collections:
        record_add_field(rec, '980', subfields=[('a', collection)])

    return rec
def apply_filter(rec):
    """Filter a harvested CDS record in place so it is compatible with Inspire.

    Works tag by tag on a BibRecord structure: moves the CDS recid to 035,
    strips control fields, drops uninteresting tags, recomputes the 980
    collection tags, normalises languages (041), external identifiers
    (035/037/088), dates (269), page numbers (300), author punctuation
    (100/700), categories (650), keywords (653), experiments (693),
    collaborations (710) and journal names (773), and converts 856
    attachments into FFT upload instructions.

    Parameters:
     * rec - dictionary: BibRecord structure

    Returns: dictionary, BibRecord structure (the same object, mutated
    in place)
    """
    # Move recid from 001 to 035 if not hidden
    cds_id = rec["001"][0][3]
    if not "hidden" in [x.lower() for x in record_get_field_values(rec, "980", code="a")]:
        record_add_field(rec, "035", subfields=[("9", "CDS"), ("a", cds_id)])

    # Clear control fields
    record_strip_controlfields(rec)

    # Clear other uninteresting fields: anything not in this whitelist
    # is removed from the record outright.
    interesting_fields = ["024", "041", "035", "037", "088", "100",
                          "110", "111", "242", "245", "246", "260",
                          "269", "300", "502", "650", "653", "693",
                          "700", "710", "773", "856", "520", "500",
                          "980"]
    # NOTE: Python 2 dict.keys() returns a list copy, so deleting
    # entries while iterating here is safe.
    for tag in rec.keys():
        if tag not in interesting_fields:
            record_delete_fields(rec, tag)

    # 980 Determine Collections
    collections = set([])
    for value in record_get_field_values(rec, "980", code="a"):
        if "NOTE" in value.upper():
            collections.add("NOTE")
        if "THESIS" in value.upper():
            collections.add("THESIS")
        if "CONFERENCEPAPER" in value.upper():
            collections.add("ConferencePaper")

    if is_published(rec):
        collections.add("PUBLISHED")
        collections.add("CITEABLE")

    if not "NOTE" in collections:
        # TODO: Move this to a KB
        # Report-number prefixes that also qualify a record as a NOTE.
        kb = ["ATLAS-CONF-", "CMS-PAS-", "ATL-", "CMS-DP-",
              "ALICE-INT-", "LHCb-PUB-"]
        values = record_get_field_values(rec, "088", code="a")
        for val, rep in product(values, kb):
            if val.startswith(rep):
                collections.add("NOTE")
                break

    # 980 Arxiv tag
    if record_get_field_values(rec, "035",
                               filter_subfield_code="a",
                               filter_subfield_value="arXiv"):
        collections.add("arXiv")

    # 980 HEP && CORE
    collections.add("HEP")
    collections.add("CORE")

    # 980 Conference Note: a 962__n value ending in two digits
    # (presumably a year suffix — verify against the data) marks a
    # conference paper.
    if not "ConferencePaper" in collections:
        for value in record_get_field_values(rec, "962", code="n"):
            if value[-2:].isdigit():
                collections.add("ConferencePaper")
                break

    record_delete_fields(rec, "980")

    # 690 INTNOTE: add a weblib URL (856) for CMS internal notes.
    intnote = record_get_field_values(rec, "690",
                                      filter_subfield_code="a",
                                      filter_subfield_value="INTNOTE")
    if intnote:
        val_088 = record_get_field_values(rec, "088",
                                          filter_subfield_code="a")
        for val in val_088:
            if "CMS" in val:
                url = "http://weblib.cern.ch/abstract?CERN-CMS" + val.split("CMS", 1)[-1]
                record_add_field(rec, "856", ind1="4",
                                 subfields=[("u", url)])

    # 041 Language: drop English, translate other codes via config.
    languages = get_languages()
    language_fields = record_get_field_instances(rec, "041")
    record_delete_fields(rec, "041")
    for field in language_fields:
        subs = field_get_subfields(field)
        if "a" in subs:
            if "eng" in subs["a"]:
                # English records carry no 041 on the Inspire side.
                continue
            new_value = translate_config(subs["a"][0], languages)
            new_subs = [("a", new_value)]
            record_add_field(rec, "041", subfields=new_subs)

    # 035 Externals: drop forbidden system identifiers, move SPIRES
    # identifiers to 970.
    scn_035_fields = record_get_field_instances(rec, "035")
    forbidden_values = ["cercer", "inspire", "xx",
                        "cern annual report", "cmscms", "wai01"]
    for field in scn_035_fields:
        subs = field_get_subfields(field)
        if "9" in subs:
            if not "a" in subs:
                continue
            for sub in subs["9"]:
                if sub.lower() in forbidden_values:
                    break
            else:
                # No forbidden values (We did not "break")
                suffixes = [s.lower() for s in subs["9"]]
                if "spires" in suffixes:
                    new_subs = [("a", "SPIRES-%s" % subs["a"][0])]
                    record_add_field(rec, "970", subfields=new_subs)
                    continue
        if "a" in subs:
            for sub in subs["a"]:
                if sub.lower() in forbidden_values:
                    record_delete_field(rec, tag="035",
                                        field_position_global=field[4])

    # 088 Report numbers: P0/CM-P0 values become a CERN 595 note; all
    # non-SIS a/9 values are copied into 037; 088 is then dropped.
    rep_088_fields = record_get_field_instances(rec, "088")
    for field in rep_088_fields:
        subs = field_get_subfields(field)
        if "9" in subs:
            for val in subs["9"]:
                if val.startswith("P0") or val.startswith("CM-P0"):
                    sf = [("9", "CERN"), ("b", val)]
                    record_add_field(rec, "595", subfields=sf)
        for key, val in field[0]:
            if key in ["a", "9"] and not val.startswith("SIS-"):
                record_add_field(rec, "037", subfields=[("a", val)])
    record_delete_fields(rec, "088")

    # 037 Externals also...
    # arXiv values get a ("9", "arXiv") source plus the 695 category as
    # subfield c; SIS- values are deleted.
    rep_037_fields = record_get_field_instances(rec, "037")
    for field in rep_037_fields:
        subs = field_get_subfields(field)
        if "a" in subs:
            for value in subs["a"]:
                if "arXiv" in value:
                    new_subs = [("a", value), ("9", "arXiv")]
                    for fld in record_get_field_instances(rec, "695"):
                        for key, val in field_get_subfield_instances(fld):
                            if key == "a":
                                new_subs.append(("c", val))
                                break
                    nf = create_field(subfields=new_subs)
                    record_replace_field(rec, "037", nf, field[4])
        for key, val in field[0]:
            if key in ["a", "9"] and val.startswith("SIS-"):
                record_delete_field(rec, "037",
                                    field_position_global=field[4])

    # 242 translated titles are moved to 246.
    for field in record_get_field_instances(rec, "242"):
        record_add_field(rec, "246", subfields=field[0])
    record_delete_fields(rec, "242")

    # 269 Date normalization
    for field in record_get_field_instances(rec, "269"):
        for idx, (key, value) in enumerate(field[0]):
            if key == "c":
                field[0][idx] = ("c", convert_date_to_iso(value))
                # NOTE(review): deleting 260 from inside the 269 loop
                # looks odd (it also defeats the 260->269 copy below for
                # records that already have a 269 date) — confirm this
                # is the intended behaviour.
                record_delete_fields(rec, "260")

    # For non-theses, any remaining 260 imprint date becomes a 269 date.
    if not "THESIS" in collections:
        for field in record_get_field_instances(rec, "260"):
            record_add_field(rec, "269", subfields=field[0])
        record_delete_fields(rec, "260")

    # 300 page number: strip everything but digits and dashes; drop the
    # field entirely for "mult." / " p" placeholder values.
    for field in record_get_field_instances(rec, "300"):
        for idx, (key, value) in enumerate(field[0]):
            if key == "a":
                if "mult." not in value and value != " p":
                    field[0][idx] = ("a", re.sub(r"[^\d-]+", "", value))
                else:
                    record_delete_field(rec, "300",
                                        field_position_global=field[4])
                    break

    # 100 & 700 punctuate author names, unless an author id is present
    # ($i not "XX" or $j not "YY").
    author_names = record_get_field_instances(rec, "100")
    author_names.extend(record_get_field_instances(rec, "700"))
    for field in author_names:
        subs = field_get_subfields(field)
        if not "i" in subs or "XX" in subs["i"]:
            if not "j" in subs or "YY" in subs["j"]:
                for idx, (key, value) in enumerate(field[0]):
                    if key == "a":
                        field[0][idx] = ("a", punctuate_authorname(value))

    # 700 -> 701 Thesis supervisors
    if "THESIS" in collections:
        for field in record_get_field_instances(rec, "700"):
            record_add_field(rec, "701", subfields=field[0])
        record_delete_fields(rec, "700")

    # 501 move subfields: shift 502 subfield codes a->b, b->c, c->d.
    fields_501 = record_get_field_instances(rec, "502")
    for idx, field in enumerate(fields_501):
        new_subs = []
        for key, value in field[0]:
            if key == "a":
                new_subs.append(("b", value))
            elif key == "b":
                new_subs.append(("c", value))
            elif key == "c":
                new_subs.append(("d", value))
            else:
                new_subs.append((key, value))
        # field_swap_subfields mutates/returns the field with the new
        # subfield list — presumably updating rec in place; verify.
        fields_501[idx] = field_swap_subfields(field, new_subs)

    # 650 Translate Categories: translated values are tagged INSPIRE,
    # untranslated ones keep the SzGeCERN source. Only the first $a per
    # field is used (note the break).
    categories = get_categories()
    category_fields = record_get_field_instances(rec, "650",
                                                 ind1="1", ind2="7")
    record_delete_fields(rec, "650")
    for field in category_fields:
        for idx, (key, value) in enumerate(field[0]):
            if key == "a":
                new_value = translate_config(value, categories)
                if new_value != value:
                    new_subs = [("2", "INSPIRE"), ("a", new_value)]
                else:
                    new_subs = [("2", "SzGeCERN"), ("a", value)]
                record_add_field(rec, "650", ind1="1", ind2="7",
                                 subfields=new_subs)
                break

    # 653 Free Keywords: each $a keyword gets a ("9", "author") source.
    for field in record_get_field_instances(rec, "653", ind1="1"):
        subs = field_get_subfields(field)
        new_subs = []
        if "a" in subs:
            for val in subs["a"]:
                new_subs.extend([("9", "author"), ("a", val)])
        new_field = create_field(subfields=new_subs, ind1="1")
        record_replace_field(rec, "653", new_field,
                             field_position_global=field[4])

    experiments = get_experiments()
    # 693 Remove if 'not applicable'
    for field in record_get_field_instances(rec, "693"):
        subs = field_get_subfields(field)
        all_subs = subs.get("a", []) + subs.get("e", [])
        if "not applicable" in [x.lower() for x in all_subs]:
            record_delete_field(rec, "693",
                                field_position_global=field[4])
        # NOTE(review): there is no `continue` after the deletion above,
        # so a 'not applicable' field is still rebuilt and re-added by
        # the delete/add pair below — confirm this is intended.
        new_subs = []
        experiment_a = ""
        experiment_e = ""
        for (key, value) in subs.iteritems():
            if key == "a":
                experiment_a = value[0]
                new_subs.append((key, value[0]))
            elif key == "e":
                experiment_e = value[0]
        # Accelerator---experiment key, e.g. "CERN-LHC---ATLAS",
        # translated through the experiments config mapping.
        experiment = "%s---%s" % (experiment_a.replace(" ", "-"),
                                  experiment_e)
        translated_experiments = translate_config(experiment,
                                                  experiments)
        new_subs.append(("e", translated_experiments))
        record_delete_field(rec, tag="693",
                            field_position_global=field[4])
        record_add_field(rec, "693", subfields=new_subs)

    # 710 Collaboration: drop $5 subfields and "CERN. Geneva" values;
    # delete the field when nothing is left.
    for field in record_get_field_instances(rec, "710"):
        subs = field_get_subfield_instances(field)
        # NOTE(review): popping from `subs` while enumerating the copy
        # `subs[:]` shifts later indices, so consecutive removable
        # subfields may pop the wrong entry — verify against real data.
        for idx, (key, value) in enumerate(subs[:]):
            if key == "5":
                subs.pop(idx)
            elif value.startswith("CERN. Geneva"):
                subs.pop(idx)
        if len(subs) == 0:
            record_delete_field(rec, "710",
                                field_position_global=field[4])

    # 773 journal translations: rewrite $p through the journals mapping,
    # keeping all other subfields as they are.
    journals = get_journals()
    for field in record_get_field_instances(rec, "773"):
        subs = field_get_subfield_instances(field)
        new_subs = []
        for idx, (key, value) in enumerate(subs):
            if key == "p":
                new_subs.append((key, translate_config(value, journals)))
            else:
                new_subs.append((key, value))
        record_delete_field(rec, tag="773",
                            field_position_global=field[4])
        record_add_field(rec, "773", subfields=new_subs)

    # FFT (856) Dealing with graphs
    figure_counter = 0
    for field in record_get_field_instances(rec, "856", ind1="4"):
        subs = field_get_subfields(field)
        newsubs = []
        remove = False

        # Case 1: the link is labelled as a figure in $z -> download,
        # convert, and attach it as a Plot FFT.
        if "z" in subs:
            is_figure = [s for s in subs["z"] if "figure" in s.lower()]
            if is_figure and "u" in subs:
                is_subformat = [s for s in subs["u"]
                                if "subformat" in s.lower()]
                if not is_subformat:
                    url = subs["u"][0]
                    if url.endswith(".pdf"):
                        # We try to convert
                        fd, local_url = mkstemp(suffix=os.path.basename(url),
                                                dir=CFG_TMPSHAREDDIR)
                        os.close(fd)
                        _print("Downloading %s into %s" % (url, local_url),
                               verbose=5)
                        plotfile = ""
                        try:
                            plotfile = download_url(url=url,
                                                    download_to_file=local_url,
                                                    timeout=30.0)
                        except InvenioFileDownloadError:
                            _print("Download failed while attempting to reach %s. Skipping.." % (url,))
                            remove = True
                        if plotfile:
                            converted = convert_images([plotfile])
                            if converted:
                                url = converted.pop()
                                _print("Successfully converted %s to %s" % (local_url, url), verbose=5)
                            else:
                                _print("Conversion failed on %s" % (local_url,))
                                url = None
                                remove = True
                    if url:
                        newsubs.append(("a", url))
                        newsubs.append(("t", "Plot"))
                        figure_counter += 1
                        if "y" in subs:
                            # Caption from $y, prefixed with a sortable
                            # 5-digit figure index.
                            newsubs.append(("d", "%05d %s" % (figure_counter, subs["y"][0])))
                            newsubs.append(("n", subs["y"][0]))
                        else:
                            # Get basename without extension.
                            name = os.path.basename(os.path.splitext(subs["u"][0])[0])
                            newsubs.append(("d", "%05d %s" % (figure_counter, name)))
                            newsubs.append(("n", name))

        # Case 2: a PDF link that is not a figure becomes the
        # INSPIRE-PUBLIC fulltext.
        if not newsubs and "u" in subs:
            is_fulltext = [s for s in subs["u"] if ".pdf" in s]
            if is_fulltext:
                newsubs = [("t", "INSPIRE-PUBLIC"), ("a", subs["u"][0])]

        # Case 3: a zip archive — unpack it and attach every PNG inside
        # as its own Plot FFT. The 856 field itself is removed.
        if not newsubs and "u" in subs:
            remove = True
            is_zipfile = [s for s in subs["u"] if ".zip" in s]
            if is_zipfile:
                url = is_zipfile[0]
                local_url = os.path.join(CFG_TMPSHAREDDIR,
                                         os.path.basename(url))
                _print("Downloading %s into %s" % (url, local_url),
                       verbose=5)
                zipped_archive = ""
                try:
                    zipped_archive = download_url(url=is_zipfile[0],
                                                  download_to_file=local_url,
                                                  timeout=30.0)
                except InvenioFileDownloadError:
                    _print("Download failed while attempting to reach %s. Skipping.." % (is_zipfile[0],))
                    remove = True
                if zipped_archive:
                    unzipped_archive = unzip(zipped_archive)
                    list_of_pngs = locate("*.png", unzipped_archive)
                    for png in list_of_pngs:
                        # Skip FrontPage/macOS metadata artefacts.
                        if "_vti_" in png or "__MACOSX" in png:
                            continue
                        figure_counter += 1
                        plotsubs = []
                        plotsubs.append(("a", png))
                        caption = "%05d %s" % (figure_counter,
                                               os.path.basename(png))
                        plotsubs.append(("d", caption))
                        plotsubs.append(("t", "Plot"))
                        record_add_field(rec, "FFT", subfields=plotsubs)

        # Case 4: untouched links pointing back at CERN servers (or
        # .ps.gz files) are simply removed.
        if not remove and not newsubs and "u" in subs:
            urls = (
                "http://cdsweb.cern.ch",
                "http://cms.cern.ch",
                "http://cmsdoc.cern.ch",
                "http://documents.cern.ch",
                "http://preprints.cern.ch",
                "http://cds.cern.ch",
            )
            for val in subs["u"]:
                if any(url in val for url in urls):
                    remove = True
                    break
                if val.endswith("ps.gz"):
                    remove = True

        if newsubs:
            record_add_field(rec, "FFT", subfields=newsubs)
            remove = True

        if remove:
            record_delete_field(rec, "856", ind1="4",
                                field_position_global=field[4])

    # 500 - Preliminary results
    if "THESIS" not in collections:
        subs = [("a", "Preliminary results")]
        record_add_field(rec, "500", subfields=subs)

    # Re-add the computed collection tags (980 was cleared earlier).
    for collection in collections:
        record_add_field(rec, "980", subfields=[("a", collection)])

    return rec