def diffFilesIndex():
    """Compute differences between the BibTeX index and the PDF/djvu files.

    Returns a dict of bibtex entries:
        * the full bibtex entry with file='' if the file is not found
        * an entry with only the file field if the file has no bibtex entry
    Returns False when the index file cannot be read.
    """
    files = tools.listDir(config.get("folder"))
    files = [i for i in files if tools.getExtension(i) in ['.pdf', '.djvu']]
    try:
        with open(config.get("folder")+'index.bib', 'r', encoding='utf-8') \
                as fh:
            index = BibTexParser(fh.read())
        index_diff = index.get_entry_dict()
    except (TypeError, IOError):
        tools.warning("Unable to open index file.")
        return False

    for key in index_diff.keys():
        # .get guards against entries that have no 'file' field at all
        # (the original raised KeyError on those).
        if index_diff[key].get('file', '') not in files:
            index_diff[key]['file'] = ''
        else:
            files.remove(index_diff[key]['file'])

    # Whatever is left in `files` has no bibtex entry in the index.
    for filename in files:
        index_diff[filename] = {'file': filename}

    # Fix: return the computed diff. The original returned a fresh
    # index.get_entry_dict(), discarding the file-only entries added above.
    return index_diff
def test_homogenizes_fields(self):
    """Field names must be homogenized when homogenize_fields=True."""
    self.maxDiff = None
    path = 'bibtexparser/tests/data/article_homogenize.bib'
    with io.open(path, 'r', encoding='utf-8') as bibfile:
        parser = BibTexParser(bibfile.read(), homogenize_fields=True)
    entry = {
        'ID': 'Cesar2013',
        'ENTRYTYPE': 'article',
        'title': 'An amazing title',
        'author': 'Jean César',
        'journal': 'Nice Journal',
        'year': '2013',
        'month': 'jan',
        'volume': '12',
        'pages': '12-23',
        'keyword': 'keyword1, keyword2',
        'comments': 'A comment',
        'abstract': 'This is an abstract. This line should be '
                    'long enough to test\nmultilines... and with '
                    'a french érudit word',
        'url': "http://my.link/to-content",
        'subject': "Some topic of interest",
        'editor': "Edith Or",
    }
    self.assertEqual(parser.get_entry_dict(), {'Cesar2013': entry})
def load_bibtex(bib_file_name):
    """Parse the BibTeX file `bib_file_name` and group its entries by type.

    Returns a dict mapping each entry type found in the file (e.g.
    'article', 'inproceedings', 'phdthesis') to the list of its entry
    dicts, sorted by year in descending order.
    """
    with open(bib_file_name, 'r') as bib_file:
        bp = BibTexParser(bib_file.read(), customization=convert_to_unicode)

    # Dict of dicts keyed by entry ID: {ID: {authors: ..., ...}, ...}.
    refsdict = bp.get_entry_dict()

    # Group entries by their ENTRYTYPE in a single pass, instead of one
    # full scan of refsdict per distinct type.
    by_type = {}
    for ref in refsdict.values():
        by_type.setdefault(ref["ENTRYTYPE"], []).append(ref)

    # NOTE(review): an earlier comment claimed month-then-year ordering,
    # but only the year is used as sort key here (compare the sibling
    # load_bibtex that also sorts on the month field).
    sort_dict = {}
    for entry_type, refs in by_type.items():
        sort_dict[entry_type] = sorted(refs, key=lambda r: r["year"],
                                       reverse=True)
    return sort_dict
def updateArXiv(entry):
    """Look for new versions of arXiv entry `entry`.

    Returns False if there is no new version or the entry is not an
    arXiv entry; returns the new bibtex entry (as a dict) otherwise.
    """
    bibtex = getBibtex(entry)
    # Only handle arXiv entries.
    if('archiveprefix' not in bibtex or
       'arXiv' not in bibtex['archiveprefix']):
        return False

    arxiv_id = bibtex['eprint']
    # Strip a trailing version suffix ("v2") to query the latest version.
    arxiv_id_no_v = re.sub(r'v\d+\Z', '', arxiv_id)

    # Fix: collect the eprint ids already present in the index. The
    # original built a set of the *characters* of arxiv_id
    # (set(arxiv_id)) and re-tested the same outer `bibtex` dict on
    # every loop iteration instead of each entry's own bibtex.
    ids = set([arxiv_id])
    for ident in getEntries():
        entry_bibtex = getBibtex(ident)
        if('archiveprefix' not in entry_bibtex or
           'arXiv' not in entry_bibtex['archiveprefix']):
            continue
        ids.add(entry_bibtex['eprint'])

    last_bibtex = BibTexParser(fetcher.arXiv2Bib(arxiv_id_no_v))
    last_bibtex = last_bibtex.get_entry_dict()
    last_bibtex = last_bibtex[list(last_bibtex.keys())[0]]

    # A new version exists iff its eprint id is not already indexed.
    if last_bibtex['eprint'] not in ids:
        return last_bibtex
    else:
        return False
def test_article_annotation(self):
    """An annotated author field (author+an) must be parsed through."""
    with codecs.open('bibtexparser/tests/data/article_with_annotation.bib',
                     'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    expected = {
        'ID': 'Cesar2013',
        'ENTRYTYPE': 'article',
        'title': 'An amazing title',
        'author': 'Jean César',
        'author+an': '1=highlight',
        'journal': 'Nice Journal',
        'year': '2013',
        'month': 'jan',
        'volume': '12',
        'pages': '12-23',
        'keyword': 'keyword1, keyword2',
        'comments': 'A comment',
        'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
    }
    self.assertEqual(bib.get_entry_list(), [expected])
    self.assertEqual(bib.get_entry_dict(), {expected['ID']: expected})
def getBibtex(entry, file_id='both', clean=False):
    """Return the bibtex entry corresponding to `entry`, as a dict.

    entry is either a filename or a bibtex ident
    file_id is 'file', 'id' or 'both': how to interpret `entry`
    clean removes the ignored fields listed in the config
    Returns False when the index cannot be read or no entry matches.
    """
    try:
        with open(config.get("folder")+'index.bib', 'r', encoding='utf-8') \
                as fh:
            bibtex = BibTexParser(fh.read())
        bibtex = bibtex.get_entry_dict()
    except (TypeError, IOError):
        tools.warning("Unable to open index file.")
        return False

    bibtex_entry = False
    if file_id == 'both' or file_id == 'id':
        try:
            bibtex_entry = bibtex[entry]
        except KeyError:
            pass
    if file_id == 'both' or file_id == 'file':
        if os.path.isfile(entry):
            for key in bibtex.keys():
                # Entries without a 'file' field can never match a
                # filename (the original raised KeyError on them).
                if 'file' not in bibtex[key]:
                    continue
                if os.path.samefile(bibtex[key]['file'], entry):
                    bibtex_entry = bibtex[key]
                    break
    # Fix: only clean when an entry was actually found; the original
    # attempted `del False[field]` and raised TypeError.
    if clean and bibtex_entry is not False:
        for field in config.get("ignore_fields"):
            try:
                del(bibtex_entry[field])
            except KeyError:
                pass
    return bibtex_entry
def test_article(self):
    """A single @article entry must parse to the expected list and dict."""
    with codecs.open('bibtexparser/tests/data/article.bib', 'r',
                     'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    expected = {
        'ID': 'Cesar2013',
        'ENTRYTYPE': 'article',
        'title': 'An amazing title',
        'author': 'Jean César',
        'journal': 'Nice Journal',
        'year': '2013',
        'month': 'jan',
        'volume': '12',
        'pages': '12-23',
        'keyword': 'keyword1, keyword2',
        'comments': 'A comment',
        'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
    }
    self.assertEqual(bib.get_entry_list(), [expected])
    self.assertEqual(bib.get_entry_dict(), {expected['ID']: expected})
def deleteId(ident):
    """Delete a file based on its id in the bibtex index.

    Removes the file on disk, its (now empty) tag directory, and the
    index entry. Returns True when the id existed, False otherwise.
    """
    try:
        with open(config.get("folder")+'index.bib', 'r', encoding='utf-8') \
                as fh:
            bibtex = BibTexParser(fh.read())
        bibtex = bibtex.get_entry_dict()
    except (IOError, TypeError):
        tools.warning("Unable to open index file.")
        return False

    if ident not in bibtex.keys():
        return False

    # Remember the associated file path before mutating the entry, so
    # the warnings below cannot themselves raise KeyError.
    filename = bibtex[ident].get('file', '')
    try:
        os.remove(filename)
    except OSError:
        tools.warning("Unable to delete file associated to id "+ident+" : " +
                      filename)
    # Drop the containing tag dir when it became empty.
    try:
        if not os.listdir(os.path.dirname(filename)):
            os.rmdir(os.path.dirname(filename))
    except OSError:
        tools.warning("Unable to delete empty tag dir " +
                      os.path.dirname(filename))
    try:
        del(bibtex[ident])
        bibtexRewrite(bibtex)
    except KeyError:
        # Fix: the original warning re-read bibtex[ident]['file'] after
        # the entry was gone, raising a second KeyError.
        tools.warning("No associated bibtex entry in index for file " +
                      filename)
    return True
def load_bibtex(bib_file_name):
    """Parse `bib_file_name` and return its entries grouped by type.

    The returned dict maps each entry type ('article', 'phdthesis', ...)
    to a list of entry dicts sorted by year then month, most recent
    first.
    """
    with open(bib_file_name, 'r') as bib_file:
        parser = BibTexParser(bib_file.read(),
                              customization=convert_to_unicode)

    # Dict of dicts keyed by entry ID: {ID: {authors: ..., ...}, ...}.
    references = parser.get_entry_dict()

    # Deduped set of the document types found in the file.
    entry_types = {ref["ENTRYTYPE"] for ref in references.values()}

    sort_dict = {}
    for entry_type in entry_types:
        of_type = [ref for ref in references.values()
                   if ref["ENTRYTYPE"] == entry_type]
        # Sort by month first, then by year: Python's sort is stable, so
        # the second pass yields year order with months ordered within
        # each year.
        by_month = sorted(
            of_type,
            key=lambda ref: datetime.strptime(ref["month"], '%b').month,
            reverse=True)
        sort_dict[entry_type] = sorted(by_month,
                                       key=lambda ref: ref["year"],
                                       reverse=True)
    return sort_dict
def test_comments_spaces_and_declarations(self):
    """Comments, spacing and @preamble declarations must be handled."""
    path = 'bibtexparser/tests/data/comments_spaces_and_declarations.bib'
    with codecs.open(path, 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    entry = {
        'ID': 'Cesar2013',
        'ENTRYTYPE': 'article',
        'title': 'A great title',
        'author': 'Jean César',
        'journal': 'Nice Journal',
        'year': '2013',
        'month': 'jan',
        'volume': '12',
        'pages': '12-23',
        'keyword': 'keyword1, keyword2',
        'comments': 'A comment',
        'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
    }
    self.assertEqual(bib.get_entry_dict(), {'Cesar2013': entry})
    self.assertEqual(bib.preambles, ["Blah blah"])
def editEntry(entry, file_id='both'):
    """Interactively edit the bibtex entry `entry` (an ident or a filename).

    `file_id` tells how to resolve `entry` ('file', 'id' or 'both').
    Returns True on success, False when the entry does not exist or the
    index cannot be read.
    """
    bibtex = backend.getBibtex(entry, file_id)
    if bibtex is False:
        tools.warning("Entry "+entry+" does not exist.")
        return False

    if file_id == 'file':
        filename = entry
    else:
        filename = bibtex['file']
    # Let the user review / fix the bibtex entry in their editor.
    new_bibtex = checkBibtex(filename, tools.parsed2Bibtex(bibtex))

    # Tag update: a changed tag means the file must move to the new
    # tag's directory.
    if new_bibtex['tag'] != bibtex['tag']:
        print("Editing tag, moving file.")
        new_name = backend.getNewName(new_bibtex['file'],
                                      new_bibtex,
                                      new_bibtex['tag'])

        # Keep asking until the target name is free.
        while os.path.exists(new_name):
            tools.warning("file "+new_name+" already exists.")
            default_rename = new_name.replace(tools.getExtension(new_name),
                                              " (2)" +
                                              tools.getExtension(new_name))
            rename = tools.rawInput("New name ["+default_rename+"]? ")
            if rename == '':
                new_name = default_rename
            else:
                new_name = rename
        new_bibtex['file'] = new_name

        try:
            shutil.move(bibtex['file'], new_bibtex['file'])
        except shutil.Error:
            tools.warning('Unable to move file '+bibtex['file']+' to ' +
                          new_bibtex['file'] + ' according to tag edit.')

        # Drop the old tag directory when it is now empty.
        try:
            if not os.listdir(os.path.dirname(bibtex['file'])):
                os.rmdir(os.path.dirname(bibtex['file']))
        except OSError:
            tools.warning("Unable to delete empty tag dir " +
                          os.path.dirname(bibtex['file']))

    try:
        with open(config.get("folder")+'index.bib', 'r', encoding='utf-8') \
                as fh:
            index = BibTexParser(fh.read())
        index = index.get_entry_dict()
    except (TypeError, IOError):
        tools.warning("Unable to open index file.")
        return False
    # NOTE(review): this keys the entry on new_bibtex['id'] — confirm the
    # parsed entries expose 'id' (and not e.g. 'ID') in this codebase.
    index[new_bibtex['id']] = new_bibtex
    backend.bibtexRewrite(index)
    return True
def parseBibTex(bibtexStr):
    """ Parse a bibtex string and return a dictionary of entries. """
    import bibtexparser
    # Newer bibtexparser releases expose a `loads` helper taking a
    # string; use it when available (keeps this copy consistent with the
    # other parseBibTex implementations in the codebase).
    if hasattr(bibtexparser, 'loads'):
        return bibtexparser.loads(bibtexStr).entries_dict
    # Fall back to the bibtexparser 0.5 API, whose parser consumes a
    # file-like object.
    from bibtexparser.bparser import BibTexParser
    from StringIO import StringIO
    f = StringIO()
    f.write(bibtexStr)
    f.seek(0, 0)
    parser = BibTexParser(f)
    return parser.get_entry_dict()
def parseBibTex(bibtexStr):
    """ Parse a bibtex string and return a dictionary of entries. """
    import bibtexparser
    # Newer bibtexparser releases expose a `loads` helper taking a
    # string; use it when available (keeps this copy consistent with the
    # other parseBibTex implementations in the codebase).
    if hasattr(bibtexparser, 'loads'):
        return bibtexparser.loads(bibtexStr).entries_dict
    # Fall back to the bibtexparser 0.5 API, whose parser consumes a
    # file-like object.
    from bibtexparser.bparser import BibTexParser
    from StringIO import StringIO
    f = StringIO()
    f.write(bibtexStr)
    f.seek(0, 0)
    parser = BibTexParser(f)
    return parser.get_entry_dict()
def getEntries():
    """Return the list of all entry idents in the bibtex index.

    Returns False when the index file cannot be read.
    """
    try:
        with open(config.get("folder")+'index.bib', 'r', encoding='utf-8') \
                as fh:
            entries_dict = BibTexParser(fh.read()).get_entry_dict()
    except (TypeError, IOError):
        tools.warning("Unable to open index file.")
        return False
    return list(entries_dict.keys())
def load_bibtex(bibfile, customizer=None):
    """Parse the BibTeX file `bibfile` with bibtexparser.

    Returns a dict of dicts keyed by entry ID:
    {ID: {authors: ..., ...}, ...}.
    """
    with open(bibfile, 'r') as bib_file:
        contents = bib_file.read()
    parser = BibTexParser(contents, customization=customizer)
    return parser.get_entry_dict()
def add():
    """Load hcomp13.bib and insert one Entry row per bibtex record."""
    with open('hcomp13.bib', 'r') as bibfile:
        bp = BibTexParser(bibfile.read())
    records_dict = bp.get_entry_dict()
    for count, record in enumerate(records_dict.values()):
        entry = Entry(number=count,
                      title=record['title'],
                      author=record['author'],
                      year=record['year'])
        db.session.add(entry)
    # Fix: commit once after all rows are staged instead of once per
    # row; avoids a round-trip per record and makes the insert atomic.
    db.session.commit()
    # NOTE(review): "ouput" looks like a typo for "output", but the key
    # is part of the JSON API response, so it is left unchanged here.
    return jsonify(ouput="add")
def bibtexEdit(ident, modifs):
    """Update the `ident` entry in the bibtex index with the fields in
    the `modifs` dict, then rewrite the index file."""
    try:
        with open(config.get("folder")+'index.bib', 'r', encoding='utf-8') \
                as fh:
            bibtex = BibTexParser(fh.read())
            bibtex = bibtex.get_entry_dict()
    except (IOError, TypeError):
        tools.warning("Unable to open index file.")
        return False

    for field, value in modifs.items():
        bibtex[ident][field] = value
    bibtexRewrite(bibtex)
def openFile(ident):
    """Open the file attached to entry `ident` with the desktop default
    viewer (xdg-open). Returns True on success, False otherwise."""
    try:
        with open(config.get("folder")+'index.bib', 'r', encoding='utf-8') \
                as fh:
            bibtex = BibTexParser(fh.read())
            bibtex = bibtex.get_entry_dict()
    except (TypeError, IOError):
        tools.warning("Unable to open index file.")
        return False

    if ident not in bibtex:
        return False
    subprocess.Popen(['xdg-open', bibtex[ident]['file']])
    return True
def parseBibTex(bibtexStr):
    """ Parse a bibtex string and return a dictionary of entries. """
    import bibtexparser
    # Recent bibtexparser releases provide a string-based `loads` helper.
    if hasattr(bibtexparser, 'loads'):
        return bibtexparser.loads(bibtexStr).entries_dict
    # bibtexparser 0.5 fallback: the parser consumes a file-like object.
    from bibtexparser.bparser import BibTexParser
    from StringIO import StringIO
    buf = StringIO()
    buf.write(bibtexStr)
    buf.seek(0, 0)
    return BibTexParser(buf).get_entry_dict()
def parseBibTex(bibtexStr):
    """ Parse a bibtex string and return a dictionary of entries. """
    import bibtexparser
    # Recent bibtexparser releases provide a string-based `loads` helper.
    if hasattr(bibtexparser, 'loads'):
        return bibtexparser.loads(bibtexStr).entries_dict
    # bibtexparser 0.5 fallback: the parser consumes a file-like object.
    from bibtexparser.bparser import BibTexParser
    from StringIO import StringIO
    buf = StringIO()
    buf.write(bibtexStr)
    buf.seek(0, 0)
    return BibTexParser(buf).get_entry_dict()
def get_bibtex_entries(filename, unicode=True):
    """
    Parse a bibtex file and return the content

    :param filename: bibtex filepath
    :param unicode: If True, unicode style, if False, latex style
    :returns: a dictionary; key=ID, content=entry
    """
    if unicode:
        _customizations = _customizations_unicode
    else:
        _customizations = _customizations_latex
    with open(filename, 'r') as bibfile:
        biblio = BibTexParser(bibfile.read(), customization=_customizations)
    # Fix: dropped the unused `entries = biblio.get_entry_list()` local.
    return biblio.get_entry_dict()
def get_bibtex_entries(filename, unicode=True):
    """
    Parse a bibtex file and return the content

    :param filename: bibtex filepath
    :param unicode: If True, unicode style, if False, latex style
    :returns: a dictionary; key=ID, content=entry
    """
    if unicode:
        _customizations = _customizations_unicode
    else:
        _customizations = _customizations_latex
    with open(filename, 'r') as bibfile:
        biblio = BibTexParser(bibfile.read(), customization=_customizations)
    # Fix: dropped the unused `entries = biblio.get_entry_list()` local.
    return biblio.get_entry_dict()
def arXiv2Bib(arxiv):
    """Return a bibTeX string of metadata for a given arXiv id.

    arxiv is an arxiv id. Returns an empty string when no usable
    reference could be fetched.
    """
    bibtex = arxiv_metadata.arxiv2bib([arxiv])
    for bib in bibtex:
        # Skip lookup errors reported by the arxiv2bib backend.
        if isinstance(bib, arxiv_metadata.ReferenceErrorInfo):
            continue
        fetched_bibtex = BibTexParser(bib.bibtex()).get_entry_dict()
        fetched_bibtex = fetched_bibtex[list(fetched_bibtex.keys())[0]]
        # The fetched entry never describes a local file.
        fetched_bibtex.pop('file', None)
        return tools.parsed2Bibtex(fetched_bibtex)
    return ''
def test_homogenizes_fields(self):
    """Non-canonical field names are mapped to canonical ones when
    homogenize_fields is enabled."""
    self.maxDiff = None
    data_file = 'bibtexparser/tests/data/article_homogenize.bib'
    with io.open(data_file, 'r', encoding='utf-8') as bibfile:
        bib = BibTexParser(bibfile.read(), homogenize_fields=True)
    homogenized = {
        'ID': 'Cesar2013',
        'ENTRYTYPE': 'article',
        'title': 'An amazing title',
        'author': 'Jean César',
        'editor': "Edith Or",
        'journal': 'Nice Journal',
        'year': '2013',
        'month': 'jan',
        'volume': '12',
        'pages': '12-23',
        'keyword': 'keyword1, keyword2',
        'comments': 'A comment',
        'abstract': 'This is an abstract. This line should be '
                    'long enough to test\nmultilines... and with '
                    'a french érudit word',
        'url': "http://my.link/to-content",
        'subject': "Some topic of interest",
    }
    self.assertEqual(bib.get_entry_dict(), {'Cesar2013': homogenized})
def deleteFile(filename):
    """Delete a file (and its index entry) based on its filename.

    Returns True when a matching index entry was found and removed;
    False otherwise (a stray file with no index entry is still removed
    from disk).
    """
    try:
        with open(config.get("folder")+'index.bib', 'r', encoding='utf-8') \
                as fh:
            bibtex = BibTexParser(fh.read())
        bibtex = bibtex.get_entry_dict()
    except (TypeError, IOError):
        tools.warning("Unable to open index file.")
        return False

    found = False
    for key in list(bibtex.keys()):
        try:
            # Read the path once; entries without a 'file' field (or
            # whose file vanished) fall into the outer except and are
            # skipped.
            entry_file = bibtex[key]['file']
            if not os.path.samefile(entry_file, filename):
                continue
            found = True
            try:
                os.remove(entry_file)
            except OSError:
                tools.warning("Unable to delete file associated to id " +
                              key+" : "+entry_file)
            # Drop the containing tag dir when it became empty.
            try:
                if not os.listdir(os.path.dirname(filename)):
                    os.rmdir(os.path.dirname(filename))
            except OSError:
                tools.warning("Unable to delete empty tag dir " +
                              os.path.dirname(filename))
            try:
                del(bibtex[key])
            except KeyError:
                # Fix: the original warning re-read bibtex[key]['file']
                # here, raising a second KeyError after deletion.
                tools.warning("No associated bibtex entry in index for " +
                              "file " + entry_file)
        except (KeyError, OSError):
            pass
    if found:
        bibtexRewrite(bibtex)
    elif os.path.isfile(filename):
        os.remove(filename)
    return found
def load_bibtex(bibpath, customizer=None):
    """Parse a .bib file, or recursively all .bib files in a directory.

    Returns a dict of dicts keyed by entry ID:
    {ID: {authors: ..., ...}, ...}. Non-.bib files are skipped with an
    informational message.
    """
    if os.path.isfile(bibpath):
        if not bibpath.endswith(".bib"):
            print("INFO: Skipping {} - No .bib extension.".format(bibpath))
            return {}
        # Fix: close the file handle deterministically — the original
        # leaked it via open(...).read() with no `with`/close.
        with open(bibpath, 'r') as bib_file:
            bp = BibTexParser(bib_file.read(), customization=customizer)
        return bp.get_entry_dict()
    elif os.path.isdir(bibpath):
        # Merge the entries of every bibtex file under this directory,
        # recursing into subdirectories.
        refsdict = {}
        for name in os.listdir(bibpath):
            inpath = os.path.join(bibpath, name)
            refsdict.update(load_bibtex(inpath, customizer=customizer))
        return refsdict
def main():
    """Abbreviate (or expand, with -r) journal names in a BibTeX file."""
    parser = ArgumentParser()
    parser.add_argument("target", help="The bib file to abbreviate.")
    parser.add_argument("-o", "--output", help="The output file name. If missing, output will be sent to stdout.")
    parser.add_argument("-r", "--reverse", help="Reverse the process and unabbreviate journal names.", action="store_true")
    parser.add_argument("-a", "--abbreviations", help="Path to a file of abbreviations in the form (one per line): Journal of Biological Science = J. Sci. Biol.")
    parser.add_argument("-v", "--verbose", action="store_true")
    args = parser.parse_args()

    level = logging.WARNING if not args.verbose else logging.INFO
    logger.setLevel(level)

    # Fix: close the input file (the original leaked both file handles
    # and shadowed the `input` builtin).
    with open(args.target, "r") as infile:
        refs_bp = BibTexParser(infile.read(),
                               customization=homogeneize_latex_encoding)
    refs = refs_bp.get_entry_dict()
    # NOTE(review): the -a/--abbreviations option is parsed but never
    # used; the general abbreviation list is always loaded here.
    abbrevs = load_abbrevs(determine_path() +
                           "/journal_files/journal_abbreviations_general.txt",
                           reverse=args.reverse)

    for ref in refs:
        if 'journal' in refs[ref]:
            # Assume that if it has a journal key, then it needs abbreviating. I'm doing this
            # instead of testing for type==article in case I've forgotten about a case where
            # type != article but there's a journal field.
            # Also, journal names with one word ('Nature') don't require abbreviation.
            if len(refs[ref]['journal'].split(' ')) > 1:
                journal = refs[ref]['journal'].lower()
                # Handle any difficult characters. TODO: check that this list is complete.
                journal_clean = re.sub('[{}]', '', journal)
                try:
                    refs[ref]['journal'] = abbrevs[journal_clean]
                    logger.info('%s replaced with %s for key %s' % (journal, abbrevs[journal_clean], ref))
                except KeyError:
                    logger.error('%s not found in abbreviations!' % (journal_clean))

    output_bib = to_bibtex(refs_bp)
    # Write to the requested file (closed deterministically), or to
    # stdout — which must NOT be closed — when no -o was given.
    if args.output:
        with open(args.output, "w") as outfile:
            outfile.write(output_bib)
    else:
        sys.stdout.write(output_bib)
def test_article(self):
    """Parsing article.bib yields the expected entry list and dict."""
    with codecs.open("bibtexparser/tests/data/article.bib", "r",
                     "utf-8") as bibfile:
        bib = BibTexParser(bibfile.read())
    entry = {
        "ID": "Cesar2013",
        "ENTRYTYPE": "article",
        "title": "An amazing title",
        "author": "Jean César",
        "journal": "Nice Journal",
        "year": "2013",
        "month": "jan",
        "volume": "12",
        "pages": "12-23",
        "keyword": "keyword1, keyword2",
        "comments": "A comment",
        "abstract": "This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word",
    }
    self.assertEqual(bib.get_entry_list(), [entry])
    self.assertEqual(bib.get_entry_dict(), {entry["ID"]: entry})
def test_Parsing(self):
    """Smoke-test the bibtexparser 0.5 file-object API on a two-entry
    bibtex string (an @article and an @incollection) and pretty-print
    the resulting entry dict."""
    # Fixture kept verbatim: it exercises quoted values, accented
    # characters, trailing spaces inside values and a trailing comma in
    # the last entry.
    bibtex = """ @article{delaRosaTrevin2013, title = "Xmipp 3.0: An improved software suite for image processing in electron microscopy ", journal = "Journal of Structural Biology ", volume = "184", number = "2", pages = "321 - 328", year = "2013", issn = "1047-8477", doi = "http://dx.doi.org/10.1016/j.jsb.2013.09.015", url = "http://www.sciencedirect.com/science/article/pii/S1047847713002566", author = "J.M. de la Rosa-Trevín and J. Otón and R. Marabini and A. Zaldívar and J. Vargas and J.M. Carazo and C.O.S. Sorzano", keywords = "Electron microscopy, Single particles analysis, Image processing, Software package " } @incollection{Sorzano2013, title = "Semiautomatic, High-Throughput, High-Resolution Protocol for Three-Dimensional Reconstruction of Single Particles in Electron Microscopy", booktitle = "Nanoimaging", year = "2013", isbn = "978-1-62703-136-3", volume = "950", series = "Methods in Molecular Biology", editor = "Sousa, Alioscka A. and Kruhlak, Michael J.", doi = "10.1007/978-1-62703-137-0_11", url = "http://dx.doi.org/10.1007/978-1-62703-137-0_11", publisher = "Humana Press", keywords = "Single particle analysis; Electron microscopy; Image processing; 3D reconstruction; Workflows", author = "Sorzano, CarlosOscar and Rosa Trevín, J.M. and Otón, J. and Vega, J.J. and Cuenca, J. and Zaldívar-Peraza, A. and Gómez-Blanco, J. and Vargas, J. and Quintana, A. and Marabini, Roberto and Carazo, JoséMaría", pages = "171-193", } """
    # bibtexparser 0.5 API: the parser consumes a file-like object.
    f = StringIO()
    f.write(bibtex)
    f.seek(0, 0)
    parser = BibTexParser(f)
    from pyworkflow.utils import prettyDict
    prettyDict(parser.get_entry_dict())
def read_bib(self, bibfile):
    """Read `bibfile` with the configured encoding and parse it.

    Returns the parsed entries as a dict keyed by entry ID.
    """
    with codecs.open(bibfile, 'r', encoding=self.encoding) as handle:
        raw = handle.read()
    return BibTexParser(raw, customization=self.btex_custom).get_entry_dict()
vstr = d[key]['volume'] if ('pages' in d[key]): vstr = vstr + ':' + d[key]['pages'] print(vstr) print('(' + d[key]['year'] + ')', '\\\\') print('}') print('') fname = 'citations.bib' bibfile = open(fname, 'r') bp = BibTexParser(bibfile.read()) bibfile.close() allarticles = bp.get_entry_dict() myarticles = {} coarticles = {} for x in list(allarticles.keys()): if allarticles[x]['ENTRYTYPE'] == 'article': if allarticles[x]['author'].split(',')[0] == 'Schramm': myarticles[x] = allarticles[x] else: coarticles[x] = allarticles[x] print('\\section{First author publications}', '\n') printArticles(myarticles) print('\\section{Co-author publications}', '\n') printArticles(coarticles)
def read_bib(self, bibfile):
    """Parse the bibtex file `bibfile`, decoded with self.encoding.

    The entries are returned as a dict keyed by entry ID.
    """
    with codecs.open(bibfile, 'r', encoding=self.encoding) as source:
        contents = source.read()
    parser = BibTexParser(contents, customization=self.btex_custom)
    return parser.get_entry_dict()
def to_bibtex(data):
    """Serialize a dict of parsed bibtex entries back into a bibtex string.

    `data` maps entry ids to field dicts (the shape produced by
    BibTexParser.get_entry_dict()). Entries are emitted sorted by id,
    fields sorted by name, with ENTRYTYPE/ID forming the @type{id, header.
    """
    bibtex = ''
    for entry in sorted(data.keys()):
        bibtex += ('@' + data[entry]['ENTRYTYPE'] + '{' + data[entry]['ID'] +
                   ",\n")
        # All remaining fields, alphabetically, one per line.
        for field in [
                i for i in sorted(data[entry]) if i not in ['ENTRYTYPE', 'ID']
        ]:
            bibtex += " " + field + " = {" + data[entry][field] + "},\n"
        bibtex += "}\n\n"
    return bibtex


# NOTE(review): the statements below run at module level and rely on `bp`
# and `keys` being defined earlier in the file (not visible in this chunk);
# the final loop also appears to continue beyond this chunk.
bp_dict = bp.get_entry_dict()
cond_dict = {}
# Whitelist of fields copied into each condensed entry ('volume' and
# 'year' are listed twice — harmless for membership tests).
entries = [
    'chapter', 'publisher', 'author', 'year', 'booktitle', 'title', 'volume',
    'ENTRYTYPE', 'number', 'month', 'volume', 'pages', 'year', 'journal', 'ID'
]
for key in keys:
    inentry = bp_dict[key]
    outentry = {}
    # Process bibtex entry: keep only the whitelisted fields.
    for inkey in inentry.keys():
        if inkey in entries:
            outentry[inkey] = inentry[inkey]
def addFile(src, filetype, manual, autoconfirm, tag):
    """Add the file `src` to the library.

    filetype is 'article', 'book' or None (autodetect); `manual` skips
    the automatic DOI/arXiv/ISBN detection; `autoconfirm` skips the
    interactive confirmation prompts; `tag` is the library tag to file
    the document under. Returns the new file name, or False when the
    user skips the entry.
    """
    doi = False
    arxiv = False
    isbn = False

    # Try to identify the document automatically unless manual mode.
    if not manual:
        try:
            if filetype == 'article' or filetype is None:
                id_type, article_id = fetcher.findArticleID(src)
                if id_type == "DOI":
                    doi = article_id
                elif id_type == "arXiv":
                    arxiv = article_id
            if filetype == 'book' or (doi is False and arxiv is False and
                                      filetype is None):
                isbn = fetcher.findISBN(src)
        except KeyboardInterrupt:
            # Ctrl-C during detection falls through to manual entry.
            doi = False
            arxiv = False
            isbn = False

    if doi is False and isbn is False and arxiv is False:
        # Nothing detected: ask the user which identifier to type in.
        if filetype is None:
            tools.warning("Could not determine the DOI nor the arXiv id nor " +
                          "the ISBN for "+src+". Switching to manual entry.")
            doi_arxiv_isbn = ''
            while(doi_arxiv_isbn not in
                    ['doi', 'arxiv', 'isbn', 'manual', 'skip']):
                doi_arxiv_isbn = (tools.rawInput("DOI / arXiv " +
                                                 "/ ISBN / manual / skip? ").
                                  lower())
            if doi_arxiv_isbn == 'doi':
                doi = tools.rawInput('DOI? ')
            elif doi_arxiv_isbn == 'arxiv':
                arxiv = tools.rawInput('arXiv id? ')
            elif doi_arxiv_isbn == 'isbn':
                isbn = tools.rawInput('ISBN? ')
            elif doi_arxiv_isbn == 'skip':
                return False
        elif filetype == 'article':
            tools.warning("Could not determine the DOI nor the arXiv id for " +
                          src+", switching to manual entry.")
            doi_arxiv = ''
            while doi_arxiv not in ['doi', 'arxiv', 'manual', 'skip']:
                doi_arxiv = (tools.rawInput("DOI / arXiv / manual / skip? ").
                             lower())
            if doi_arxiv == 'doi':
                doi = tools.rawInput('DOI? ')
            elif doi_arxiv == 'arxiv':
                arxiv = tools.rawInput('arXiv id? ')
            elif doi_arxiv == 'skip':
                return False
        elif filetype == 'book':
            isbn_manual = ''
            while isbn_manual not in ['isbn', 'manual', 'skip']:
                isbn_manual = tools.rawInput("ISBN / manual / skip? ").lower()
            if isbn_manual == 'isbn':
                # Normalize the typed ISBN (drop spaces and dashes).
                isbn = (tools.rawInput('ISBN? ').
                        replace(' ', '').
                        replace('-', ''))
            elif isbn_manual == 'skip':
                return False
    elif doi is not False:
        print("DOI for "+src+" is "+doi+".")
    elif arxiv is not False:
        print("ArXiv id for "+src+" is "+arxiv+".")
    elif isbn is not False:
        print("ISBN for "+src+" is "+isbn+".")

    # Fetch the bibtex for whichever identifier we ended up with.
    if doi is not False and doi != '':
        # Add extra \n for bibtexparser
        bibtex = fetcher.doi2Bib(doi).strip().replace(',', ",\n")+"\n"
    elif arxiv is not False and arxiv != '':
        bibtex = fetcher.arXiv2Bib(arxiv).strip().replace(',', ",\n")+"\n"
    elif isbn is not False and isbn != '':
        # Idem
        bibtex = fetcher.isbn2Bib(isbn).strip()+"\n"
    else:
        bibtex = ''
    bibtex = BibTexParser(bibtex)
    bibtex = bibtex.get_entry_dict()
    if len(bibtex) > 0:
        # Keep only the first parsed entry.
        bibtex_name = list(bibtex.keys())[0]
        bibtex = bibtex[bibtex_name]
        bibtex_string = tools.parsed2Bibtex(bibtex)
    else:
        bibtex_string = ''

    if not autoconfirm:
        # Let the user review / edit the fetched bibtex.
        bibtex = checkBibtex(src, bibtex_string)

    if not autoconfirm:
        tag = tools.rawInput("Tag for this paper (leave empty for default) ? ")
    else:
        # NOTE(review): `args` is a global here, and the `tag` parameter
        # is ignored in autoconfirm mode — looks like it should be
        # `tag = tag if tag else ...`; confirm against the CLI caller.
        tag = args.tag
    bibtex['tag'] = tag

    new_name = backend.getNewName(src, bibtex, tag)

    # Keep asking until the destination name is free.
    while os.path.exists(new_name):
        tools.warning("file "+new_name+" already exists.")
        default_rename = new_name.replace(tools.getExtension(new_name),
                                          " (2)"+tools.getExtension(new_name))
        rename = tools.rawInput("New name ["+default_rename+"]? ")
        if rename == '':
            new_name = default_rename
        else:
            new_name = rename
    bibtex['file'] = new_name

    try:
        shutil.copy2(src, new_name)
    except shutil.Error:
        new_name = False
        sys.exit("Unable to move file to library dir " +
                 config.get("folder")+".")

    # Remove first page of IOP papers
    try:
        if 'IOP' in bibtex['publisher'] and bibtex['type'] == 'article':
            tearpages.tearpage(new_name)
    except (KeyError, shutil.Error, IOError):
        pass

    backend.bibtexAppend(bibtex)
    return new_name
def checkBibtex(filename, bibtex_string):
    """Interactively validate / edit the bibtex entry for `filename`.

    Parses `bibtex_string`, checks that the mandatory fields are
    present, and loops through the user's $EDITOR until the entry is
    confirmed. If the 'file' field changed, the file is moved to the
    new location. Returns the confirmed entry as a dict.
    """
    print("The bibtex entry found for "+filename+" is:")

    bibtex = BibTexParser(bibtex_string)
    bibtex = bibtex.get_entry_dict()
    try:
        bibtex = bibtex[list(bibtex.keys())[0]]
        # Check entries are correct
        assert bibtex['title']
        if bibtex['type'] == 'article':
            assert bibtex['authors']
        elif bibtex['type'] == 'book':
            assert bibtex['author']
        assert bibtex['year']
        # Print the bibtex and confirm
        print(tools.parsed2Bibtex(bibtex))
        check = tools.rawInput("Is it correct? [Y/n] ")
    except KeyboardInterrupt:
        sys.exit()
    except (IndexError, KeyError, AssertionError):
        # Missing entry or missing mandatory field: force an edit.
        check = 'n'

    # Remember the currently attached file (if any) so a later edit that
    # changes 'file' can trigger a move.
    try:
        old_filename = bibtex['file']
    except KeyError:
        old_filename = False

    while check.lower() == 'n':
        # Hand the raw bibtex to the user's editor via a temp file.
        with tempfile.NamedTemporaryFile(suffix=".tmp") as tmpfile:
            tmpfile.write(bibtex_string.encode('utf-8'))
            tmpfile.flush()
            subprocess.call([EDITOR, tmpfile.name])
            tmpfile.seek(0)
            bibtex = BibTexParser(tmpfile.read().decode('utf-8')+"\n")
        bibtex = bibtex.get_entry_dict()
        try:
            bibtex = bibtex[list(bibtex.keys())[0]]
        except (IndexError, KeyError):
            tools.warning("Invalid bibtex entry")
            bibtex_string = ''
            tools.rawInput("Press Enter to go back to editor.")
            continue
        if('authors' not in bibtex and 'title' not in bibtex and
           'year' not in bibtex):
            tools.warning("Invalid bibtex entry")
            bibtex_string = ''
            tools.rawInput("Press Enter to go back to editor.")
            continue
        if old_filename is not False and 'file' not in bibtex:
            # The entry had a file before the edit; it must keep one.
            tools.warning("Invalid bibtex entry. No filename given.")
            tools.rawInput("Press Enter to go back to editor.")
            check = 'n'
        else:
            bibtex_string = tools.parsed2Bibtex(bibtex)
            print("\nThe bibtex entry for "+filename+" is:")
            print(bibtex_string)
            check = tools.rawInput("Is it correct? [Y/n] ")

    # The user may have changed the 'file' field: move the file.
    if old_filename is not False and old_filename != bibtex['file']:
        try:
            print("Moving file to new location…")
            shutil.move(old_filename, bibtex['file'])
        except shutil.Error:
            tools.warning("Unable to move file "+old_filename+" to " +
                          bibtex['file']+". You should check it manually.")

    return bibtex