def get_entries(self, *bib_files):
    """Load bibliography entries from the given .bib files.

    For each file, try the cache first; on a cache miss, parse the file
    with the "new" (pybtex-style) parser, strip bulky fields, wrap each
    entry in EntryWrapper, and repopulate the cache. Files that cannot
    be opened are skipped with a status message.

    :param bib_files: paths of the bibliography files to load
    :returns: list of formatted entries aggregated across all files
    """
    entries = []
    parser = Parser()  # one parser instance reused for every file
    for bibfname in bib_files:
        bib_cache = bibcache.BibCache("new", bibfname)
        # Cache hit: use the cached entries and skip parsing entirely.
        # Deliberate best-effort — any cache failure falls through to a
        # full parse. Catch Exception, not bare except, so Ctrl-C and
        # SystemExit still propagate.
        try:
            cached_entries = bib_cache.get()
            entries.extend(cached_entries)
            continue
        except Exception:
            pass

        try:
            # 'ignore' decode errors to be safe with messy .bib files
            bibf = codecs.open(bibfname, 'r', 'UTF-8', 'ignore')
        except IOError:
            print("Cannot open bibliography file %s !" % (bibfname, ))
            sublime.status_message(
                "Cannot open bibliography file %s !" % (bibfname, ))
            continue

        # The file opened successfully; the context manager guarantees
        # it is closed even if parsing raises.
        with bibf:
            bib_data = parser.parse(bibf.read())
            print('Loaded %d bibitems' % (len(bib_data)))
            bib_entries = []
            for key in bib_data:
                entry = bib_data[key]
                # Non-entry records carry no citable data.
                if entry.entry_type in ('xdata', 'comment', 'string'):
                    continue
                # Purge some unnecessary fields from the bib entry to
                # save some space and time reloading.
                for k in [
                    'abstract', 'annotation', 'annote', 'execute',
                    'langidopts', 'options'
                ]:
                    if k in entry:
                        del entry[k]
                bib_entries.append(EntryWrapper(entry))

            # Best-effort caching: if the cache cannot be written or
            # re-read, log the traceback and use the parsed entries
            # directly instead of failing the whole load.
            try:
                bib_cache.set(bib_entries)
                fmt_entries = bib_cache.get()
                entries.extend(fmt_entries)
            except Exception:
                traceback.print_exc()
                print("Using bibliography without caching it")
                entries.extend(bib_entries)

    print("Found %d total bib entries" % (len(entries), ))
    return entries
def get_entries(self, *bib_files):
    """Load bibliography entries from the given .bib files.

    For each file, try the cache first; on a cache miss, scan the file
    line by line ("traditional" parser), extracting the cite key from
    each ``@type{key,`` line (module-level regex ``kp``) and simple
    ``field = value`` pairs (module-level regex ``multip``). Files that
    cannot be opened are skipped with a status message.

    :param bib_files: paths of the bibliography files to load
    :returns: list of formatted entries aggregated across all files
    """
    entries = []
    for bibfname in bib_files:
        bib_cache = bibcache.BibCache("trad", bibfname)
        # Cache hit: use the cached entries and skip parsing entirely.
        # Deliberate best-effort — any cache failure falls through to a
        # full parse. Catch Exception, not bare except, so Ctrl-C and
        # SystemExit still propagate.
        try:
            cached_entries = bib_cache.get()
            entries.extend(cached_entries)
            continue
        except Exception:
            pass

        try:
            # 'ignore' decode errors to be safe with messy .bib files
            bibf = codecs.open(bibfname, 'r', 'UTF-8', 'ignore')
        except IOError:
            print("Cannot open bibliography file %s !" % (bibfname, ))
            sublime.status_message(
                "Cannot open bibliography file %s !" % (bibfname, ))
            continue

        # The file opened successfully; the context manager guarantees
        # it is closed even if parsing raises.
        with bibf:
            bib_data = bibf.readlines()
            bib_entries = []
            entry = {}
            for line in bib_data:
                line = line.strip()
                # Let's get rid of irrelevant lines first
                if line == "" or line[0] == '%':
                    continue
                # Skip non-entry records (equivalent to comparing the
                # lowercased fixed-length prefix slices).
                if line.lower().startswith(
                        ("@comment", "@string", "@preamble")):
                    continue
                if line[0] == "@":
                    # A new record starts: flush the previous one if it
                    # managed to get a cite key.
                    if 'keyword' in entry:
                        bib_entries.append(entry)
                        entry = {}
                    kp_match = kp.search(line)
                    if kp_match:
                        entry['keyword'] = kp_match.group(1)
                    else:
                        print(u"Cannot process this @ line: " + line)
                        print(
                            u"Previous keyword (if any): " +
                            entry.get('keyword', ''))
                    continue
                # Now test for title, author, etc.
                # Note: we capture only the first line, but that's OK
                # for our purposes
                multip_match = multip.search(line)
                if multip_match:
                    key = multip_match.group(1).lower()
                    value = codecs.decode(multip_match.group(2), 'latex')
                    if key == 'title':
                        # Strip braces and textquoteright markup so the
                        # title displays cleanly in the quick panel.
                        value = value.replace(
                            '{\\textquoteright}', '').replace(
                            '{', '').replace('}', '')
                    entry[key] = value
                    continue
            # At the end, we may have one last unflushed record.
            if 'keyword' in entry:
                bib_entries.append(entry)
            print('Loaded %d bibitems' % (len(bib_entries)))

            # Best-effort caching: if the cache cannot be written or
            # re-read, log the traceback and use the parsed entries
            # directly instead of failing the whole load.
            try:
                bib_cache.set(bib_entries)
                fmt_entries = bib_cache.get()
                entries.extend(fmt_entries)
            except Exception:
                traceback.print_exc()
                print("Using bibliography without caching it")
                entries.extend(bib_entries)

    print("Found %d total bib entries" % (len(entries), ))
    return entries