def _create_formatted_entries(formatted_cache_name, bib_entries, cache_time):
    """Pre-format *bib_entries* for cite completion and cache the result.

    The autocomplete and panel format strings are read from the settings,
    applied to every entry, and the resulting list is written into the
    global cache under *formatted_cache_name* together with meta data
    (time stamp, version, and the formats used) so the cache can later be
    validated.

    Arguments:
    formatted_cache_name -- cache key to store the formatted entries under
    bib_entries -- the parsed bibliography entries to format
    cache_time -- the time stamp to record in the cache meta data

    Returns:
    The list of pre-formatted entry dicts (also written to the cache).
    """
    ac_format = get_setting("cite_autocomplete_format")
    pnl_format = get_setting("cite_panel_format")

    def _format_one(entry):
        # one pre-formatted record per bib entry
        return {
            "keyword": entry["keyword"],
            "<prefix_match>": bibformat.create_prefix_match_str(entry),
            "<panel_formatted>": [
                bibformat.format_entry(fmt, entry) for fmt in pnl_format
            ],
            "<autocomplete_formatted>":
                bibformat.format_entry(ac_format, entry)
        }

    # meta data lets readers detect stale caches (format/version changes)
    meta_data = {
        "cache_time": cache_time,
        "version": _VERSION,
        "autocomplete_format": ac_format,
        "panel_format": pnl_format
    }
    formatted_entries = [_format_one(e) for e in bib_entries]
    cache.write_global(formatted_cache_name, (meta_data, formatted_entries))
    return formatted_entries
def write_fmt(bib_name, bib_file, bib_entries):
    """Cache the raw bibliography entries and their pre-formatted versions.

    The raw entries are stored with a time stamp under the cache name
    derived from *bib_name* and *bib_file*; the entries are then
    pre-formatted (which speeds up the cite completion command) and
    cached as well. Use the returned pre-formatted entries to stay
    consistent with what was written.

    Arguments:
    bib_name -- the (unique) name of the bibliography
    bib_file -- the bibliography file, which resulted in the entries
    bib_entries -- the entries, which are parsed from the bibliography

    Returns:
    The pre-formatted entries, which should be passed to the cite
    completions
    """
    cache_name, formatted_cache_name = _cache_name(bib_name, bib_file)
    stamp = time.time()

    # store the raw entries first, stamped so readers can detect staleness
    print("Writing bibliography into cache {0}".format(cache_name))
    cache.write_global(cache_name, (stamp, bib_entries))

    # then derive and cache the formatted entries
    return _create_formatted_entries(formatted_cache_name, bib_entries, stamp)
def get_entries(self, *bib_files):
    """Return the entries of all *bib_files*, using a per-file cache.

    For each file, the cache is used when it is at least as new as the
    file on disk; otherwise the file is parsed and the cache refreshed.

    Arguments:
    bib_files -- paths of the bibliography files to load

    Returns:
    A list of EntryWrapper objects for all entries found.
    """
    entries = []
    parser = Parser()
    for bibfname in bib_files:
        # cache key is derived from the file path only
        cache_name = "bib_" + hashlib.md5(
            bibfname.encode("utf8")).hexdigest()
        try:
            modified_time = os.path.getmtime(bibfname)
            (cached_time, cached_entries) = cache.read_global(cache_name)
            if modified_time <= cached_time:
                entries.extend(cached_entries)
                continue
        except Exception:
            # best-effort: any cache problem just means we re-parse below
            pass

        try:
            # 'ignore' to be safe
            bibf = codecs.open(bibfname, 'r', 'UTF-8', 'ignore')
        except IOError:
            print("Cannot open bibliography file %s !" % (bibfname, ))
            sublime.status_message("Cannot open bibliography file %s !" %
                                   (bibfname, ))
            continue

        # ensure the handle is closed even if parsing raises
        with bibf:
            bib_data = parser.parse(bibf.read())

        print('Loaded %d bibitems' % (len(bib_data)))

        # BUGFIX: collect this file's entries separately; the previous
        # code cached the accumulated list of ALL files processed so
        # far, so later files' caches duplicated earlier files' entries.
        file_entries = []
        for key in bib_data:
            entry = bib_data[key]
            if entry.entry_type in ('xdata', 'comment', 'string'):
                continue
            file_entries.append(EntryWrapper(entry))
        entries.extend(file_entries)

        try:
            current_time = time.time()
            cache.write_global(cache_name, (current_time, file_entries))
        except Exception:
            # caching is an optimization; report but do not fail
            print('Error occurred while trying to write to cache {0}'.
                  format(cache_name))
            traceback.print_exc()

    print("Found %d total bib entries" % (len(entries), ))
    return entries
def get_entries(self, *bib_files):
    """Return the entries of all *bib_files*, using a per-file cache.

    For each file, the cache is used when it is at least as new as the
    file on disk; otherwise the file is parsed and the cache refreshed.

    Arguments:
    bib_files -- paths of the bibliography files to load

    Returns:
    A list of EntryWrapper objects for all entries found.
    """
    entries = []
    parser = Parser()
    for bibfname in bib_files:
        # cache key is derived from the file path only
        cache_name = "bib_" + hashlib.md5(bibfname.encode("utf8")).hexdigest()
        try:
            modified_time = os.path.getmtime(bibfname)
            (cached_time, cached_entries) = cache.read_global(cache_name)
            if modified_time <= cached_time:
                entries.extend(cached_entries)
                continue
        except Exception:
            # best-effort: any cache problem just means we re-parse below
            pass

        try:
            bibf = codecs.open(bibfname, 'r', 'UTF-8', 'ignore')  # 'ignore' to be safe
        except IOError:
            print("Cannot open bibliography file %s !" % (bibfname,))
            sublime.status_message("Cannot open bibliography file %s !" % (bibfname,))
            continue

        # ensure the handle is closed even if parsing raises
        with bibf:
            bib_data = parser.parse(bibf.read())

        print('Loaded %d bibitems' % (len(bib_data)))

        # BUGFIX: collect this file's entries separately; the previous
        # code cached the accumulated list of ALL files processed so
        # far, so later files' caches duplicated earlier files' entries.
        file_entries = []
        for key in bib_data:
            entry = bib_data[key]
            if entry.entry_type in ('xdata', 'comment', 'string'):
                continue
            file_entries.append(EntryWrapper(entry))
        entries.extend(file_entries)

        try:
            current_time = time.time()
            cache.write_global(cache_name, (current_time, file_entries))
        except Exception:
            # caching is an optimization; report but do not fail
            print('Error occurred while trying to write to cache {0}'.format(
                cache_name
            ))
            traceback.print_exc()

    print("Found %d total bib entries" % (len(entries),))
    return entries
def get_entries(self, *bib_files):
    """Return the entries of all *bib_files* via line-based parsing.

    A simple traditional parser: scans each line for an @-record start
    (keyword captured by the module-level ``kp`` regex) and for
    field lines (captured by ``multip``). Results are cached per file
    and reused while the cache is at least as new as the file on disk.

    Arguments:
    bib_files -- paths of the bibliography files to load

    Returns:
    A list of dicts, each with at least a 'keyword' key plus any
    captured fields (title, author, ...).
    """
    entries = []
    for bibfname in bib_files:
        # cache key is derived from the file path only
        cache_name = "tradbib_" + hashlib.md5(bibfname.encode("utf8")).hexdigest()
        try:
            modified_time = os.path.getmtime(bibfname)
            (cached_time, cached_entries) = cache.read_global(cache_name)
            if modified_time <= cached_time:
                entries.extend(cached_entries)
                continue
        except Exception:
            # best-effort: any cache problem just means we re-parse below
            pass

        try:
            bibf = codecs.open(bibfname, 'r', 'UTF-8', 'ignore')  # 'ignore' to be safe
        except IOError:
            print("Cannot open bibliography file %s !" % (bibfname,))
            sublime.status_message("Cannot open bibliography file %s !" % (bibfname,))
            continue

        # ensure the handle is closed even if processing raises
        with bibf:
            bib_data = bibf.readlines()

        # BUGFIX: collect this file's entries separately; the previous
        # code cached the accumulated list of ALL files processed so
        # far, so later files' caches duplicated earlier files' entries.
        file_entries = []
        entry = {}
        for line in bib_data:
            line = line.strip()
            # Let's get rid of irrelevant lines first
            if line == "" or line[0] == '%':
                continue
            if line.lower().startswith(("@comment", "@string", "@preamble")):
                continue
            if line[0] == "@":
                # a new record starts: flush the previous one, if complete
                if 'keyword' in entry:
                    file_entries.append(entry)
                    entry = {}
                kp_match = kp.search(line)
                if kp_match:
                    entry['keyword'] = kp_match.group(1)
                else:
                    print("Cannot process this @ line: " + line)
                    print(
                        "Previous keyword (if any): " +
                        entry.get('keyword', '')
                    )
                continue
            # Now test for title, author, etc.
            # Note: we capture only the first line, but that's OK for our purposes
            multip_match = multip.search(line)
            if multip_match:
                key = multip_match.group(1).lower()
                value = multip_match.group(2)
                if key == 'title':
                    # strip TeX grouping braces and quote markup
                    value = value.replace(
                        '{\\textquoteright}', ''
                    ).replace('{', '').replace('}', '')
                entry[key] = value
                continue
        # at the end, we have a single record
        if 'keyword' in entry:
            file_entries.append(entry)

        print('Loaded %d bibitems' % (len(file_entries)))
        entries.extend(file_entries)

        try:
            current_time = time.time()
            cache.write_global(cache_name, (current_time, file_entries))
        except Exception:
            # caching is an optimization; report but do not fail
            print('Error occurred while trying to write to cache {0}'.format(
                cache_name
            ))
            traceback.print_exc()

    print("Found %d total bib entries" % (len(entries),))
    return entries
def get_entries(self, *bib_files):
    """Return the entries of all *bib_files* via line-based parsing.

    A simple traditional parser: scans each line for an @-record start
    (keyword captured by the module-level ``kp`` regex) and for
    field lines (captured by ``multip``). Results are cached per file
    and reused while the cache is at least as new as the file on disk.

    Arguments:
    bib_files -- paths of the bibliography files to load

    Returns:
    A list of dicts, each with at least a 'keyword' key plus any
    captured fields (title, author, ...).
    """
    entries = []
    for bibfname in bib_files:
        # cache key is derived from the file path only
        cache_name = "tradbib_" + hashlib.md5(
            bibfname.encode("utf8")).hexdigest()
        try:
            modified_time = os.path.getmtime(bibfname)
            (cached_time, cached_entries) = cache.read_global(cache_name)
            if modified_time <= cached_time:
                entries.extend(cached_entries)
                continue
        except Exception:
            # best-effort: any cache problem just means we re-parse below
            pass

        try:
            # 'ignore' to be safe
            bibf = codecs.open(bibfname, 'r', 'UTF-8', 'ignore')
        except IOError:
            print("Cannot open bibliography file %s !" % (bibfname, ))
            sublime.status_message("Cannot open bibliography file %s !" %
                                   (bibfname, ))
            continue

        # ensure the handle is closed even if processing raises
        with bibf:
            bib_data = bibf.readlines()

        # BUGFIX: collect this file's entries separately; the previous
        # code cached the accumulated list of ALL files processed so
        # far, so later files' caches duplicated earlier files' entries.
        file_entries = []
        entry = {}
        for line in bib_data:
            line = line.strip()
            # Let's get rid of irrelevant lines first
            if line == "" or line[0] == '%':
                continue
            if line.lower().startswith(("@comment", "@string", "@preamble")):
                continue
            if line[0] == "@":
                # a new record starts: flush the previous one, if complete
                if 'keyword' in entry:
                    file_entries.append(entry)
                    entry = {}
                kp_match = kp.search(line)
                if kp_match:
                    entry['keyword'] = kp_match.group(1)
                else:
                    print("Cannot process this @ line: " + line)
                    print("Previous keyword (if any): " +
                          entry.get('keyword', ''))
                continue
            # Now test for title, author, etc.
            # Note: we capture only the first line, but that's OK for our purposes
            multip_match = multip.search(line)
            if multip_match:
                key = multip_match.group(1).lower()
                value = multip_match.group(2)
                if key == 'title':
                    # strip TeX grouping braces and quote markup
                    value = value.replace('{\\textquoteright}',
                                          '').replace('{', '').replace(
                                              '}', '')
                entry[key] = value
                continue
        # at the end, we have a single record
        if 'keyword' in entry:
            file_entries.append(entry)

        print('Loaded %d bibitems' % (len(file_entries)))
        entries.extend(file_entries)

        try:
            current_time = time.time()
            cache.write_global(cache_name, (current_time, file_entries))
        except Exception:
            # caching is an optimization; report but do not fail
            print('Error occurred while trying to write to cache {0}'.
                  format(cache_name))
            traceback.print_exc()

    print("Found %d total bib entries" % (len(entries), ))
    return entries