def _create_formatted_entries(self, bib_entries):
    # create the formatted entries
    autocomplete_format = get_setting("cite_autocomplete_format")
    panel_format = get_setting("cite_panel_format")

    meta_data = frozendict(
        cache_time=long(time.time()),
        version=_VERSION,
        autocomplete_format=autocomplete_format,
        panel_format=panel_format
    )
    formatted_entries = tuple(
        frozendict(**{
            "keyword": entry["keyword"],
            "<prefix_match>": bibformat.create_prefix_match_str(entry),
            "<panel_formatted>": tuple(
                bibformat.format_entry(s, entry) for s in panel_format
            ),
            "<autocomplete_formatted>":
                bibformat.format_entry(autocomplete_format, entry)
        })
        for entry in bib_entries
    )

    return meta_data, formatted_entries
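# --- Illustrative sketch (not part of the original module) ---
# `meta_data` records the plugin version and the format strings that were
# used to build `formatted_entries`, which suggests it is meant for cache
# validation: if either format setting changes, the cached entries have to
# be rebuilt.  The helper below is a hypothetical consumer of that
# metadata; only `get_setting` and `_VERSION` are taken from the module
# above, everything else is an assumption.


def _formatted_entries_are_current(meta_data):
    return (
        meta_data["version"] == _VERSION and
        meta_data["autocomplete_format"] == get_setting(
            "cite_autocomplete_format") and
        meta_data["panel_format"] == get_setting("cite_panel_format")
    )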
def set(self, key, obj):
    '''
    set the cache value for the given key

    :param key: the key to store the value under
    :param obj: the value to store; note that obj *must* be picklable
    '''
    if key is None:
        raise ValueError('key cannot be None')

    try:
        pickle.dumps(obj, protocol=-1)
    except pickle.PicklingError:
        raise ValueError('obj must be picklable')

    if isinstance(obj, list):
        obj = tuple(obj)
    elif isinstance(obj, dict):
        obj = frozendict(obj)
    elif isinstance(obj, set):
        obj = frozenset(obj)

    with self._write_lock:
        self._objects[key] = obj
        self._dirty = True

    self._schedule_save()
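# --- Illustrative sketch (not part of the original module) ---
# A standalone rendering of what `set` does before storing a value: prove
# the object can be pickled, then swap mutable containers for immutable
# equivalents so later readers cannot mutate the cached object.  Only the
# standard library is used; `types.MappingProxyType` stands in for the
# module's `frozendict` here, which is an assumption about its behaviour.
import pickle
from types import MappingProxyType


def _freeze_value(obj):
    # reject values that could not be written to the on-disk cache
    try:
        pickle.dumps(obj, protocol=-1)
    except pickle.PicklingError:
        raise ValueError('obj must be picklable')
    # normalise mutable containers to read-only equivalents
    if isinstance(obj, list):
        return tuple(obj)
    if isinstance(obj, dict):
        return MappingProxyType(dict(obj))
    if isinstance(obj, set):
        return frozenset(obj)
    return obj


# _freeze_value([1, 2, 3])   -> (1, 2, 3)
# _freeze_value({"a": 1})    -> mappingproxy({'a': 1})
# _freeze_value({1, 2})      -> frozenset({1, 2})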
def _analyze_tex_file(tex_root, file_name=None, process_file_stack=[],
                      ana=None, import_path=None):
    # init ana and the file name
    if not ana:
        ana = Analysis(tex_root)
    if not file_name:
        file_name = tex_root
    # if the file name has no extension use ".tex"
    elif not os.path.splitext(file_name)[1]:
        file_name += ".tex"
    # normalize the path
    file_name = os.path.normpath(file_name)

    # ensure not to go into infinite recursion
    if file_name in process_file_stack:
        print("File appears cyclic: ", file_name)
        print(process_file_stack)
        return ana

    if not import_path:
        base_path, _ = os.path.split(tex_root)
    else:
        base_path = import_path

    # store the import path as the base path, so that it can be accessed later
    if import_path:
        if file_name in ana._import_base_paths:
            if ana._import_base_paths[file_name] != import_path:
                print("Warning: '{0}' is imported twice. "
                      "Cannot handle this correctly in the analysis."
                      .format(file_name))
        else:
            ana._import_base_paths[file_name] = base_path

    # read the content from the file
    try:
        raw_content, content = _preprocess_file(file_name)
    except Exception:
        print('Error occurred while preprocessing {0}'.format(file_name))
        traceback.print_exc()
        return ana
    ana._content[file_name] = content
    ana._raw_content[file_name] = raw_content

    for m in _RE_COMMAND.finditer(content):
        g = m.group

        # insert all relevant information into this dict, which is based
        # on the group dict, i.e. all regex matches
        entryDict = m.groupdict()
        entryDict.update({
            "file_name": file_name,
            "text": g(0),
            "start": m.start(),
            "end": m.end(),
            "region": sublime.Region(m.start(), m.end())
        })

        # insert the regions of the matches into the entry dict
        for k in m.groupdict().keys():
            region_name = k + "_region"
            reg = m.regs[_RE_COMMAND.groupindex[k]]
            entryDict[region_name] = sublime.Region(reg[0], reg[1])

        # create an object from the dict and insert it into the analysis
        entry = objectview(frozendict(entryDict))
        ana._add_command(entry)

        # read child files if it is an input command
        if g("command") in _input_commands and g("args") is not None:
            process_file_stack.append(file_name)
            open_file = os.path.join(base_path, g("args"))
            _analyze_tex_file(tex_root, open_file, process_file_stack, ana)
            process_file_stack.pop()
        elif (g("command") in _import_commands and g("args") is not None and
                g("args2") is not None):
            if g("command").startswith("sub"):
                next_import_path = os.path.join(base_path, g("args"))
            else:
                next_import_path = g("args")
            # normalize the path
            next_import_path = os.path.normpath(next_import_path)
            open_file = os.path.join(next_import_path, g("args2"))

            process_file_stack.append(file_name)
            _analyze_tex_file(tex_root, open_file, process_file_stack, ana,
                              import_path=next_import_path)
            process_file_stack.pop()

        # don't parse further than \end{document}
        if (g("args") == "document" and g("command") == "end") or ana._finished:
            ana._finished = True
            break

    return ana
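# --- Illustrative sketch (not part of the original module) ---
# The analyzer above avoids infinite recursion on cyclic \input/\import
# graphs by pushing the current file onto `process_file_stack` before
# recursing and popping it afterwards; any file already on the stack is
# reported and skipped.  The toy include graph below is hypothetical and
# only demonstrates that guard in isolation.


def _walk_includes(name, children, stack=None, visited=None):
    stack = [] if stack is None else stack
    visited = [] if visited is None else visited
    if name in stack:
        print("File appears cyclic: ", name)
        return visited
    visited.append(name)
    stack.append(name)
    for child in children.get(name, ()):
        _walk_includes(child, children, stack, visited)
    stack.pop()
    return visited


# "a.tex" includes "b.tex", which includes "a.tex" again; without the
# stack check this would recurse forever:
# _walk_includes("a.tex", {"a.tex": ["b.tex"], "b.tex": ["a.tex"]})
# prints the cyclic warning and returns ['a.tex', 'b.tex']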
def _freeze(self):
    self._content = frozendict(**self._content)
    self._raw_content = frozendict(**self._raw_content)
    self._all_commands = tuple(c for c in self._all_commands)
    self.__frozen = True