Example #1
def find_labels_in_files(rootdir, src, labels):
    if not is_tex_file(src):
        src_tex_file = None
        for ext in get_tex_extensions():
            src_tex_file = ''.join((src, ext))
            if os.path.exists(os.path.join(rootdir, src_tex_file)):
                src = src_tex_file
                break
        if src != src_tex_file:
            print("Could not find file {0}".format(src))
            return

    file_path = os.path.normpath(os.path.join(rootdir, src))
    print("Searching file: " + repr(file_path))
    # The following was a mistake:
    #dir_name = os.path.dirname(file_path)
    # The reason is that \input and \include reference files **from the directory
    # of the master file**. So we must keep passing that (in rootdir).

    # read src file and extract all label tags

    # We open with utf-8 by default. If you use a different encoding, too bad.
    # If we really wanted to be safe, we would read until \begin{document},
    # then stop. Hopefully we wouldn't encounter any non-ASCII chars there.
    # But for now do the dumb thing.
    try:
        src_file = codecs.open(file_path, "r", "UTF-8")
    except IOError:
        sublime.status_message(
            "LaTeXTools WARNING: cannot find included file " + file_path)
        print(
            "WARNING! I can't find it! Check your \\include's and \\input's.")
        return

    src_content = re.sub("%.*", "", src_file.read())
    src_file.close()

    # If the file uses inputenc with a DIFFERENT encoding, try re-opening
    # This is still not ideal because we may still fail to decode properly, but still...
    m = re.search(r"\\usepackage\[(.*?)\]\{inputenc\}", src_content)
    if m and (m.group(1) not in ["utf8", "UTF-8", "utf-8"]):
        print("reopening with encoding " + m.group(1))
        f = None
        try:
            f = codecs.open(file_path, "r", m.group(1))
            src_content = re.sub("%.*", "", f.read())
        except:
            print("Uh-oh, could not read file " + file_path +
                  " with encoding " + m.group(1))
        finally:
            if f and not f.closed:
                f.close()

    labels += re.findall(r'\\label\{([^{}]+)\}', src_content)

    # search through input tex files recursively
    for f in re.findall(r'\\(?:input|include|subfile)\{([^\{\}]+)\}',
                        src_content):
        find_labels_in_files(rootdir, f, labels)
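
A standalone sketch of the two extraction steps used above (strip "%" comments, then collect \label names and the files referenced by \input, \include and \subfile); the TeX snippet is invented for illustration:

import re

# Illustrative TeX source; in the plugin this comes from the opened file.
src_content = "\n".join([
    r"\section{Intro}\label{sec:intro}  % \label{sec:commented-out}",
    r"\input{chapters/results}",
])
src_content = re.sub("%.*", "", src_content)             # drop comments, as above
print(re.findall(r'\\label\{([^{}]+)\}', src_content))   # ['sec:intro']
print(re.findall(r'\\(?:input|include|subfile)\{([^\{\}]+)\}',
                 src_content))                           # ['chapters/results']
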
def plugin_loaded():
    # get additional entries from the settings
    _setting_entries = get_setting("fillall_helper_entries", [])
    _filter_invalid_entries(_setting_entries)
    _fillall_entries.extend(_setting_entries)

    _fillall_entries.extend([
        {
            "regex": r'(?:edulcni|tupni)\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"]
        },
        {
            "regex": r'(?:\][^{}\[\]]*\[)?scihpargedulcni\\',
            "extensions": get_setting("image_types", [
                "pdf", "png", "jpeg", "jpg", "eps"
            ])
        },
        {
            "regex": r'(?:\][^{}\[\]]*\[)?ecruoserbibdda\\',
            "extensions": ["bib"]
        },
        {
            "regex": r'yhpargoilbib\\',
            "extensions": ["bib"],
            "strip_extensions": [".bib"],
            "comma_separated": True
        }
    ])

    # update the fields of the entries
    _update_input_entries(_fillall_entries)

    _fillall_entries.extend([
        {
            "regex": r'([^{}\[\]]*)\{(?:\][^{}\[\]]*\[)?ssalctnemucod\\',
            "type": "cached",
            "cache_name": "cls"
        },
        {
            "regex": r'([^{}\[\]]*)\{(?:\][^{}\[\]]*\[)?egakcapesu\\',
            "type": "cached",
            "cache_name": "pkg"
        },
        {
            "regex": r'([^{}\[\]]*)\{elytsyhpargoilbib\\',
            "type": "cached",
            "cache_name": "bst"
        }
    ])

    global _TEX_INPUT_GROUP_MAPPING, TEX_INPUT_FILE_REGEX
    _TEX_INPUT_GROUP_MAPPING = dict((i, v) for i, v in enumerate(_fillall_entries))
    TEX_INPUT_FILE_REGEX = re.compile(
        "(?:{0})".format("|".join(entry["regex"] for entry in _fillall_entries))
    )
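
Note that the regex values registered above are written back-to-front: (?:edulcni|tupni)\\ is \\(?:input|include) reversed. The completion code reverses the text before the cursor prior to matching (see parse_completions in Example #8 below, which starts with line = line[::-1]), so the still-being-typed argument sits at the front of the string and the command name follows it. A small standalone illustration of the idea; the prefix-capturing part that _update_input_entries presumably prepends is simplified here to ([^{}\[\]]*)\{:

import re

# Hypothetical simplified pattern: capture the typed prefix, then the reversed command.
pattern = re.compile(r'([^{}\[\]]*)\{(?:edulcni|tupni)\\')

line = r'see \include{chapters/intro'   # text up to the cursor (illustrative)
m = pattern.match(line[::-1])           # match against the reversed line
print(m.group(1)[::-1])                 # 'chapters/intro' -- the prefix being typed
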
Example #3
def get_tex_root(view):
    try:
        root = os.path.abspath(view.settings().get('TEXroot'))
        if os.path.isfile(root):
            print("Main file defined in project settings : " + root)
            return root
    except:
        pass

    texFile = view.file_name()
    root = texFile
    if texFile is None:
        # We are in an unnamed, unsaved file.
        # Read from the buffer instead.
        if view.substr(0) != '%':
            return None
        reg = view.find(r"^%[^\n]*(\n%[^\n]*)*", 0)
        if not reg:
            return None
        line_regs = view.lines(reg)
        lines = map(view.substr, line_regs)
        is_file = False

    else:
        # This works on ST2 and ST3, but does not automatically convert line endings.
        # We should be OK though.
        lines = codecs.open(texFile, "r", "UTF-8", "ignore")
        is_file = True

    for line in lines:
        if not line.startswith('%'):
            break
        else:
            # We have a comment match; check for a TEX root match
            tex_exts = '|'.join(
                [re.escape(ext) for ext in get_tex_extensions()])
            mroot = re.match(
                r"(?i)%\s*!TEX\s+root *= *(.*({0}))\s*$".format(tex_exts),
                line)
            if mroot:
                # we have a TEX root match
                # Break the match into path, file and extension
                # Create TEX root file name
                # If there is a TEX root path, use it
                # If the path is not absolute and a src path exists, pre-pend it
                root = mroot.group(1)
                if not os.path.isabs(root) and texFile is not None:
                    (texPath, texName) = os.path.split(texFile)
                    root = os.path.join(texPath, root)
                root = os.path.normpath(root)
                break

    if is_file:  # Not very Pythonic, but works...
        lines.close()

    return root
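
The %!TEX root detection above can be checked outside Sublime Text; the hard-coded extension list stands in for get_tex_extensions() and the line is illustrative:

import re

tex_exts = '|'.join(re.escape(ext) for ext in ['.tex'])  # stand-in for get_tex_extensions()
line = "% !TEX root = ../main.tex"
mroot = re.match(r"(?i)%\s*!TEX\s+root *= *(.*({0}))\s*$".format(tex_exts), line)
print(mroot.group(1))  # '../main.tex', later joined with the current file's directory
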
Example #4
def find_labels_in_files(rootdir, src, labels):
    if not is_tex_file(src):
        src_tex_file = None
        for ext in get_tex_extensions():
            src_tex_file = ''.join((src, ext))
            if os.path.exists(os.path.join(rootdir, src_tex_file)):
                src = src_tex_file
                break
        if src != src_tex_file:
            print("Could not find file {0}".format(src))
            return

    file_path = os.path.normpath(os.path.join(rootdir, src))
    print ("Searching file: " + repr(file_path))
    # The following was a mistake:
    #dir_name = os.path.dirname(file_path)
    # THe reason is that \input and \include reference files **from the directory
    # of the master file**. So we must keep passing that (in rootdir).

    # read src file and extract all label tags

    # We open with utf-8 by default. If you use a different encoding, too bad.
    # If we really wanted to be safe, we would read until \begin{document},
    # then stop. Hopefully we wouldn't encounter any non-ASCII chars there. 
    # But for now do the dumb thing.
    try:
        src_file = codecs.open(file_path, "r", "UTF-8")
    except IOError:
        sublime.status_message("LaTeXTools WARNING: cannot find included file " + file_path)
        print ("WARNING! I can't find it! Check your \\include's and \\input's." )
        return

    src_content = re.sub("%.*", "", src_file.read())
    src_file.close()

    # If the file uses inputenc with a DIFFERENT encoding, try re-opening
    # This is still not ideal because we may still fail to decode properly, but still... 
    m = re.search(r"\\usepackage\[(.*?)\]\{inputenc\}", src_content)
    if m and (m.group(1) not in ["utf8", "UTF-8", "utf-8"]):
        print("reopening with encoding " + m.group(1))
        f = None
        try:
            f = codecs.open(file_path, "r", m.group(1))
            src_content = re.sub("%.*", "", f.read())
        except:
            print("Uh-oh, could not read file " + file_path + " with encoding " + m.group(1))
        finally:
            if f and not f.closed:
                f.close()

    labels += re.findall(r'\\label\{([^{}]+)\}', src_content)
    labels += re.findall(r'\\begin\{(?:definition|theorem|lemma|corollary|proof)\}\{[^}]*?\}\{([^}]*?)\}', src_content)

    # search through input tex files recursively
    for f in re.findall(r'\\(?:input|include)\{([^\{\}]+)\}', src_content):
        find_labels_in_files(rootdir, f, labels)
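
Besides plain \label{...} tags, this variant also treats the second brace argument of definition/theorem/lemma/corollary/proof environments as a label, a convention used by some custom (e.g. tcolorbox-style) theorem environments rather than standard amsthm. A quick standalone check of what that extra pattern captures; the TeX line is invented:

import re

src = r"\begin{theorem}{Pythagoras}{thm:pythagoras} ... \end{theorem}"
pattern = r'\\begin\{(?:definition|theorem|lemma|corollary|proof)\}\{[^}]*?\}\{([^}]*?)\}'
print(re.findall(pattern, src))  # ['thm:pythagoras']
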
Example #5
def get_tex_root(view):
	try:
		root = os.path.abspath(view.settings().get('TEXroot'))
		if os.path.isfile(root):
			print("Main file defined in project settings : " + root)
			return root
	except:
		pass


	texFile = view.file_name()
	root = texFile
	if texFile is None:
		# We are in an unnamed, unsaved file.
		# Read from the buffer instead.
		if view.substr(0) != '%':
			return None
		reg = view.find(r"^%[^\n]*(\n%[^\n]*)*", 0)
		if not reg:
			return None
		line_regs = view.lines(reg)
		lines = map(view.substr, line_regs)
		is_file = False

	else:
		# This works on ST2 and ST3, but does not automatically convert line endings.
		# We should be OK though.
		lines = codecs.open(texFile, "r", "UTF-8", "ignore")
		is_file = True

	for line in lines:
		if not line.startswith('%'):
			break
		else:
			# We have a comment match; check for a TEX root match
			tex_exts = '|'.join([re.escape(ext) for ext in get_tex_extensions()])
			mroot = re.match(r"(?i)%\s*!TEX\s+root *= *(.*({0}))\s*$".format(tex_exts), line)
			if mroot:
				# we have a TEX root match 
				# Break the match into path, file and extension
				# Create TEX root file name
				# If there is a TEX root path, use it
				# If the path is not absolute and a src path exists, pre-pend it
				root = mroot.group(1)
				if not os.path.isabs(root) and texFile is not None:
					(texPath, texName) = os.path.split(texFile)
					root = os.path.join(texPath,root)
				root = os.path.normpath(root)
				break

	if is_file: # Not very Pythonic, but works...
		lines.close()

	return root
Example #6
def plugin_loaded():
    # get additional entries from the settings
    _setting_entries = get_setting("fillall_helper_entries", [])
    _filter_invalid_entries(_setting_entries)
    _fillall_entries.extend(_setting_entries)

    _fillall_entries.extend([{
        "regex":
        r'(?:edulcni|tupni)\\',
        "extensions": [e[1:] for e in get_tex_extensions()],
        "strip_extensions": [".tex"]
    }, {
        "regex":
        r'(?:\][^{}\[\]]*\[)?scihpargedulcni\\',
        "extensions":
        get_setting("image_types", ["pdf", "png", "jpeg", "jpg", "eps"])
    }, {
        "regex": r'(?:\][^{}\[\]]*\[)?ecruoserbibdda\\',
        "extensions": ["bib"]
    }, {
        "regex": r'yhpargoilbib\\',
        "extensions": ["bib"],
        "strip_extensions": [".bib"],
        "comma_separated": True
    }])

    # update the fields of the entries
    _update_input_entries(_fillall_entries)

    _fillall_entries.extend([{
        "regex": r'([^{}\[\]]*)\{(?:\][^{}\[\]]*\[)?ssalctnemucod\\',
        "type": "cached",
        "cache_name": "cls"
    }, {
        "regex": r'([^{}\[\]]*)\{(?:\][^{}\[\]]*\[)?egakcapesu\\',
        "type": "cached",
        "cache_name": "pkg"
    }, {
        "regex": r'([^{}\[\]]*)\{elytsyhpargoilbib\\',
        "type": "cached",
        "cache_name": "bst"
    }])

    global _TEX_INPUT_GROUP_MAPPING, TEX_INPUT_FILE_REGEX
    _TEX_INPUT_GROUP_MAPPING = dict(
        (i, v) for i, v in enumerate(_fillall_entries))
    TEX_INPUT_FILE_REGEX = re.compile("(?:{0})".format("|".join(
        entry["regex"] for entry in _fillall_entries)))
def find_bib_files(rootdir, src, bibfiles):
    if not is_tex_file(src):
        src_tex_file = None
        for ext in get_tex_extensions():
            src_tex_file = ''.join((src, ext))
            if os.path.exists(os.path.join(rootdir, src_tex_file)):
                src = src_tex_file
                break
        if src != src_tex_file:
            print("Could not find file {0}".format(src))
            return

    file_path = os.path.normpath(os.path.join(rootdir, src))
    print("Searching file: " + repr(file_path))
    # See latex_ref_completion.py for why the following is wrong:
    #dir_name = os.path.dirname(file_path)

    # read src file and extract all bibliography tags
    try:
        src_file = codecs.open(file_path, "r", 'UTF-8')
    except IOError:
        sublime.status_message("LaTeXTools WARNING: cannot open included file " + file_path)
        print ("WARNING! I can't find it! Check your \\include's and \\input's.")
        return

    src_content = re.sub("%.*","",src_file.read())
    src_file.close()

    m = re.search(r"\\usepackage\[(.*?)\]\{inputenc\}", src_content)
    if m:
        f = None
        try:
            f = codecs.open(file_path, "r", m.group(1))
            src_content = re.sub("%.*", "", f.read())
        except:
            pass
        finally:
            if f and not f.closed:
                f.close()

    # While these commands only allow a single resource as their argument...
    resources = re.findall(r'\\addbibresource(?:\[[^\]]+\])?\{([^\}]+\.bib)\}', src_content)
    resources += re.findall(r'\\addglobalbib(?:\[[^\]]+\])?\{([^\}]+\.bib)\}', src_content)
    resources += re.findall(r'\\addsectionbib(?:\[[^\]]+\])?\{([^\}]+\.bib)\}', src_content)

    # ... these can have a comma-separated list of resources as their argument.
    multi_resources = re.findall(r'\\begin\{refsection\}\[([^\]]+)\]', src_content)
    multi_resources += re.findall(r'\\bibliography\{([^\}]+)\}', src_content)
    multi_resources += re.findall(r'\\nobibliography\{([^\}]+)\}', src_content)

    for multi_resource in multi_resources:
        for res in multi_resource.split(','):
            res = res.strip()
            if res[-4:].lower() != '.bib':
                res = res + '.bib'
            resources.append(res)

    # extract absolute filepath for each bib file
    for res in resources:
        # We join with rootdir, the dir of the master file
        candidate_file = os.path.normpath(os.path.join(rootdir, res))
        # if the file doesn't exist, search the default tex paths
        if not os.path.exists(candidate_file):
            candidate_file = kpsewhich(res, 'mlbib')

        if candidate_file is not None and os.path.exists(candidate_file):
            bibfiles.append(candidate_file)

    # search through input tex files recursively
    for f in re.findall(r'\\(?:input|include|subfile)\{[^\}]+\}', src_content):
        input_f = re.search(r'\{([^\}]+)', f).group(1)
        find_bib_files(rootdir, input_f, bibfiles)
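
The bibliography harvesting above distinguishes single-resource commands (\addbibresource and friends, which must already name a .bib file) from comma-separated lists (\bibliography, \nobibliography, the optional argument of refsection), appending ".bib" where it is missing. A standalone sketch with an invented TeX fragment:

import re

src_content = "\n".join([
    r"\addbibresource[label=primary]{primary.bib}",
    r"\begin{refsection}[local1.bib, local2]",
    r"\bibliography{refs,more/refs2}",
])

resources = re.findall(r'\\addbibresource(?:\[[^\]]+\])?\{([^\}]+\.bib)\}', src_content)
multi = re.findall(r'\\begin\{refsection\}\[([^\]]+)\]', src_content)
multi += re.findall(r'\\bibliography\{([^\}]+)\}', src_content)
for group in multi:
    for res in group.split(','):
        res = res.strip()
        if res[-4:].lower() != '.bib':
            res += '.bib'
        resources.append(res)
print(resources)  # ['primary.bib', 'local1.bib', 'local2.bib', 'refs.bib', 'more/refs2.bib']
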
Example #8
def parse_completions(view, line):
    # reverse line, copied from latex_cite_completions, very cool :)
    line = line[::-1]

    # Do matches!
    search = TEX_INPUT_FILE_REGEX.match(line)

    installed_cls = []
    installed_bst = []
    installed_pkg = []
    input_file_types = None
    # default in case no branch below assigns a completion list
    completions = []

    if search is not None:
        (include_filter, input_filter, image_filter, addbib_filter, bib_filter,
         cls_filter, pkg_filter, bst_filter) = search.groups()
    else:
        return '', []

    # it isn't always correct to include the extension in the output filename
    # esp. with \bibliography{}; here we provide a mechanism to permit this
    filter_exts = []

    if include_filter is not None:
        # if is \include
        prefix = include_filter[::-1]
        # filter the . from the start of the extension
        input_file_types = [e[1:] for e in get_tex_extensions()]
        # only cut off the .tex extension
        filter_exts = ['.tex']
    elif input_filter is not None:
        # if is \input search type set to tex
        prefix = input_filter[::-1]
        # filter the . from the start of the extension
        input_file_types = [e[1:] for e in get_tex_extensions()]
        # only cut off the .tex extension
        filter_exts = ['.tex']
    elif image_filter is not None:
        # if is \includegraphics
        prefix = image_filter[::-1]
        # Load image types from the configuration.
        # To change the accepted types, "image_types" must be set in the
        # LaTeXTools.sublime-settings configuration file or in the
        # project settings for the current view.
        input_file_types = get_setting('image_types',
                                       ['pdf', 'png', 'jpeg', 'jpg', 'eps'])
    elif addbib_filter is not None or bib_filter is not None:
        # For bibliography
        if addbib_filter is not None:
            prefix = addbib_filter[::-1]
        else:
            prefix = bib_filter[::-1]
            filter_exts = ['.bib']
        input_file_types = ['bib']
    elif cls_filter is not None or pkg_filter is not None or bst_filter is not None:
        # for packages, classes and bsts
        if _ST3:
            cache_path = os.path.normpath(
                os.path.join(sublime.cache_path(), "LaTeXTools"))
        else:
            cache_path = os.path.normpath(
                os.path.join(sublime.packages_path(), "User"))

        pkg_cache_file = os.path.normpath(
            os.path.join(
                cache_path,
                'pkg_cache.cache' if _ST3 else 'latextools_pkg_cache.cache'))

        cache = None
        if not os.path.exists(pkg_cache_file):
            gen_cache = sublime.ok_cancel_dialog(
                "Cache files for installed packages, " +
                "classes and bibliography styles do not exist. " +
                "Would you like to generate them? After the cache has been " +
                "generated, please re-run this completion action!"
            )

            if gen_cache:
                sublime.active_window().run_command("latex_gen_pkg_cache")
                completions = []
        else:
            with open(pkg_cache_file) as f:
                cache = json.load(f)

        if cache is not None:
            if cls_filter is not None:
                installed_cls = cache.get("cls")
            elif bst_filter is not None:
                installed_bst = cache.get("bst")
            else:
                installed_pkg = cache.get("pkg")

        prefix = ''
    else:
        prefix = ''

    if len(installed_cls) > 0:
        completions = installed_cls
    elif len(installed_bst) > 0:
        completions = installed_bst
    elif len(installed_pkg) > 0:
        completions = installed_pkg
    elif input_file_types is not None:
        root = getTeXRoot.get_tex_root(view)
        if root:
            completions = get_file_list(root, input_file_types, filter_exts)
        else:
            # file is unsaved
            completions = []

    return prefix, completions
def plugin_loaded():
    # get additional entries from the settings
    _setting_entries = get_setting("fillall_helper_entries", [])
    _filter_invalid_entries(_setting_entries)
    _fillall_entries.extend(_setting_entries)

    _fillall_entries.extend([
        # input/include
        {
            "regex": r'(?:edulcni|tupni)\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"]
        },
        # includegraphics
        {
            "regex": r'(?:\][^{}\[\]]*\[)?scihpargedulcni\\',
            "extensions": get_setting("image_types", [
                "pdf", "png", "jpeg", "jpg", "eps"
            ])
        },
        # import/subimport
        {
            "regex": r'\*?(?:tropmibus)\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"],
            "post_process": "path_only"
        },
        {
            "regex": r'\}[^{}\[\]]*\{\*?(?:tropmi|morftupni|morfedulcni)?bus\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"],
            "post_regex": (
                r'\\sub(?:import|includefrom|inputfrom)\*?'
                r'\{([^{}\[\]]*)\}\{[^\}]*?$'
            ),
            "folder": "$base/$_1"
        },
        {
            "regex": r'\}[^{}\[\]]*\{\*?(?:tropmi|morftupni|morfedulcni)\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"],
            "post_regex": (
                r'\\(?:import|includefrom|inputfrom)\*?'
                r'\{([^{}\[\]]*)\}\{[^\}]*?$'
            ),
            "folder": "$_1"
        },
        {
            "regex": r'(?:\][^{}\[\]]*\[)?ecruoserbibdda\\',
            "extensions": ["bib"]
        },
        {
            "regex": r'yhpargoilbib\\',
            "extensions": ["bib"],
            "strip_extensions": [".bib"],
            "comma_separated": True
        }
    ])

    # update the fields of the entries
    _update_input_entries(_fillall_entries)

    _fillall_entries.extend([
        {
            "regex": r'([^{}\[\]]*)\{(?:\][^{}\[\]]*\[)?ssalctnemucod\\',
            "type": "cached",
            "cache_name": "cls"
        },
        {
            "regex": r'([^{}\[\]]*)\{(?:\][^{}\[\]]*\[)?egakcapesu\\',
            "type": "cached",
            "cache_name": "pkg"
        },
        {
            "regex": r'([^{}\[\]]*)\{elytsyhpargoilbib\\',
            "type": "cached",
            "cache_name": "bst"
        }
    ])

    global _TEX_INPUT_GROUP_MAPPING, TEX_INPUT_FILE_REGEX
    _TEX_INPUT_GROUP_MAPPING = dict((i, v) for i, v in enumerate(_fillall_entries))
    TEX_INPUT_FILE_REGEX = re.compile(
        "(?:{0})".format("|".join(entry["regex"] for entry in _fillall_entries))
    )
def find_bib_files(rootdir, src, bibfiles):
    if not is_tex_file(src):
        src_tex_file = None
        for ext in get_tex_extensions():
            src_tex_file = ''.join((src, ext))
            if os.path.exists(os.path.join(rootdir, src_tex_file)):
                src = src_tex_file
                break
        if src != src_tex_file:
            print("Could not find file {0}".format(src))
            return

    file_path = os.path.normpath(os.path.join(rootdir, src))
    print("Searching file: " + repr(file_path))
    # See latex_ref_completion.py for why the following is wrong:
    #dir_name = os.path.dirname(file_path)

    # read src file and extract all bibliography tags
    try:
        src_file = codecs.open(file_path, "r", 'UTF-8')
    except IOError:
        sublime.status_message("LaTeXTools WARNING: cannot open included file " + file_path)
        print ("WARNING! I can't find it! Check your \\include's and \\input's.")
        return

    src_content = re.sub("%.*","",src_file.read())
    src_file.close()

    m = re.search(r"\\usepackage\[(.*?)\]\{inputenc\}", src_content)
    if m:
        f = None
        try:
            f = codecs.open(file_path, "r", m.group(1))
            src_content = re.sub("%.*", "", f.read())
        except:
            pass
        finally:
            if f and not f.closed:
                f.close()

    # While these commands only allow a single resource as their argument...
    resources = re.findall(r'\\addbibresource(?:\[[^\]]+\])?\{([^\}]+\.bib)\}', src_content)
    resources += re.findall(r'\\addglobalbib(?:\[[^\]]+\])?\{([^\}]+\.bib)\}', src_content)
    resources += re.findall(r'\\addsectionbib(?:\[[^\]]+\])?\{([^\}]+\.bib)\}', src_content)

    # ... these can have a comma-separated list of resources as their argument.
    multi_resources = re.findall(r'\\begin\{refsection\}\[([^\]]+)\]', src_content)
    multi_resources += re.findall(r'\\bibliography\{([^\}]+)\}', src_content)
    multi_resources += re.findall(r'\\nobibliography\{([^\}]+)\}', src_content)

    for multi_resource in multi_resources:
        for res in multi_resource.split(','):
            res = res.strip()
            if res[-4:].lower() != '.bib':
                res = res + '.bib'
            resources.append(res)

    # extract absolute filepath for each bib file
    for res in resources:
        # We join with rootdir, the dir of the master file
        candidate_file = os.path.normpath(os.path.join(rootdir, res))
        # if the file doesn't exist, search the default tex paths
        if not os.path.exists(candidate_file):
            candidate_file = kpsewhich(res, 'mlbib')

        if candidate_file is not None and os.path.exists(candidate_file):
            bibfiles.append(candidate_file)

    # search through input tex files recursively
    for f in re.findall(r'\\(?:input|include)\{[^\}]+\}', src_content):
        input_f = re.search(r'\{([^\}]+)', f).group(1)
        find_bib_files(rootdir, input_f, bibfiles)
Example #11
def find_bib_files(rootdir, src, bibfiles):
    if not is_tex_file(src):
        src_tex_file = None
        for ext in get_tex_extensions():
            src_tex_file = ''.join((src, ext))
            if os.path.exists(os.path.join(rootdir, src_tex_file)):
                src = src_tex_file
                break
        if src != src_tex_file:
            print("Could not find file {0}".format(src))
            return

    file_path = os.path.normpath(os.path.join(rootdir, src))
    print("Searching file: " + repr(file_path))
    # See latex_ref_completion.py for why the following is wrong:
    #dir_name = os.path.dirname(file_path)

    # read src file and extract all bibliography tags
    try:
        src_file = codecs.open(file_path, "r", 'UTF-8')
    except IOError:
        sublime.status_message(
            "LaTeXTools WARNING: cannot open included file " + file_path)
        print(
            "WARNING! I can't find it! Check your \\include's and \\input's.")
        return

    src_content = re.sub("%.*", "", src_file.read())
    src_file.close()

    m = re.search(r"\\usepackage\[(.*?)\]\{inputenc\}", src_content)
    if m:
        f = None
        try:
            f = codecs.open(file_path, "r", m.group(1))
            src_content = re.sub("%.*", "", f.read())
        except:
            pass
        finally:
            if f and not f.closed:
                f.close()

    bibtags = re.findall(r'\\bibliography\{[^\}]+\}', src_content)
    bibtags += re.findall(r'\\nobibliography\{[^\}]+\}', src_content)
    bibtags += re.findall(r'\\addbibresource\{[^\}]+\.bib\}', src_content)

    # extract absolute filepath for each bib file
    for tag in bibtags:
        bfiles = re.search(r'\{([^\}]+)', tag).group(1).split(',')
        for bf in bfiles:
            if bf[-4:].lower() != '.bib':
                bf = bf + '.bib'
            # We join with rootdir, the dir of the master file
            candidate_file = os.path.normpath(os.path.join(rootdir, bf))
            # if the file doesn't exist, search the default tex paths
            if not os.path.exists(candidate_file):
                candidate_file = kpsewhich(bf, 'mlbib')

            if candidate_file is not None and os.path.exists(candidate_file):
                bibfiles.append(candidate_file)

    # search through input tex files recursively
    for f in re.findall(r'\\(?:input|include)\{[^\}]+\}', src_content):
        input_f = re.search(r'\{([^\}]+)', f).group(1)
        find_bib_files(rootdir, input_f, bibfiles)
Example #12
def parse_completions(view, line):
    # reverse line, copied from latex_cite_completions, very cool :)
    line = line[::-1]

    # Do matches!
    search = TEX_INPUT_FILE_REGEX.match(line)

    installed_cls = []
    installed_bst = []
    installed_pkg = []
    input_file_types = None
    # default in case no branch below assigns a completion list
    completions = []

    if search is not None:
        (   include_filter,
            input_filter,
            image_filter,
            svg_filter,
            addbib_filter,
            bib_filter,
            cls_filter,
            pkg_filter,
            bst_filter) = search.groups()
    else:
        return '', []

    # it isn't always correct to include the extension in the output filename
    # esp. with \bibliography{}; here we provide a mechanism to permit this
    filter_exts = []

    if include_filter is not None:
        # if is \include
        prefix = include_filter[::-1]
        # filter the . from the start of the extension
        input_file_types = [e[1:] for e in get_tex_extensions()]
        # only cut off the .tex extension
        filter_exts = ['.tex']
    elif input_filter is not None:
        # if is \input search type set to tex
        prefix = input_filter[::-1]
        # filter the . from the start of the extension
        input_file_types = [e[1:] for e in get_tex_extensions()]
        # only cut off the .tex extension
        filter_exts = ['.tex']
    elif image_filter is not None:
        # if is \includegraphics
        prefix = image_filter[::-1]
        # Load image types from the configuration.
        # To change the accepted types, "image_types" must be set in the
        # LaTeXTools.sublime-settings configuration file or in the
        # project settings for the current view.
        input_file_types = get_setting('image_types', [
                'pdf', 'png', 'jpeg', 'jpg', 'eps'
            ])
    elif svg_filter is not None:
        # if is \includesvg
        prefix = svg_filter[::-1]
        # include only svg files
        input_file_types = ['svg']
        # cut off the .svg extension
        filter_exts = ['.svg']
    elif addbib_filter is not None or bib_filter is not None:
        # For bibliography
        if addbib_filter is not None:
            prefix = addbib_filter[::-1]
        else:
            prefix = bib_filter[::-1]
            filter_exts = ['.bib']
        input_file_types = ['bib']
    elif cls_filter is not None or pkg_filter is not None or bst_filter is not None:
        # for packages, classes and bsts
        if _ST3:
            cache_path = os.path.normpath(
                os.path.join(
                    sublime.cache_path(),
                    "LaTeXTools"
                ))
        else:
            cache_path = os.path.normpath(
                os.path.join(
                    sublime.packages_path(),
                    "User"
                ))

        pkg_cache_file = os.path.normpath(
            os.path.join(cache_path, 'pkg_cache.cache' if _ST3 else 'latextools_pkg_cache.cache'))

        cache = None
        if not os.path.exists(pkg_cache_file):
            gen_cache = sublime.ok_cancel_dialog("Cache files for installed packages, "
                + "classes and bibliographystyles do not exists, "
                + "would you like to generate it? After generating complete, please re-run this completion action!"
            )

            if gen_cache:
                sublime.active_window().run_command("latex_gen_pkg_cache")
                completions = []
        else:
            with open(pkg_cache_file) as f:
                cache = json.load(f)

        if cache is not None:
            if cls_filter is not None:
                installed_cls = cache.get("cls")
            elif bst_filter is not None:
                installed_bst = cache.get("bst")
            else:
                installed_pkg = cache.get("pkg")

        prefix = ''
    else:
        prefix = ''

    if len(installed_cls) > 0:
        completions = installed_cls
    elif len(installed_bst) > 0:
        completions = installed_bst
    elif len(installed_pkg) > 0:
        completions = installed_pkg
    elif input_file_types is not None:
        root = getTeXRoot.get_tex_root(view)
        if root:
            completions = get_file_list(root, input_file_types, filter_exts)
        else:
            # file is unsaved
            completions = []

    return prefix, completions
Example #13
def plugin_loaded():
    # get additional entries from the settings
    _setting_entries = get_setting("fillall_helper_entries", [])
    _filter_invalid_entries(_setting_entries)
    _fillall_entries.extend(_setting_entries)

    _fillall_entries.extend([
        # input/include
        {
            "regex": r'(?:edulcni|tupni)\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"]
        },
        # includegraphics
        {
            "regex":
            r'(?:\][^{}\[\]]*\[)?scihpargedulcni\\',
            "extensions":
            get_setting("image_types", ["pdf", "png", "jpeg", "jpg", "eps"])
        },
        # import/subimport
        {
            "regex": r'\*?(?:tropmibus)\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"],
            "post_process": "path_only"
        },
        {
            "regex":
            r'\}[^{}\[\]]*\{\*?(?:tropmi|morftupni|morfedulcni)?bus\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"],
            "post_regex": (r'\\sub(?:import|includefrom|inputfrom)\*?'
                           r'\{([^{}\[\]]*)\}\{[^\}]*?$'),
            "folder":
            "$base/$_1"
        },
        {
            "regex":
            r'\}[^{}\[\]]*\{\*?(?:tropmi|morftupni|morfedulcni)\\',
            "extensions": [e[1:] for e in get_tex_extensions()],
            "strip_extensions": [".tex"],
            "post_regex": (r'\\(?:import|includefrom|inputfrom)\*?'
                           r'\{([^{}\[\]]*)\}\{[^\}]*?$'),
            "folder":
            "$_1"
        },
        {
            "regex": r'(?:\][^{}\[\]]*\[)?ecruoserbibdda\\',
            "extensions": ["bib"]
        },
        {
            "regex": r'yhpargoilbib\\',
            "extensions": ["bib"],
            "strip_extensions": [".bib"],
            "comma_separated": True
        }
    ])

    # update the fields of the entries
    _update_input_entries(_fillall_entries)

    _fillall_entries.extend([{
        "regex": r'([^{}\[\]]*)\{(?:\][^{}\[\]]*\[)?ssalctnemucod\\',
        "type": "cached",
        "cache_name": "cls"
    }, {
        "regex": r'([^{}\[\]]*)\{(?:\][^{}\[\]]*\[)?egakcapesu\\',
        "type": "cached",
        "cache_name": "pkg"
    }, {
        "regex": r'([^{}\[\]]*)\{elytsyhpargoilbib\\',
        "type": "cached",
        "cache_name": "bst"
    }])

    global _TEX_INPUT_GROUP_MAPPING, TEX_INPUT_FILE_REGEX
    _TEX_INPUT_GROUP_MAPPING = dict(
        (i, v) for i, v in enumerate(_fillall_entries))
    TEX_INPUT_FILE_REGEX = re.compile("(?:{0})".format("|".join(
        entry["regex"] for entry in _fillall_entries)))
Example #14
def find_bib_files(rootdir, src, bibfiles):
    if not is_tex_file(src):
        src_tex_file = None
        for ext in get_tex_extensions():
            src_tex_file = ''.join((src, ext))
            if os.path.exists(os.path.join(rootdir, src_tex_file)):
                src = src_tex_file
                break
        if src != src_tex_file:
            print("Could not find file {0}".format(src))
            return

    file_path = os.path.normpath(os.path.join(rootdir, src))
    print("Searching file: " + repr(file_path))
    # See latex_ref_completion.py for why the following is wrong:
    #dir_name = os.path.dirname(file_path)

    # read src file and extract all bibliography tags
    try:
        src_file = codecs.open(file_path, "r", 'UTF-8')
    except IOError:
        sublime.status_message("LaTeXTools WARNING: cannot open included file " + file_path)
        print ("WARNING! I can't find it! Check your \\include's and \\input's.")
        return

    src_content = re.sub("%.*","",src_file.read())
    src_file.close()

    m = re.search(r"\\usepackage\[(.*?)\]\{inputenc\}", src_content)
    if m:
        f = None
        try:
            f = codecs.open(file_path, "r", m.group(1))
            src_content = re.sub("%.*", "", f.read())
        except:
            pass
        finally:
            if f and not f.closed:
                f.close()

    bibtags =  re.findall(r'\\bibliography\{[^\}]+\}', src_content)
    bibtags += re.findall(r'\\nobibliography\{[^\}]+\}', src_content)
    bibtags += re.findall(r'\\addbibresource\{[^\}]+\.bib\}', src_content)

    # extract absolute filepath for each bib file
    for tag in bibtags:
        bfiles = re.search(r'\{([^\}]+)', tag).group(1).split(',')
        for bf in bfiles:
            if bf[-4:].lower() != '.bib':
                bf = bf + '.bib'
            # We join with rootdir, the dir of the master file
            candidate_file = os.path.normpath(os.path.join(rootdir, bf))
            # if the file doesn't exist, search the default tex paths
            if not os.path.exists(candidate_file):
                candidate_file = kpsewhich(bf, 'mlbib')

            if candidate_file is not None and os.path.exists(candidate_file):
                bibfiles.append(candidate_file)

    # search through input tex files recursively
    for f in re.findall(r'\\(?:input|include)\{[^\}]+\}', src_content):
        input_f = re.search(r'\{([^\}]+)', f).group(1)
        find_bib_files(rootdir, input_f, bibfiles)