    def run(self):
        """Process .bib files, set file dependencies, and create a
        node that is to be transformed to the entries of the
        bibliography.
        """
        env = self.state.document.settings.env
        # create id and cache for this node
        # this id will be stored with the node
        # and is used to look up additional data in env.bibtex_cache
        # (implementation note: new_serialno only guarantees unique
        # ids within a single document, but we need the id to be
        # unique across all documents, so we also include the docname
        # in the id)
        id_ = "bibtex-bibliography-%s-%s" % (
            env.docname, env.new_serialno("bibtex"))
        if "filter" in self.options:
            if "all" in self.options:
                env.app.warn(standout(":filter: overrides :all:"))
            if "notcited" in self.options:
                env.app.warn(standout(":filter: overrides :notcited:"))
            if "cited" in self.options:
                env.app.warn(standout(":filter: overrides :cited:"))
            try:
                filter_ = ast.parse(self.options["filter"])
            except SyntaxError:
                env.app.warn(
                    standout("syntax error in :filter: expression") + " (" +
                    self.options["filter"] + "); "
                    "the option will be ignored")
                filter_ = ast.parse("cited")
        elif "all" in self.options:
            filter_ = ast.parse("True")
        elif "notcited" in self.options:
            filter_ = ast.parse("not cited")
        else:
            # the default filter: include only cited entries
            filter_ = ast.parse("cited")
        bibcache = BibliographyCache(
            list_=self.options.get("list", "citation"),
            enumtype=self.options.get("enumtype", "arabic"),
            start=self.options.get("start", 1),
            style=self.options.get(
                "style", env.app.config.bibtex_default_style),
            filter_=filter_,
            encoding=self.options.get(
                "encoding",
                "latex+" + self.state.document.settings.input_encoding),
            curly_bracket_strip=(
                "disable-curly-bracket-strip" not in self.options),
            labelprefix=self.options.get("labelprefix", ""),
            keyprefix=self.options.get("keyprefix", ""),
            labels={},
            bibfiles=[],
        )
        if bibcache.list_ not in set(["bullet", "enumerated", "citation"]):
            env.app.warn(
                "unknown bibliography list type '{0}'.".format(bibcache.list_))
        for bibfile in self.arguments[0].split():
            # convert to normalized absolute path to ensure that the same
            # file only occurs once in the cache
            bibfile = os.path.normpath(env.relfn2path(bibfile.strip())[1])
            self.process_bibfile(bibfile, bibcache.encoding)
            env.note_dependency(bibfile)
            bibcache.bibfiles.append(bibfile)
        env.bibtex_cache.set_bibliography_cache(env.docname, id_, bibcache)
        return [bibliography("", ids=[id_])]
    def process_bibfile(self, bibfile, encoding):
        """Check if ``env.bibtex_cache.bibfiles[bibfile]`` is still
        up to date. If not, parse the *bibfile* (see
        :meth:`update_bibfile_cache`), and store parsed data in the
        bibtex cache.

        :param bibfile: The bib file name.
        :type bibfile: ``str``
        :return: The parsed bibliography data.
        :rtype: :class:`pybtex.database.BibliographyData`
        """
        env = self.state.document.settings.env
        cache = env.bibtex_cache.bibfiles
        # get modification time of bibfile
        try:
            mtime = os.path.getmtime(bibfile)
        except OSError:
            logger.warning(
                standout("could not open bibtex file {0}.".format(bibfile)))
            cache[bibfile] = BibfileCache(  # dummy cache
                mtime=-float("inf"), data=BibliographyData())
            return cache[bibfile].data
        # get cache and check if it is still up to date
        # if it is not up to date, parse the bibtex file
        # and store it in the cache
        logger.info(
            bold("checking for {0} in bibtex cache... ".format(bibfile)),
            nonl=True)
        try:
            bibfile_cache = cache[bibfile]
        except KeyError:
            logger.info("not found")
            self.update_bibfile_cache(bibfile, mtime, encoding)
        else:
            if mtime != bibfile_cache.mtime:
                logger.info("out of date")
                self.update_bibfile_cache(bibfile, mtime, encoding)
            else:
                logger.info('up to date')
        return cache[bibfile].data
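
The run() method at the top of this excerpt only parses the :filter: expression with ast.parse; evaluating that AST against each bibliography entry happens elsewhere in the extension and is not shown here. Purely as an illustration (evaluate_filter and entry_vars are invented names for this sketch, not the extension's API), such an expression could be compiled and evaluated against a whitelisted set of per-entry variables:

import ast

def evaluate_filter(filter_source, entry_vars):
    # Sketch only, not the extension's real evaluator: compile the
    # expression and evaluate it with no builtins, exposing just the
    # per-entry variables (e.g. "cited").
    tree = ast.parse(filter_source, mode="eval")  # SyntaxError on bad input
    code = compile(tree, "<filter>", "eval")
    return bool(eval(code, {"__builtins__": {}}, dict(entry_vars)))

# the default filter keeps only cited entries
keep = evaluate_filter("cited", {"cited": True})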
Example #3
    def process_bibfile(self, bibfile, encoding):
        """Check if ``env.bibtex_cache.bibfiles[bibfile]`` is still
        up to date. If not, parse the *bibfile* (see
        :meth:`update_bibfile_cache`), and store parsed data in the
        bibtex cache.

        :param bibfile: The bib file name.
        :type bibfile: ``str``
        :return: The parsed bibliography data.
        :rtype: :class:`pybtex.database.BibliographyData`
        """
        env = self.state.document.settings.env
        cache = env.bibtex_cache.bibfiles
        # get modification time of bibfile
        try:
            mtime = os.path.getmtime(bibfile)
        except OSError:
            env.app.warn(
                standout("could not open bibtex file {0}.".format(bibfile)))
            cache[bibfile] = BibfileCache(  # dummy cache
                mtime=-float("inf"), data=BibliographyData())
            return cache[bibfile].data
        # get cache and check if it is still up to date
        # if it is not up to date, parse the bibtex file
        # and store it in the cache
        env.app.info(
            bold("checking for {0} in bibtex cache... ".format(bibfile)),
            nonl=True)
        try:
            bibfile_cache = cache[bibfile]
        except KeyError:
            env.app.info("not found")
            self.update_bibfile_cache(bibfile, mtime, encoding)
        else:
            if mtime != bibfile_cache.mtime:
                env.app.info("out of date")
                self.update_bibfile_cache(bibfile, mtime, encoding)
            else:
                env.app.info('up to date')
        return cache[bibfile].data
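
update_bibfile_cache is called in several of these excerpts but never shown. The following is a minimal sketch of what it plausibly does, assuming the extension's pybtex backend (pybtex.database.input.bibtex.Parser takes an encoding and parse_file returns the parsed BibliographyData); the body is an assumption, not code from the project:

from pybtex.database.input import bibtex

def update_bibfile_cache(self, bibfile, mtime, encoding):
    # Sketch: parse the .bib file with pybtex and store the result in
    # the environment cache together with its modification time.
    env = self.state.document.settings.env
    parser = bibtex.Parser(encoding)
    data = parser.parse_file(bibfile)
    env.bibtex_cache.bibfiles[bibfile] = BibfileCache(mtime=mtime, data=data)
    return data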
Example #4
def process_bibfile(cache, bibfile, encoding):
    """Check if ``cache[bibfile]`` is still up to date. If not, parse
    the *bibfile*, and store parsed data in the bibtex cache.

    :param cache: The cache for all bib files.
    :type cache: ``dict``
    :param bibfile: The bib file name.
    :type bibfile: ``str``
    :return: The parsed bibliography data.
    :rtype: :class:`pybtex.database.BibliographyData`
    """
    # get modification time of bibfile
    try:
        mtime = os.path.getmtime(bibfile)
    except OSError:
        logger.warning(
            standout("could not open bibtex file {0}.".format(bibfile)))
        cache[bibfile] = BibfileCache(  # dummy cache
            mtime=-float("inf"), data=BibliographyData())
        return cache[bibfile].data
    # get cache and check if it is still up to date
    # if it is not up to date, parse the bibtex file
    # and store it in the cache
    logger.info(
        bold("checking for {0} in bibtex cache... ".format(bibfile)),
        nonl=True)
    try:
        bibfile_cache = cache[bibfile]
    except KeyError:
        logger.info("not found")
        cache[bibfile] = BibfileCache(
            mtime=mtime, data=parse_bibfile(bibfile, encoding))
    else:
        if mtime != bibfile_cache.mtime:
            logger.info("out of date")
            cache[bibfile] = BibfileCache(
                mtime=mtime, data=parse_bibfile(bibfile, encoding))
        else:
            logger.info('up to date')
    return cache[bibfile].data
Example #5
    def process_bibfile(self, bibfile, encoding):
        """Check if ``env.bibgloss_cache.bibfiles[bibfile]`` is still
        up to date. If not, parse the *bibfile*, and store parsed data in the
        bibtex cache.

        Parameters
        ----------
        bibfile: str
            The bib file name.

        """
        env = self.state.document.settings.env
        cache = env.bibgloss_cache.bibfiles
        # get modification time of bibfile
        try:
            mtime = os.path.getmtime(bibfile)
        except OSError:
            logger.warning(
                standout("could not open bibtex file {0}.".format(bibfile)))
            cache[bibfile] = BibfileCache(  # dummy cache
                mtime=-float("inf"), data=BibGlossDB())
            return
        # get cache and check if it is still up to date
        # if it is not up to date, parse the bibtex file
        # and store it in the cache
        logger.info(bold(
            "checking for {0} in bibtex cache... ".format(bibfile)),
                    nonl=True)
        try:
            bibfile_cache = cache[bibfile]
        except KeyError:
            logger.info("not found")
            self.update_bibfile_cache(bibfile, mtime, encoding)
        else:
            if mtime != bibfile_cache.mtime:
                logger.info("out of date")
                self.update_bibfile_cache(bibfile, mtime, encoding)
            else:
                logger.info("up to date")
Example #6
def create_role(app, tag_filename, rootdir):
    #Tidy up the root directory path
    if not rootdir.endswith(('/', '\\')):
        rootdir = join(rootdir, os.sep)

    try:
        tag_file = ET.parse(tag_filename)

        cache_name = os.path.basename(tag_filename)

        app.info(bold('Checking tag file cache for %s: ' % cache_name),
                 nonl=True)
        if not hasattr(app.env, 'doxylink_cache'):
            # no cache present at all, initialise it
            app.info('No cache at all, rebuilding...')
            mapping = parse_tag_file(tag_file)
            app.env.doxylink_cache = {
                cache_name: {
                    'mapping': mapping,
                    'mtime': os.path.getmtime(tag_filename)
                }
            }
        elif not app.env.doxylink_cache.get(cache_name):
            # Main cache is there but the specific sub-cache for this tag file is not
            app.info('Sub cache is missing, rebuilding...')
            mapping = parse_tag_file(tag_file)
            app.env.doxylink_cache[cache_name] = {
                'mapping': mapping,
                'mtime': os.path.getmtime(tag_filename)
            }
        elif app.env.doxylink_cache[cache_name]['mtime'] < os.path.getmtime(
                tag_filename):
            # tag file has been modified since sub-cache creation
            app.info('Sub-cache is out of date, rebuilding...')
            mapping = parse_tag_file(tag_file)
            app.env.doxylink_cache[cache_name] = {
                'mapping': mapping,
                'mtime': os.path.getmtime(tag_filename)
            }
        else:
            #The cache is up to date
            app.info('Sub-cache is up-to-date')
    except IOError:
        tag_file = None
        app.warn(
            standout(
                'Could not open tag file %s. Make sure your `doxylink` config variable is set correctly.'
                % tag_filename))

    def find_doxygen_link(name,
                          rawtext,
                          text,
                          lineno,
                          inliner,
                          options={},
                          content=[]):
        text = utils.unescape(text)
        # from :name:`title <part>`
        has_explicit_title, title, part = split_explicit_title(text)
        warning_messages = []
        if tag_file:
            url = find_url(tag_file, part)
            try:
                url = find_url2(app.env.doxylink_cache[cache_name]['mapping'],
                                part)
            except LookupError as error:
                warning_messages.append(
                    'Error while parsing `%s`. Is not a well-formed C++ function call or symbol. If this is not the case, it is a doxylink bug so please report it. Error reported was: %s'
                    % (part, error))
            if url:

                #If it's an absolute path then the link will work regardless of the document directory
                #Also check if it is a URL (i.e. it has a 'scheme' like 'http' or 'file')
                if os.path.isabs(rootdir) or urlparse.urlparse(rootdir).scheme:
                    full_url = join(rootdir, url['file'])
                #But otherwise we need to add the relative path of the current document to the root source directory to the link
                else:
                    relative_path_to_docsrc = os.path.relpath(
                        app.env.srcdir,
                        os.path.dirname(inliner.document.current_source))
                    full_url = join(
                        relative_path_to_docsrc, '/', rootdir, url['file']
                    )  #We always use the '/' here rather than os.sep since this is a web link avoids problems like documentation/.\../library/doc/ (mixed slashes)

                if url['kind'] == 'function' and app.config.add_function_parentheses and not normalise(
                        title)[1]:
                    title = join(title, '()')

                pnode = nodes.reference(title,
                                        title,
                                        internal=False,
                                        refuri=full_url)
                return [pnode], []
            #By here, no match was found
            warning_messages.append(
                'Could not find match for `%s` in `%s` tag file' %
                (part, tag_filename))
        else:
            warning_messages.append(
                'Could not find match for `%s` because tag file not found' %
                (part))

        pnode = nodes.inline(rawsource=title, text=title)
        return [pnode], [
            inliner.reporter.warning(message, line=lineno)
            for message in warning_messages
        ]

    return find_doxygen_link
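
The create_role factory above returns a docutils role function, which only takes effect once it is registered with Sphinx. The wiring typically looks like the sketch below; the setup functions and the assumed doxylink config layout ({role_name: (tag_file, rootdir)}) are inferred from how the factory is called here, not copied from the project:

def setup_doxylink_roles(app):
    # assumed config layout: {role_name: (tag_file, rootdir)}
    for name, (tag_filename, rootdir) in app.config.doxylink.items():
        app.add_role(name, create_role(app, tag_filename, rootdir))

def setup(app):
    app.add_config_value('doxylink', {}, 'env')
    app.connect('builder-inited', setup_doxylink_roles)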
Example #7
 def run(self):
     """Process .bib files, set file dependencies, and create a
     node that is to be transformed to the entries of the
     bibliography.
     """
     env = self.state.document.settings.env
     # create id and cache for this node
     # this id will be stored with the node
     # and is used to look up additional data in env.bibtex_cache
     # (implementation note: new_serialno only guarantees unique
     # ids within a single document, but we need the id to be
     # unique across all documents, so we also include the docname
     # in the id)
     id_ = 'bibtex-bibliography-%s-%s' % (env.docname,
                                          env.new_serialno('bibtex'))
     if "filter" in self.options:
         if "all" in self.options:
             logger.warning(standout(":filter: overrides :all:"))
         if "notcited" in self.options:
             logger.warning(standout(":filter: overrides :notcited:"))
         if "cited" in self.options:
             logger.warning(standout(":filter: overrides :cited:"))
         try:
             filter_ = ast.parse(self.options["filter"])
         except SyntaxError:
             logger.warning(
                 standout("syntax error in :filter: expression") + " (" +
                 self.options["filter"] + "); "
                 "the option will be ignored")
             filter_ = ast.parse("cited")
     elif "all" in self.options:
         filter_ = ast.parse("True")
     elif "notcited" in self.options:
         filter_ = ast.parse("not cited")
     else:
         # the default filter: include only cited entries
         filter_ = ast.parse("cited")
     bibcache = BibliographyCache(
         list_=self.options.get("list", "citation"),
         enumtype=self.options.get("enumtype", "arabic"),
         start=self.options.get("start", 1),
         style=self.options.get("style",
                                env.app.config.bibtex_default_style),
         filter_=filter_,
         encoding=self.options.get(
             'encoding', self.state.document.settings.input_encoding),
         labelprefix=self.options.get("labelprefix", ""),
         keyprefix=self.options.get("keyprefix", ""),
         labels={},
         # convert to normalized absolute path to ensure that the same file
         # only occurs once in the cache
         bibfiles=[
             normpath_bibfile(env, bibfile)
             for bibfile in self.arguments[0].split()
         ],
     )
     if (bibcache.list_ not in set(["bullet", "enumerated", "citation"])):
         logger.warning("unknown bibliography list type '{0}'.".format(
             bibcache.list_))
     for bibfile in bibcache.bibfiles:
         process_bibfile(env.bibtex_cache.bibfiles, bibfile,
                         bibcache.encoding)
         env.note_dependency(bibfile)
     env.bibtex_cache.bibliographies[env.docname][id_] = bibcache
     return [bibliography('', ids=[id_])]
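
normpath_bibfile is not shown in this excerpt; judging from the equivalent inline code in the other examples (os.path.normpath(env.relfn2path(...)[1])), it presumably amounts to:

import os.path

def normpath_bibfile(env, bibfile):
    # resolve the path relative to the current document and normalize it,
    # so the same file always maps to a single cache key
    return os.path.normpath(env.relfn2path(bibfile.strip())[1])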
Example #8
def create_role(app, tag_filename, rootdir):
    #Tidy up the root directory path
    if not rootdir.endswith(('/', '\\')):
        rootdir = join(rootdir, os.sep)

    try:
        tag_file = ET.parse(tag_filename)

        cache_name = os.path.basename(tag_filename)

        app.info(bold('Checking tag file cache for %s: ' % cache_name), nonl=True)
        if not hasattr(app.env, 'doxylink_cache'):
            # no cache present at all, initialise it
            app.info('No cache at all, rebuilding...')
            mapping = parse_tag_file(tag_file)
            app.env.doxylink_cache = { cache_name : {'mapping' : mapping, 'mtime' : os.path.getmtime(tag_filename)}}
        elif not app.env.doxylink_cache.get(cache_name):
            # Main cache is there but the specific sub-cache for this tag file is not
            app.info('Sub cache is missing, rebuilding...')
            mapping = parse_tag_file(tag_file)
            app.env.doxylink_cache[cache_name] = {'mapping' : mapping, 'mtime' : os.path.getmtime(tag_filename)}
        elif app.env.doxylink_cache[cache_name]['mtime'] < os.path.getmtime(tag_filename):
            # tag file has been modified since sub-cache creation
            app.info('Sub-cache is out of date, rebuilding...')
            mapping = parse_tag_file(tag_file)
            app.env.doxylink_cache[cache_name] = {'mapping' : mapping, 'mtime' : os.path.getmtime(tag_filename)}
        else:
            #The cache is up to date
            app.info('Sub-cache is up-to-date')
    except IOError:
        tag_file = None
        app.warn(standout('Could not open tag file %s. Make sure your `doxylink` config variable is set correctly.' % tag_filename))

    def find_doxygen_link(name, rawtext, text, lineno, inliner, options={}, content=[]):
        text = utils.unescape(text)
        # from :name:`title <part>`
        has_explicit_title, title, part = split_explicit_title(text)
        warning_messages = []
        if tag_file:
            url = find_url(tag_file, part)
            try:
                url = find_url2(app.env.doxylink_cache[cache_name]['mapping'], part)
            except LookupError as error:
                warning_messages.append('Error while parsing `%s`. Is not a well-formed C++ function call or symbol. If this is not the case, it is a doxylink bug so please report it. Error reported was: %s' % (part, error))
            if url:

                #If it's an absolute path then the link will work regardless of the document directory
                #Also check if it is a URL (i.e. it has a 'scheme' like 'http' or 'file')
                if os.path.isabs(rootdir) or urlparse(rootdir).scheme:
                    full_url = join(rootdir, url['file'])
                #But otherwise we need to add the relative path of the current document to the root source directory to the link
                else:

                    current_sourceTMP = str(inliner.document.current_source)
                    relative_path_to_docsrc = os.path.relpath(app.env.srcdir,
                                               os.path.dirname(current_sourceTMP))
                    full_url = join(relative_path_to_docsrc, '/', rootdir, url['file']) #We always use the '/' here rather than os.sep since this is a web link avoids problems like documentation/.\../library/doc/ (mixed slashes)

                if url['kind'] == 'function' and app.config.add_function_parentheses and not normalise(title)[1]:
                    title = join(title, '()')

                pnode = nodes.reference(title, title, internal=False, refuri=full_url)
                return [pnode], []
            #By here, no match was found
            warning_messages.append('Could not find match for `%s` in `%s` tag file' % (part, tag_filename))
        else:
            warning_messages.append('Could not find match for `%s` because tag file not found' % (part))

        pnode = nodes.inline(rawsource=title, text=title)
        return [pnode], [inliner.reporter.warning(message, line=lineno) for message in warning_messages]

    return find_doxygen_link
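
The rootdir test above accepts either an absolute filesystem path or anything carrying a URL scheme as a link base that needs no further adjustment. For reference, this is plain standard-library behaviour:

from urllib.parse import urlparse
import os.path

urlparse("http://example.com/doxygen/").scheme  # 'http'  -> used as-is
urlparse("api/html/").scheme                    # ''      -> treated as relative
os.path.isabs("/var/doc/html")                  # True    -> used as-is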
Example #9
 def run(self):
     """Process .bib files, set file dependencies, and create a
     node that is to be transformed to the entries of the
     bibliography.
     """
     env = self.state.document.settings.env
     cache = env.bibtex_cache.bibliographies
     # create id and cache for this node
     # this id will be stored with the node
     # and is used to look up additional data in env.bibtex_cache
     # (implementation note: new_serialno only guarantees unique
     # ids within a single document, but we need the id to be
     # unique across all documents, so we also include the docname
     # in the id)
     id_ = 'bibtex-bibliography-%s-%s' % (env.docname,
                                          env.new_serialno('bibtex'))
     if "filter" in self.options:
         if "all" in self.options:
             env.app.warn(standout(":filter: overrides :all:"))
         if "notcited" in self.options:
             env.app.warn(standout(":filter: overrides :notcited:"))
         if "cited" in self.options:
             env.app.warn(standout(":filter: overrides :cited:"))
         try:
             filter_ = ast.parse(self.options["filter"])
         except SyntaxError:
             env.app.warn(
                 standout("syntax error in :filter: expression") + " (" +
                 self.options["filter"] + "); "
                 "the option will be ignored")
             filter_ = ast.parse("cited")
     elif "all" in self.options:
         filter_ = ast.parse("True")
     elif "notcited" in self.options:
         filter_ = ast.parse("not cited")
     else:
         # the default filter: include only cited entries
         filter_ = ast.parse("cited")
     info = BibliographyCache(
         docname=env.docname,
         list_=self.options.get("list", "citation"),
         enumtype=self.options.get("enumtype", "arabic"),
         start=self.options.get("start", 1),
         style=self.options.get("style", "plain"),
         filter_=filter_,
         encoding=self.options.get(
             'encoding',
             'latex+' + self.state.document.settings.input_encoding),
         curly_bracket_strip=('disable-curly-bracket-strip'
                              not in self.options),
         labelprefix=self.options.get("labelprefix", ""),
     )
     if (info.list_ not in set(["bullet", "enumerated", "citation"])):
         env.app.warn("unknown bibliography list type '{0}'.".format(
             info.list_))
     for bibfile in self.arguments[0].split():
         # convert to normalized absolute path to ensure that the same file
         # only occurs once in the cache
         bibfile = os.path.normpath(env.relfn2path(bibfile.strip())[1])
         self.process_bibfile(bibfile, info.encoding)
         env.note_dependency(bibfile)
         info.bibfiles.append(bibfile)
     cache[id_] = info
     return [bibliography('', ids=[id_])]
Example #10
def create_role(app, tag_filename, rootdir):
    # Tidy up the root directory path
    if not rootdir.endswith(('/', '\\')):
        rootdir = join(rootdir, os.sep)

    try:
        tag_file = ET.parse(tag_filename)

        cache_name = os.path.basename(tag_filename)

        report_info(app.env,
                    bold('Checking tag file cache for %s: ' % cache_name))
        if not hasattr(app.env, 'doxylink_cache'):
            # no cache present at all, initialise it
            report_info(app.env, 'No cache at all, rebuilding...')
            mapping = SymbolMap(tag_file)
            app.env.doxylink_cache = {
                cache_name: {
                    'mapping': mapping,
                    'mtime': os.path.getmtime(tag_filename),
                    'version': __version__
                }
            }
        elif not app.env.doxylink_cache.get(cache_name):
            # Main cache is there but the specific sub-cache for this tag file is not
            report_info(app.env, 'Sub cache is missing, rebuilding...')
            mapping = SymbolMap(tag_file)
            app.env.doxylink_cache[cache_name] = {
                'mapping': mapping,
                'mtime': os.path.getmtime(tag_filename),
                'version': __version__
            }
        elif app.env.doxylink_cache[cache_name]['mtime'] < os.path.getmtime(
                tag_filename):
            # tag file has been modified since sub-cache creation
            report_info(app.env, 'Sub-cache is out of date, rebuilding...')
            mapping = SymbolMap(tag_file)
            app.env.doxylink_cache[cache_name] = {
                'mapping': mapping,
                'mtime': os.path.getmtime(tag_filename),
                'version': __version__
            }
        elif not app.env.doxylink_cache[cache_name].get(
                'version') or app.env.doxylink_cache[cache_name].get(
                    'version') != __version__:
            # sub-cache doesn't have a version or the version doesn't match
            report_info(
                app.env,
                'Sub-cache schema version doesn\'t match, rebuilding...')
            mapping = SymbolMap(tag_file)
            app.env.doxylink_cache[cache_name] = {
                'mapping': mapping,
                'mtime': os.path.getmtime(tag_filename),
                'version': __version__
            }
        else:
            # The cache is up to date
            report_info(app.env, 'Sub-cache is up-to-date')
    except FileNotFoundError:
        tag_file = None
        report_warning(
            app.env,
            standout(
                'Could not find tag file %s. Make sure your `doxylink` config variable is set correctly.'
                % tag_filename))

    def find_doxygen_link(name,
                          rawtext,
                          text,
                          lineno,
                          inliner,
                          options={},
                          content=[]):
        # from :name:`title <part>`
        has_explicit_title, title, part = split_explicit_title(text)
        part = utils.unescape(part)
        warning_messages = []
        if not tag_file:
            warning_messages.append(
                'Could not find match for `%s` because tag file not found' %
                part)
            return [nodes.inline(title, title)], []

        try:
            url = app.env.doxylink_cache[cache_name]['mapping'][part]
        except LookupError as error:
            inliner.reporter.warning(
                'Could not find match for `%s` in `%s` tag file. Error reported was %s'
                % (part, tag_filename, error),
                line=lineno)
            return [nodes.inline(title, title)], []
        except ParseException as error:
            inliner.reporter.warning(
                'Error while parsing `%s`. Is not a well-formed C++ function call or symbol.'
                'If this is not the case, it is a doxylink bug so please report it.'
                'Error reported was: %s' % (part, error),
                line=lineno)
            return [nodes.inline(title, title)], []

        # If it's an absolute path then the link will work regardless of the document directory
        # Also check if it is a URL (i.e. it has a 'scheme' like 'http' or 'file')
        if os.path.isabs(rootdir) or urllib.parse.urlparse(rootdir).scheme:
            full_url = join(rootdir, url.file)
        # But otherwise we need to add the relative path of the current document to the root source directory to the link
        else:
            relative_path_to_docsrc = os.path.relpath(
                app.env.srcdir,
                os.path.dirname(inliner.document.attributes['source']))
            full_url = join(
                relative_path_to_docsrc, '/', rootdir, url.file
            )  # We always use the '/' here rather than os.sep since this is a web link avoids problems like documentation/.\../library/doc/ (mixed slashes)

        if url.kind == 'function' and app.config.add_function_parentheses and normalise(
                title)[1] == '' and not has_explicit_title:
            title = join(title, '()')

        pnode = nodes.reference(title, title, internal=False, refuri=full_url)
        pnode.set_class('reference-' + name)
        return [pnode], []

    return find_doxygen_link
Example #11
    def run(self):
        """Process .bib files, set file dependencies, and create a
        node that is to be transformed to the entries of the
        glossary.
        """
        env = self.state.document.settings.env
        # create id and cache for this node
        # this id will be stored with the node
        # and is used to look up additional data in env.bibgloss_cache
        # (implementation note: new_serialno only guarantees unique
        # ids within a single document, but we need the id to be
        # unique across all documents, so we also include the docname
        # in the id)
        id_ = "bibtex-bibglossary-%s-%s" % (env.docname,
                                            env.new_serialno("bibgloss"))

        # set filter option
        if "filter" in self.options:
            if "all" in self.options:
                logger.warning(standout(":filter: overrides :all:"))
            if "notcited" in self.options:
                logger.warning(standout(":filter: overrides :notcited:"))
            if "cited" in self.options:
                logger.warning(standout(":filter: overrides :cited:"))
            try:
                filter_ = ast.parse(self.options["filter"])
            except SyntaxError:
                logger.warning(
                    standout("syntax error in :filter: expression") + " (" +
                    self.options["filter"] + "); "
                    "the option will be ignored")
                filter_ = ast.parse("cited")
        elif "all" in self.options:
            filter_ = ast.parse("True")
        elif "notcited" in self.options:
            filter_ = ast.parse("not cited")
        else:
            # the default filter: include only cited entries
            filter_ = ast.parse("cited")

        style = self.options.get("style",
                                 env.app.config.bibgloss_default_style)
        if style not in self._allowed_styles:
            logger.warning(
                "style '{}' not in allowed styles, defaulting to '{}'".format(
                    style, self._default_style))
            style = self._default_style

        bibcache = BibliographyCache(
            style=style,
            unsorted=("unsorted" in self.options),
            filter_=filter_,
            encoding=self.options.get(
                "encoding", self.state.document.settings.input_encoding),
            keyprefix=self.options.get("keyprefix", ""),
            labels={},
            plurals={},
            bibfiles=[],
        )

        for bibfile in self.arguments[0].split():
            # convert to normalized absolute path to ensure that the same file
            # only occurs once in the cache
            bibfile = os.path.normpath(env.relfn2path(bibfile.strip())[1])
            # if the bibfile has been supplied with no extension, guess path
            bibfile = BibGlossDB().guess_path(bibfile) or bibfile
            self.process_bibfile(bibfile, bibcache.encoding)
            env.note_dependency(bibfile)
            bibcache.bibfiles.append(bibfile)
        env.bibgloss_cache.set_bibliography_cache(env.docname, id_, bibcache)
        return [BibGlossaryNode("", ids=[id_])]
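
BibGlossDB().guess_path is used above to resolve a bib file given without an extension; its implementation is not shown. A generic stand-in (the candidate extensions are an assumption for illustration only) could be:

import os

def guess_path(path, extensions=(".bib", ".yaml", ".yml")):
    # illustrative only: return the first existing file obtained by
    # appending a known extension, or None if nothing matches
    if os.path.exists(path):
        return path
    for ext in extensions:
        candidate = path + ext
        if os.path.exists(candidate):
            return candidate
    return None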