Example #1
def glossary_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Glossary with cross-reference targets for :dfn: roles."""
    env = state.document.settings.env
    node = addnodes.glossary()
    state.nested_parse(content, content_offset, node)

    # the content should be definition lists
    dls = [child for child in node if isinstance(child, nodes.definition_list)]
    # now, extract definition terms to enable cross-reference creation
    for dl in dls:
        dl['classes'].append('glossary')
        for li in dl.children:
            if not li.children or not isinstance(li[0], nodes.term):
                continue
            termtext = li.children[0].astext()
            new_id = 'term-' + nodes.make_id(termtext)
            if new_id in env.gloss_entries:
                new_id = 'term-' + str(len(env.gloss_entries))
            env.gloss_entries.add(new_id)
            li[0]['names'].append(new_id)
            li[0]['ids'].append(new_id)
            state.document.settings.env.note_reftarget('term', termtext.lower(),
                                                       new_id)
    return [node]
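
The function above is only the directive handler: it assumes that env.gloss_entries already exists and that the function has been registered as the glossary directive. A minimal, hypothetical wiring sketch using the old docutils function-directive interface (attribute names follow that interface; Sphinx itself registers the directive through its own extension machinery):

from docutils.parsers.rst import directives

# Old-style docutils directives are plain functions whose behaviour is
# declared via attributes on the function object.
glossary_directive.content = True             # the directive takes a body
glossary_directive.arguments = (0, 0, False)  # no arguments
glossary_directive.options = {}

directives.register_directive('glossary', glossary_directive)
# ...and somewhere during environment setup (assumption):
# env.gloss_entries = set()
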
Example #2
def glossary_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Glossary with cross-reference targets for :dfn: roles."""
    env = state.document.settings.env
    node = addnodes.glossary()
    state.nested_parse(content, content_offset, node)

    # the content should be definition lists
    dls = [child for child in node if isinstance(child, nodes.definition_list)]
    # now, extract definition terms to enable cross-reference creation
    for dl in dls:
        dl['classes'].append('glossary')
        for li in dl.children:
            if not li.children or not isinstance(li[0], nodes.term):
                continue
            termtext = li.children[0].astext()
            new_id = 'term-' + nodes.make_id(termtext)
            if new_id in env.gloss_entries:
                new_id = 'term-' + str(len(env.gloss_entries))
            env.gloss_entries.add(new_id)
            li[0]['names'].append(new_id)
            li[0]['ids'].append(new_id)
            state.document.settings.env.note_reftarget('term',
                                                       termtext.lower(),
                                                       new_id)
    return [node]
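
Both variants build cross-reference target IDs the same way: slugify the term text with nodes.make_id, prefix it with 'term-', and fall back to a counter-based ID when the slug collides with one that was already registered. A standalone restatement of that scheme, using a local set in place of env.gloss_entries:

from docutils import nodes

def make_term_id(termtext, seen):
    """Illustrative sketch of the ID scheme used in the examples above."""
    new_id = 'term-' + nodes.make_id(termtext)  # e.g. 'term-abstract-base-class'
    if new_id in seen:
        # duplicate term text: fall back to a positional, numeric ID
        new_id = 'term-' + str(len(seen))
    seen.add(new_id)
    return new_id

seen = set()
print(make_term_id('Abstract Base Class', seen))  # term-abstract-base-class
print(make_term_id('abstract base class', seen))  # term-1 (collision fallback)
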
Example #3
    def run(self):
        env = self.state.document.settings.env
        node = addnodes.glossary()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)

        # the content should be definition lists
        dls = [child for child in node
               if isinstance(child, nodes.definition_list)]
        # now, extract definition terms to enable cross-reference creation
        new_dl = nodes.definition_list()
        new_dl['classes'].append('glossary')
        items = []
        for dl in dls:
            for li in dl.children:
                if not li.children or not isinstance(li[0], nodes.term):
                    continue
                termtext = li.children[0].astext()
                new_id = 'term-' + nodes.make_id(termtext)
                if new_id in env.gloss_entries:
                    new_id = 'term-' + str(len(env.gloss_entries))
                env.gloss_entries.add(new_id)
                li[0]['names'].append(new_id)
                li[0]['ids'].append(new_id)
                env.note_reftarget('term', termtext.lower(), new_id)
                # add an index entry too
                indexnode = addnodes.index()
                indexnode['entries'] = [('single', termtext, new_id, termtext)]
                li.insert(0, indexnode)
                items.append((termtext, li))
        if 'sorted' in self.options:
            items.sort(key=lambda x: x[0].lower())
        new_dl.extend(item[1] for item in items)
        node.children = [new_dl]
        return [node]
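
From this example on, only the run() method is shown; the enclosing class is a docutils/Sphinx directive whose class attributes declare the content model and the sorted flag. A plausible skeleton for that wrapper (the class name and attribute values are assumptions inferred from how run() uses self.content and self.options):

from docutils.parsers.rst import Directive, directives

class Glossary(Directive):
    """Hypothetical wrapper around the run() methods shown in these examples."""
    has_content = True                  # run() parses self.content
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'sorted': directives.flag,      # enables the sorting branch in run()
    }

    def run(self):
        ...  # body as in the example above
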
Example #4
    def run(self):
        env = self.state.document.settings.env
        objects = env.domaindata['std']['objects']
        gloss_entries = env.temp_data.setdefault('gloss_entries', set())
        node = addnodes.glossary()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)

        # the content should be definition lists
        dls = [child for child in node
               if isinstance(child, nodes.definition_list)]
        # now, extract definition terms to enable cross-reference creation
        new_dl = nodes.definition_list()
        new_dl['classes'].append('glossary')
        items = []
        for dl in dls:
            for li in dl.children:
                if not li.children or not isinstance(li[0], nodes.term):
                    continue
                termtext = li.children[0].astext()
                new_id = 'term-' + nodes.make_id(termtext)
                if new_id in gloss_entries:
                    new_id = 'term-' + str(len(gloss_entries))
                gloss_entries.add(new_id)
                li[0]['names'].append(new_id)
                li[0]['ids'].append(new_id)
                objects['term', termtext.lower()] = env.docname, new_id
                # add an index entry too
                indexnode = addnodes.index()
                indexnode['entries'] = [('single', termtext, new_id, termtext)]
                li.insert(0, indexnode)
                items.append((termtext, li))
        if 'sorted' in self.options:
            items.sort(key=lambda x: unicodedata.normalize('NFD', x[0].lower()))
        new_dl.extend(item[1] for item in items)
        node.children = [new_dl]
        return [node]
Example #5
File: std.py Project: th0/test2
    def run(self):
        env = self.state.document.settings.env
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    continue
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary term must be preceded by empty line',
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary terms must not be separated by empty '
                            'lines', source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary seems to be misformatted, check '
                            'indentation', source=source, line=lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary seems to be misformatted, check '
                        'indentation', source=source, line=lineno))
            was_empty = False

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []
            ids = []
            for line, source, lineno in terms:
                # parse the term with inline markup
                res = self.state.inline_text(line, lineno)
                system_messages.extend(res[1])

                # get a text-only representation of the term and register it
                # as a cross-reference target
                tmp = nodes.paragraph('', '', *res[0])
                tmp.source = source
                tmp.line = lineno
                new_id, termtext, new_termnodes = \
                    make_termnodes_from_paragraph_node(env, tmp)
                ids.append(new_id)
                termtexts.append(termtext)
                termnodes.extend(new_termnodes)

            term = make_term_from_paragraph_node(termnodes, ids)
            term += system_messages

            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)

            items.append((termtexts,
                          nodes.definition_list_item('', term, defnode)))

        if 'sorted' in self.options:
            items.sort(key=lambda x:
                       unicodedata.normalize('NFD', x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
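
The hand-rolled line scanner in this version exists so that several unindented term lines can precede one shared, indented definition. A commented sample of the kind of reST body the loop accepts, kept here as a Python string for reference (term names are made up; the 'sorted' flag would be passed as a directive option, not in the body):

SAMPLE_GLOSSARY_BODY = """\
.. lines starting with '.. ' are skipped by the comment check above

builder
builder instance
   Two unindented lines in a row are two terms for the same entry; this
   indented block is collected into their shared definition.

environment
   The blank line above started a new entry; the indentation of this
   first definition line sets indent_len for the rest of the entry.
"""
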
Example #6
    def run(self):
        # type: () -> List[nodes.Node]
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = [
        ]  # type: List[Tuple[List[Tuple[unicode, unicode, int]], ViewList]]
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    continue
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(
                            self.state.reporter.system_message(
                                2,
                                'glossary term must be preceded by empty line',
                                source=source,
                                line=lineno))
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(
                            self.state.reporter.system_message(
                                2,
                                'glossary terms must not be separated by empty '
                                'lines',
                                source=source,
                                line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(
                            self.state.reporter.system_message(
                                2, 'glossary seems to be misformatted, check '
                                'indentation',
                                source=source,
                                line=lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(
                        self.state.reporter.system_message(
                            2, 'glossary seems to be misformatted, check '
                            'indentation',
                            source=source,
                            line=lineno))
            was_empty = False

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []  # type: List[unicode]
            for line, source, lineno in terms:
                parts = split_term_classifiers(line)
                # parse the term with inline markup
                # classifiers (parts[1:]) will not be shown on doctree
                textnodes, sysmsg = self.state.inline_text(parts[0], lineno)

                # use first classifier as a index key
                term = make_glossary_term(self.env, textnodes, parts[1],
                                          source, lineno)
                term.rawsource = line
                system_messages.extend(sysmsg)
                termtexts.append(term.astext())
                termnodes.append(term)

            termnodes.extend(system_messages)

            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)
            termnodes.append(defnode)
            items.append(
                (termtexts, nodes.definition_list_item('', *termnodes)))

        if 'sorted' in self.options:
            items.sort(
                key=lambda x: unicodedata.normalize('NFD', x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
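
split_term_classifiers() is not shown in this listing; judging from how its result is used (parts[0] is parsed as the term text, parts[1] is handed to make_glossary_term as the index key), a term line may carry colon-separated classifiers such as 'term : key'. A rough sketch of that assumed behaviour, for illustration only:

# Assumed behaviour only -- the real helper lives in Sphinx, not here.
def split_term_classifiers_sketch(line):
    parts = line.split(' : ')
    return parts + [None]   # parts[1] stays None when no classifier is given

print(split_term_classifiers_sketch('builder'))       # ['builder', None]
print(split_term_classifiers_sketch('builder : B'))   # ['builder', 'B', None]
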
Example #7
    def run(self):
        # type: () -> List[nodes.Node]
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []  # type: List[Tuple[List[Tuple[str, str, int]], StringList]]
        in_definition = True
        was_empty = True
        messages = []  # type: List[nodes.Node]
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    continue
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary term must be preceded by empty line',
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], StringList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary terms must not be separated by empty '
                            'lines', source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary seems to be misformatted, check '
                            'indentation', source=source, line=lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary seems to be misformatted, check '
                        'indentation', source=source, line=lineno))
            was_empty = False

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []          # type: List[str]
            termnodes = []          # type: List[nodes.Node]
            system_messages = []    # type: List[nodes.Node]
            for line, source, lineno in terms:
                parts = split_term_classifiers(line)
                # parse the term with inline markup
                # classifiers (parts[1:]) will not be shown on doctree
                textnodes, sysmsg = self.state.inline_text(parts[0], lineno)

                # use first classifier as a index key
                term = make_glossary_term(self.env, textnodes, parts[1], source, lineno)
                term.rawsource = line
                system_messages.extend(sysmsg)
                termtexts.append(term.astext())
                termnodes.append(term)

            termnodes.extend(system_messages)

            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)
            termnodes.append(defnode)
            items.append((termtexts,
                          nodes.definition_list_item('', *termnodes)))

        if 'sorted' in self.options:
            items.sort(key=lambda x:
                       unicodedata.normalize('NFD', x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
Example #8
    def run(self):
        env = self.state.document.settings.env
        objects = env.domaindata['std']['objects']
        gloss_entries = env.temp_data.setdefault('gloss_entries', set())
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary term must be preceded by empty line',
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary terms must not be separated by empty '
                            'lines', source=source, line=lineno))
                    entries[-1][0].append((line, source, lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                entries[-1][1].append(line[indent_len:], source, lineno)
            was_empty = False

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []
            ids = []
            for line, source, lineno in terms:
                # parse the term with inline markup
                res = self.state.inline_text(line, lineno)
                system_messages.extend(res[1])

                # get a text-only representation of the term and register it
                # as a cross-reference target
                tmp = nodes.paragraph('', '', *res[0])
                termtext = tmp.astext()
                new_id = 'term-' + nodes.make_id(termtext)
                if new_id in gloss_entries:
                    new_id = 'term-' + str(len(gloss_entries))
                gloss_entries.add(new_id)
                ids.append(new_id)
                objects['term', termtext.lower()] = env.docname, new_id
                termtexts.append(termtext)
                # add an index entry too
                indexnode = addnodes.index()
                indexnode['entries'] = [('single', termtext, new_id, 'main')]
                termnodes.append(indexnode)
                termnodes.extend(res[0])
                termnodes.append(addnodes.termsep())
            # make a single "term" node with all the terms, separated by termsep
            # nodes (remove the dangling trailing separator)
            term = nodes.term('', '', *termnodes[:-1])
            term['ids'].extend(ids)
            term['names'].extend(ids)
            term += system_messages

            defnode = nodes.definition()
            self.state.nested_parse(definition, definition.items[0][1], defnode)

            items.append((termtexts,
                          nodes.definition_list_item('', term, defnode)))

        if 'sorted' in self.options:
            items.sort(key=lambda x:
                       unicodedata.normalize('NFD', x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
Example #9
    def run(self) -> List[Node]:
        node = addnodes.glossary()
        node.document = self.state.document
        node['sorted'] = ('sorted' in self.options)

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries: List[Tuple[List[Tuple[str, str, int]], StringList]] = []
        in_definition = True
        in_comment = False
        was_empty = True
        messages: List[Node] = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    in_comment = True
                    continue
                else:
                    in_comment = False

                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.warning(
                            _('glossary term must be preceded by empty line'),
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], StringList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.warning(
                            _('glossary terms must not be separated by empty lines'),
                            source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.warning(
                            _('glossary seems to be misformatted, check indentation'),
                            source=source, line=lineno))
            elif in_comment:
                pass
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.warning(
                        _('glossary seems to be misformatted, check indentation'),
                        source=source, line=lineno))
            was_empty = False

        # now, parse all the entries into a big definition list
        items: List[nodes.definition_list_item] = []
        for terms, definition in entries:
            termnodes: List[Node] = []
            system_messages: List[Node] = []
            for line, source, lineno in terms:
                parts = split_term_classifiers(line)
                # parse the term with inline markup
                # classifiers (parts[1:]) will not be shown on doctree
                textnodes, sysmsg = self.state.inline_text(parts[0], lineno)

                # use first classifier as a index key
                term = make_glossary_term(self.env, textnodes, parts[1], source, lineno,
                                          node_id=None, document=self.state.document)
                term.rawsource = line
                system_messages.extend(sysmsg)
                termnodes.append(term)

            termnodes.extend(system_messages)

            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)
            termnodes.append(defnode)
            items.append(nodes.definition_list_item('', *termnodes))

        dlist = nodes.definition_list('', *items)
        dlist['classes'].append('glossary')
        node += dlist
        return messages + [node]
Example #10
    def run(self):
        env = self.state.document.settings.env
        objects = env.domaindata['std']['objects']
        gloss_entries = env.temp_data.setdefault('gloss_entries', set())
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary term must be preceded by empty line',
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary terms must not be separated by empty '
                            'lines', source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary seems to be misformatted, check '
                            'indentation', source=source, line=lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary seems to be misformatted, check '
                        'indentation', source=source, line=lineno))
            was_empty = False

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []
            ids = []
            for line, source, lineno in terms:
                # parse the term with inline markup
                res = self.state.inline_text(line, lineno)
                system_messages.extend(res[1])

                # get a text-only representation of the term and register it
                # as a cross-reference target
                tmp = nodes.paragraph('', '', *res[0])
                termtext = tmp.astext()
                new_id = 'term-' + nodes.make_id(termtext)
                if new_id in gloss_entries:
                    new_id = 'term-' + str(len(gloss_entries))
                gloss_entries.add(new_id)
                ids.append(new_id)
                objects['term', termtext.lower()] = env.docname, new_id
                termtexts.append(termtext)
                # add an index entry too
                indexnode = addnodes.index()
                indexnode['entries'] = [('single', termtext, new_id, 'main')]
                termnodes.append(indexnode)
                termnodes.extend(res[0])
                termnodes.append(addnodes.termsep())
            # make a single "term" node with all the terms, separated by termsep
            # nodes (remove the dangling trailing separator)
            term = nodes.term('', '', *termnodes[:-1])
            term['ids'].extend(ids)
            term['names'].extend(ids)
            term += system_messages

            defnode = nodes.definition()
            self.state.nested_parse(definition, definition.items[0][1], defnode)

            items.append((termtexts,
                          nodes.definition_list_item('', term, defnode)))

        if 'sorted' in self.options:
            items.sort(key=lambda x:
                       unicodedata.normalize('NFD', x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
Example #11
    def run(self):


        def _make_termnodes_from_paragraph_node(env, node, new_id=None):
            gloss_entries = env.temp_data.setdefault('gloss_entries', set())
            objects = env.domaindata['std']['objects']

            termtext = node.astext()
            if new_id is None:
                new_id = nodes.make_id('term-' + termtext)
            if new_id in gloss_entries:
                new_id = 'term-' + str(len(gloss_entries))
            gloss_entries.add(new_id)
            objects['term', termtext.lower()] = env.docname, new_id

            # add an index entry too
            indexnode = addnodes.index()
            indexnode['entries'] = [('single', termtext, new_id, 'main')]
            new_termnodes = []
            new_termnodes.append(indexnode)
            new_termnodes.extend(node.children)
            new_termnodes.append(addnodes.termsep())
            for termnode in new_termnodes:
                termnode.source, termnode.line = node.source, node.line

            return new_id, termtext, new_termnodes


        def make_term_from_paragraph_node(termnodes, ids):
            # make a single "term" node with all the terms, separated by termsep
            # nodes (remove the dangling trailing separator)
            term = nodes.term('', '', *termnodes[:-1])
            term.source, term.line = termnodes[0].source, termnodes[0].line
            term.rawsource = term.astext()
            term['ids'].extend(ids)
            term['names'].extend(ids)
            return term



        # %JFE+[
        if len(self.arguments) >= 1:
            self.glossaryName = self.arguments[0]
        else:
            self.glossaryName = ''

        # %JFE+]

        env = self.state.document.settings.env
        node = addnodes.glossary()     # %JFE= TODO:
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    continue
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary term must be preceded by empty line',
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary terms must not be separated by empty '
                            'lines', source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary seems to be misformatted, check '
                            'indentation', source=source, line=lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary seems to be misformatted, check '
                        'indentation', source=source, line=lineno))
            was_empty = False

        # for (headers,body) in entries:
        #    for (term, source, line) in headers:
        #        print term
        #    for line in body:
        #        print '  ',line
        #    print '-'*50

        # TODO: save this to a file in env.doctreedir

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []
            ids = []
            for line, source, lineno in terms:
                # parse the term with inline markup
                res = self.state.inline_text(line, lineno)
                system_messages.extend(res[1])

                # get a text-only representation of the term and register it
                # as a cross-reference target
                tmp = nodes.paragraph('', '', *res[0])
                tmp.source = source
                tmp.line = lineno
                new_id, termtext, new_termnodes = \
                    _make_termnodes_from_paragraph_node(env, tmp)
                ids.append(new_id)
                termtexts.append(termtext)
                termnodes.extend(new_termnodes)

            term = make_term_from_paragraph_node(termnodes, ids)
            term += system_messages

            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)

            items.append((termtexts,
                          nodes.definition_list_item('', term, defnode)))

        if 'sorted' in self.options:
            items.sort(key=lambda x:
                       unicodedata.normalize('NFD', x[0][0].lower()))
        # for item in items:
        #    for x in item:
        #        print type(x),'---',x
        #    print '.'*50
        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')        # %JFE= TODO:?
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
Example #12
    def run(self):
        env = self.state.document.settings.env
        objects = env.domaindata["std"]["objects"]
        gloss_entries = env.temp_data.setdefault("gloss_entries", set())
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append("", source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith(".. "):
                    continue
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(
                            self.state.reporter.system_message(
                                2, "glossary term must be preceded by empty line", source=source, line=lineno
                            )
                        )
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(
                            self.state.reporter.system_message(
                                2, "glossary terms must not be separated by empty " "lines", source=source, line=lineno
                            )
                        )
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(
                            self.state.reporter.system_message(
                                2, "glossary seems to be misformatted, check " "indentation", source=source, line=lineno
                            )
                        )
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(
                        self.state.reporter.system_message(
                            2, "glossary seems to be misformatted, check " "indentation", source=source, line=lineno
                        )
                    )
            was_empty = False

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []
            ids = []
            for line, source, lineno in terms:
                # parse the term with inline markup
                res = self.state.inline_text(line, lineno)
                system_messages.extend(res[1])

                # get a text-only representation of the term and register it
                # as a cross-reference target
                tmp = nodes.paragraph("", "", *res[0])
                termtext = tmp.astext()
                new_id = "term-" + nodes.make_id(termtext)
                if new_id in gloss_entries:
                    new_id = "term-" + str(len(gloss_entries))
                gloss_entries.add(new_id)
                ids.append(new_id)
                objects["term", termtext.lower()] = env.docname, new_id
                termtexts.append(termtext)
                # add an index entry too
                indexnode = addnodes.index()
                indexnode["entries"] = [("single", termtext, new_id, "main")]
                new_termnodes = []
                new_termnodes.append(indexnode)
                new_termnodes.extend(res[0])
                new_termnodes.append(addnodes.termsep())
                for termnode in new_termnodes:
                    termnode.source, termnode.line = source, lineno
                termnodes.extend(new_termnodes)
            # make a single "term" node with all the terms, separated by termsep
            # nodes (remove the dangling trailing separator)
            term = nodes.term("", "", *termnodes[:-1])
            term.source, term.line = termnodes[0].source, termnodes[0].line
            term.rawsource = term.astext()
            term["ids"].extend(ids)
            term["names"].extend(ids)
            term += system_messages

            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1], defnode)

            items.append((termtexts, nodes.definition_list_item("", term, defnode)))

        if "sorted" in self.options:
            items.sort(key=lambda x: unicodedata.normalize("NFD", x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist["classes"].append("glossary")
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
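
Whichever variant is used, a class-based glossary directive is hooked into Sphinx like any other directive. A minimal, hypothetical setup() sketch (the directive name 'my-glossary' and the class name Glossary are placeholders; in Sphinx itself the built-in glossary directive is registered by the std domain, not by an extension):

def setup(app):
    # app is the Sphinx application object passed to every extension.
    app.add_directive('my-glossary', Glossary)  # Glossary: a class like the ones above
    return {'parallel_read_safe': True}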