Example #1
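    # Builds a docutils inline node for a single bibliography entry: authors are
    # formatted with pybtex name styles, followed by title, publication, volume,
    # pages, year and an optional hyperlinked URL.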
    def get_reference_node(self, ref):
        node = nodes.inline(' ', ' ', classes=[ref.type, 'bibcite'])

        namestyler = pybtex.style.names.plain.NameStyle()
        namestyler = pybtex.style.names.lastfirst.NameStyle()
        plaintext = pybtex.backends.plaintext.Backend()

        # Authors
        authors = ref.persons.get('author', [])
        for i, author in enumerate(authors):
            authortext = namestyler.format(
                author, abbr=True).format().render(plaintext)
            authortext = authortext.replace('{', '')
            authortext = authortext.replace('}', '')
            authortext = authortext.decode('latex')
            text = authortext

            text = text.strip()
            auth_node = latex_to_nodes(text)
            auth_node['classes'].append('author')
            node += auth_node

            if i + 1 < len(authors):
                node += nodes.inline(', ', ', ')
            else:
                ending = '%s  ' % ('' if text.endswith('.') else '.')
                node += nodes.inline(ending, ending)

        # Title
        title = ref.fields.get('title')
        if title is None:
            title = ref.fields.get('key')
        if title:
            title = title.decode('latex')
            title = title.replace('{', '')
            title = title.replace('}', '')
            node += nodes.inline(title, title, classes=['bib_title'])
            node += nodes.inline('.  ', '.  ')

        # @phdthesis
        if ref.type == 'phdthesis':
            school = ref.fields.get('school')
            school = school.decode('latex')
            text = 'PhD Thesis, %s, ' % school
            node += nodes.inline(text, text)

        # Publication
        pub = ref.fields.get('journal')
        if not pub:
            pub = ref.fields.get('booktitle')
        if pub:
            pub = pub.decode('latex')
            pub = pub.replace('{', '')
            pub = pub.replace('}', '')
            node += nodes.emphasis(pub, pub, classes=['publication'])
            node += nodes.inline(' ', ' ')

        vol = ref.fields.get('volume')
        pages = ref.fields.get('pages')
        year = ref.fields.get('year')
        url = ref.fields.get('url')

        if pub is None:
            howpub = ref.fields.get('howpublished')
            if howpub is not None and howpub.startswith(r'\url{'):
                url = howpub[5:-1]
                refnode = nodes.reference('', '', internal=False, refuri=url)
                refnode += nodes.Text(url, url)
                node += refnode
                if vol or pages or year:
                    node += nodes.inline(', ', ', ')

        if vol:
            vol = vol.decode('latex')
            node += nodes.inline(vol, vol, classes=['bib_vol'])
            node += nodes.inline(':', ':')

        if pages:
            pages = pages.decode('latex')
            node += nodes.inline(pages, pages, classes=['pages'])

        if year:
            year = year.decode('latex')
            node += nodes.inline(' (', ' (')
            node += nodes.inline(year, year, classes=['year'])
            node += nodes.inline(')', ')')

        if pub is not None and url:
            if url.startswith('{') and url.endswith('}'):
                url = url[1:-1]
            refnode = nodes.reference('', '', internal=False, refuri=url)
            node += nodes.inline(' ', ' ')
            refnode += nodes.Text(url, url)
            node += refnode

        node += nodes.inline('.', '.')
        return node
Example #2
    def transform(self, node: nodes.field_list) -> None:
        """Transform a single field list *node*."""
        typemap = self.typemap

        entries = []  # type: List[Union[nodes.field, Tuple[Field, Any]]]
        groupindices = {}  # type: Dict[str, int]
        types = {}  # type: Dict[str, Dict]

        # step 1: traverse all fields and collect field types and content
        for field in cast(List[nodes.field], node):
            assert len(field) == 2
            field_name = cast(nodes.field_name, field[0])
            field_body = cast(nodes.field_body, field[1])
            try:
                # split into field type and argument
                fieldtype_name, fieldarg = field_name.astext().split(None, 1)
            except ValueError:
                # maybe an argument-less field type?
                fieldtype_name, fieldarg = field_name.astext(), ''
            typedesc, is_typefield = typemap.get(fieldtype_name, (None, None))

            # collect the content, trying not to keep unnecessary paragraphs
            if _is_single_paragraph(field_body):
                paragraph = cast(nodes.paragraph, field_body[0])
                content = paragraph.children
            else:
                content = field_body.children

            # sort out unknown fields
            if typedesc is None or typedesc.has_arg != bool(fieldarg):
                # either the field name is unknown, or the argument doesn't
                # match the spec; capitalize field name and be done with it
                new_fieldname = fieldtype_name[0:1].upper() + fieldtype_name[1:]
                if fieldarg:
                    new_fieldname += ' ' + fieldarg
                field_name[0] = nodes.Text(new_fieldname)
                entries.append(field)

                # but if this has a type then we can at least link it
                if (typedesc and is_typefield and content and len(content) == 1
                        and isinstance(content[0], nodes.Text)):
                    typed_field = cast(TypedField, typedesc)
                    target = content[0].astext()
                    xrefs = typed_field.make_xrefs(
                        typed_field.typerolename,
                        self.directive.domain,
                        target,
                        contnode=content[0],
                    )
                    if _is_single_paragraph(field_body):
                        paragraph = cast(nodes.paragraph, field_body[0])
                        paragraph.clear()
                        paragraph.extend(xrefs)
                    else:
                        field_body.clear()
                        field_body += nodes.paragraph('', '', *xrefs)

                continue

            typename = typedesc.name

            # if the field specifies a type, put it in the types collection
            if is_typefield:
                # filter out only inline nodes; others will result in invalid
                # markup being written out
                content = [
                    n for n in content if isinstance(n, nodes.Inline)
                    or isinstance(n, nodes.Text)
                ]
                if content:
                    types.setdefault(typename, {})[fieldarg] = content
                continue

            # also support syntax like ``:param type name:``
            if typedesc.is_typed:
                try:
                    argtype, argname = fieldarg.split(None, 1)
                except ValueError:
                    pass
                else:
                    types.setdefault(typename, {})[argname] = \
                        [nodes.Text(argtype)]
                    fieldarg = argname

            translatable_content = nodes.inline(field_body.rawsource,
                                                translatable=True)
            translatable_content.document = field_body.parent.document
            translatable_content.source = field_body.parent.source
            translatable_content.line = field_body.parent.line
            translatable_content += content

            # grouped entries need to be collected in one entry, while others
            # get one entry per field
            if typedesc.is_grouped:
                if typename in groupindices:
                    group = cast(Tuple[Field, List],
                                 entries[groupindices[typename]])
                else:
                    groupindices[typename] = len(entries)
                    group = (typedesc, [])
                    entries.append(group)
                new_entry = typedesc.make_entry(fieldarg,
                                                [translatable_content])
                group[1].append(new_entry)
            else:
                new_entry = typedesc.make_entry(fieldarg,
                                                [translatable_content])
                entries.append((typedesc, new_entry))

        # step 2: all entries are collected, construct the new field list
        new_list = nodes.field_list()
        for entry in entries:
            if isinstance(entry, nodes.field):
                # pass-through old field
                new_list += entry
            else:
                fieldtype, items = entry
                fieldtypes = types.get(fieldtype.name, {})
                env = self.directive.state.document.settings.env
                new_list += fieldtype.make_field(fieldtypes,
                                                 self.directive.domain,
                                                 items,
                                                 env=env)

        node.replace_self(new_list)
Example #3
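 # Renders a number_reference node as its plain-text title (falling back to
 # '#') and skips the node's children.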
 def visit_number_reference(self, node):
     text = nodes.Text(node.get('title', '#'))
     self.visit_Text(text)
     raise nodes.SkipNode
Example #4
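    # Builds an A+ active-element output <div> whose data-aplus-* attributes are
    # derived from the directive options, and writes the matching exercise
    # configuration to YAML via node.write_yaml().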
    def run(self):
        key, difficulty, points = self.extract_exercise_arguments()

        env = self.state.document.settings.env
        name = "{}_{}".format(env.docname.replace('/', '_'), key)
        override = env.config.override

        classes = ['exercise']
        if 'class' in self.options:
            classes.extend(self.options['class'])

        # Add document nodes.
        args = {
            'class': ' '.join(classes),
            'data-aplus-exercise': 'yes',
            'data-aplus-active-element': 'out',
            'data-inputs': '' + self.options.get('inputs', ''),
        }

        if 'inputs' not in self.options:
            raise self.warning(
                "The input list for output '{:s}' is empty.".format(key))

        if 'type' in self.options:
            args['data-type'] = self.options['type']
        else:
            args['data-type'] = 'text'

        if 'scale-size' in self.options:
            args['data-scale'] = ''

        if 'title' in self.options:
            args['data-title'] = self.options['title']

        if 'width' in self.options:
            args['style'] = 'width:' + self.options['width'] + ';'

        if 'height' in self.options:
            if 'style' not in args:
                args['style'] = 'height:' + self.options['height'] + ';'
            else:
                args['style'] = args['style'] + 'height:' + self.options[
                    'height'] + ';'

        if 'clear' in self.options:
            args['style'] = args['style'] + 'clear:' + self.options[
                'clear'] + ';'

        node = aplus_nodes.html('div', args)
        paragraph = aplus_nodes.html('p', {})
        paragraph.append(
            nodes.Text(translations.get(env, 'submit_placeholder')))
        node.append(paragraph)

        key_title = "{} {}".format(translations.get(env, 'exercise'), key)

        # Load or create exercise configuration.
        if 'config' in self.options:
            path = os.path.join(env.app.srcdir, self.options['config'])
            if not os.path.exists(path):
                raise SphinxError('Missing config path {}'.format(
                    self.options['config']))
            data = yaml_writer.read(path)
            config_title = data.get('title', None)
        else:
            data = {'_external': True}
            if 'url' in self.options:
                data['url'] = self.options['url']
            config_title = None

        config_title = self.options.get('title', config_title)

        category = 'submit'
        data.update({
            'key':
            name,
            'title':
            env.config.submit_title.format(key_title=key_title,
                                           config_title=config_title),
            'category':
            'active elements',
            'max_submissions':
            self.options.get(
                'submissions',
                data.get('max_submissions',
                         env.config.ae_default_submissions)),
        })
        data.setdefault('status', self.options.get('status', 'unlisted'))
        if category in override:
            data.update(override[category])
            if 'url' in data:
                data['url'] = data['url'].format(key=name)

        node.write_yaml(env, name, data, 'exercise')

        return [node]
Example #5
 def dispatch_default(self, entering, node, *args):
     if entering:
         self.document.reporter.warning(
             "markdown node with unknown tag: %s" % node.tag,
             nodes.Text(node.text))
Example #6
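    # Inlines every toctree into one doctree, optionally appends the general
    # index, domain indices and appendices, and resolves all references so the
    # result can be rendered as a single PDF document.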
    def assemble_doctree(self, docname, title, author, appendices):

        # FIXME: use the new inline_all_trees from Sphinx.
        # check how the LaTeX builder does it.

        self.docnames = set([docname])
        self.info(darkgreen(docname) + " ", nonl=1)

        def process_tree(docname, tree):
            tree = tree.deepcopy()
            for toctreenode in tree.traverse(addnodes.toctree):
                newnodes = []
                includefiles = map(str, toctreenode['includefiles'])
                for includefile in includefiles:
                    try:
                        self.info(darkgreen(includefile) + " ", nonl=1)
                        subtree = process_tree(
                            includefile, self.env.get_doctree(includefile))
                        self.docnames.add(includefile)
                    except Exception:
                        self.warn('%s: toctree contains ref to nonexisting file %r'
                                  % (docname, includefile))
                    else:
                        sof = addnodes.start_of_file(docname=includefile)
                        sof.children = subtree.children
                        newnodes.append(sof)
                toctreenode.parent.replace(toctreenode, newnodes)
            return tree

        tree = self.env.get_doctree(docname)
        tree = process_tree(docname, tree)

        self.docutils_languages = {}
        if self.config.language:
            self.docutils_languages[self.config.language] = \
                get_language_available(self.config.language)[2]

        if self.opts.get('pdf_use_index', self.config.pdf_use_index):
            # Add index at the end of the document

            # This is a hack. create_index creates an index from
            # ALL the documents data, not just this one.
            # So, we preserve a copy, use just what we need, then
            # restore it.
            #from pudb import set_trace; set_trace()
            t = copy(self.env.indexentries)
            try:
                self.env.indexentries = {
                    docname: self.env.indexentries[docname + '-gen']
                }
            except KeyError:
                self.env.indexentries = {}
                for dname in self.docnames:
                    self.env.indexentries[dname] = t.get(dname, [])
            genindex = self.env.create_index(self)
            self.env.indexentries = t
            # EOH (End Of Hack)

            if genindex:  # No point in creating empty indexes
                index_nodes = genindex_nodes(genindex)
                tree.append(
                    nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
                tree.append(index_nodes)

        # This is stolen from the HTML builder's prepare_writing function
        self.domain_indices = []
        # html_domain_indices can be False/True or a list of index names
        indices_config = self.config.pdf_domain_indices
        if indices_config and hasattr(self.env, 'domains'):
            for domain in self.env.domains.itervalues():
                for indexcls in domain.indices:
                    indexname = '%s-%s' % (domain.name, indexcls.name)
                    if isinstance(indices_config, list):
                        if indexname not in indices_config:
                            continue
                    # deprecated config value
                    if indexname == 'py-modindex' and \
                           not self.config.pdf_use_modindex:
                        continue
                    content, collapse = indexcls(domain).generate()
                    if content:
                        self.domain_indices.append(
                            (indexname, indexcls, content, collapse))

        # self.domain_indices contains a list of indices to generate, like
        # this:
        # [('py-modindex',
        #    <class 'sphinx.domains.python.PythonModuleIndex'>,
        #   [(u'p', [[u'parrot', 0, 'test', u'module-parrot', 'Unix, Windows',
        #   '', 'Analyze and reanimate dead parrots.']])], True)]

        # Now this in the HTML builder is passed onto write_domain_indices.
        # We handle it right here

        for indexname, indexcls, content, collapse in self.domain_indices:
            indexcontext = dict(
                indextitle=indexcls.localname,
                content=content,
                collapse_index=collapse,
            )
            # In HTML this is handled with a Jinja template, domainindex.html
            # We have to generate docutils stuff right here in the same way.
            self.info(' ' + indexname, nonl=1)
            print

            output = ['DUMMY', '=====', '', '.. _modindex:\n\n']
            t = indexcls.localname
            t += '\n' + '=' * len(t) + '\n'
            output.append(t)

            for letter, entries in content:
                output.append('.. cssclass:: heading4\n\n%s\n\n' % letter)
                for (name, grouptype, page, anchor, extra, qualifier,
                     description) in entries:
                    if qualifier:
                        q = '[%s]' % qualifier
                    else:
                        q = ''

                    if extra:
                        e = '(%s)' % extra
                    else:
                        e = ''
                    output.append('`%s <#%s>`_ %s %s' % (name, anchor, e, q))
                    output.append('    %s' % description)
                output.append('')

            dt = docutils.core.publish_doctree('\n'.join(output))[1:]
            dt.insert(0, nodes.raw(text='OddPageBreak twoColumn',
                                   format='pdf'))
            tree.extend(dt)

        if appendices:
            tree.append(
                nodes.raw(text='OddPageBreak %s' % self.page_template,
                          format='pdf'))
            self.info()
            self.info('adding appendixes...', nonl=1)
            for docname in appendices:
                self.info(darkgreen(docname) + " ", nonl=1)
                appendix = self.env.get_doctree(docname)
                appendix['docname'] = docname
                tree.append(appendix)
            self.info('done')

        self.info()
        self.info("resolving references...")
        #print tree
        #print '--------------'
        self.env.resolve_references(tree, docname, self)
        #print tree

        for pendingnode in tree.traverse(addnodes.pending_xref):
            # This needs work, need to keep track of all targets
            # so I don't replace and create hanging refs, which
            # crash
            if pendingnode.get('reftarget',None) == 'genindex'\
                and self.config.pdf_use_index:
                pendingnode.replace_self(
                    nodes.reference(text=pendingnode.astext(),
                                    refuri=pendingnode['reftarget']))
            # FIXME: probably need to handle dangling links to domain-specific indexes
            else:
                # FIXME: This is from the LaTeX builder and I still don't understand it
                # well, and doesn't seem to work

                # resolve :ref:s to distant tex files -- we can't add a cross-reference,
                # but append the document name
                docname = pendingnode['refdocname']
                sectname = pendingnode['refsectname']
                newnodes = [nodes.emphasis(sectname, sectname)]
                for subdir, title in self.titles:
                    if docname.startswith(subdir):
                        newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                        newnodes.append(nodes.emphasis(title, title))
                        newnodes.append(nodes.Text(')', ')'))
                        break
                else:
                    pass
                pendingnode.replace_self(newnodes)
            #else:
            #pass
        return tree
Example #7
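 # Wraps the directive content in a literal_block: the first content line is
 # stored as the requested parser name and the remaining lines become the
 # block's text.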
 def parser(self, name, arguments, options, content, lineno, content_offset,
            block_text, state, state_machine):
     block = literal_block()
     block['parser'] = content[0]
     block.children = [nodes.Text(u"\n".join(content[1:]))]
     return [block]
Example #8
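    # Include-style directive: resolves the target path (docutils <standard>
    # includes or Sphinx source-relative paths), reads the file, applies the
    # start/end line and text options, then emits a literal block, a code block,
    # or a nested render of the included text.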
    def run(self):

        from docutils.parsers.rst.directives.body import CodeBlock, NumberLines

        if not self.document.settings.file_insertion_enabled:
            raise DirectiveError(2,
                                 'Directive "{}" disabled.'.format(self.name))

        source_dir = Path(self.document["source"]).absolute().parent
        include_arg = "".join(
            [s.strip() for s in self.arguments[0].splitlines()])

        if include_arg.startswith("<") and include_arg.endswith(">"):
            # # docutils "standard" includes
            path = Path(self.klass.standard_include_path).joinpath(
                include_arg[1:-1])
        else:
            # if using sphinx interpret absolute paths "correctly",
            # i.e. relative to source directory
            try:
                sphinx_env = self.document.settings.env
                _, include_arg = sphinx_env.relfn2path(self.arguments[0])
                sphinx_env.note_included(include_arg)
            except AttributeError:
                pass
            path = Path(include_arg)
        path = source_dir.joinpath(path)

        # read file
        encoding = self.options.get("encoding",
                                    self.document.settings.input_encoding)
        error_handler = self.document.settings.input_encoding_error_handler
        # tab_width = self.options.get("tab-width", self.document.settings.tab_width)
        try:
            file_content = path.read_text(encoding=encoding,
                                          errors=error_handler)
        except Exception as error:
            raise DirectiveError(
                4,
                'Directive "{}": error reading file: {}\n{}.'.format(
                    self.name, path, error),
            )

        # get required section of text
        startline = self.options.get("start-line", None)
        endline = self.options.get("end-line", None)
        file_content = "\n".join(file_content.splitlines()[startline:endline])
        startline = startline or 0
        for split_on_type in ["start-after", "end-before"]:
            split_on = self.options.get(split_on_type, None)
            if not split_on:
                continue
            split_index = file_content.find(split_on)
            if split_index < 0:
                raise DirectiveError(
                    4,
                    'Directive "{}"; option "{}": text not found "{}".'.format(
                        self.name, split_on_type, split_on),
                )
            if split_on_type == "start-after":
                startline += split_index + len(split_on)
                file_content = file_content[split_index + len(split_on):]
            else:
                file_content = file_content[:split_index]

        if "literal" in self.options:
            literal_block = nodes.literal_block(file_content,
                                                source=str(path),
                                                classes=self.options.get(
                                                    "class", []))
            literal_block.line = 1  # TODO: don't think this should be 1?
            self.add_name(literal_block)
            if "number-lines" in self.options:
                try:
                    startline = int(self.options["number-lines"] or 1)
                except ValueError:
                    raise DirectiveError(
                        3, ":number-lines: with non-integer "
                        "start value")
                endline = startline + len(file_content.splitlines())
                if file_content.endswith("\n"):
                    file_content = file_content[:-1]
                tokens = NumberLines([([], file_content)], startline, endline)
                for classes, value in tokens:
                    if classes:
                        literal_block += nodes.inline(value,
                                                      value,
                                                      classes=classes)
                    else:
                        literal_block += nodes.Text(value)
            else:
                literal_block += nodes.Text(file_content)
            return [literal_block]
        if "code" in self.options:
            self.options["source"] = str(path)
            state_machine = MockStateMachine(self.renderer, self.lineno)
            state = MockState(self.renderer, state_machine, self.lineno)
            codeblock = CodeBlock(
                name=self.name,
                arguments=[self.options.pop("code")],
                options=self.options,
                content=file_content.splitlines(),
                lineno=self.lineno,
                content_offset=0,
                block_text=file_content,
                state=state,
                state_machine=state_machine,
            )
            return codeblock.run()

        # Here we perform a nested render, but temporarily setup the document/reporter
        # with the correct document path and lineno for the included file.
        source = self.renderer.document["source"]
        rsource = self.renderer.reporter.source
        line_func = getattr(self.renderer.reporter, "get_source_and_line",
                            None)
        try:
            self.renderer.document["source"] = str(path)
            self.renderer.reporter.source = str(path)
            self.renderer.reporter.get_source_and_line = lambda l: (str(path),
                                                                    l)
            self.renderer.nested_render_text(file_content, startline + 1)
        finally:
            self.renderer.document["source"] = source
            self.renderer.reporter.source = rsource
            if line_func is not None:
                self.renderer.reporter.get_source_and_line = line_func
            else:
                del self.renderer.reporter.get_source_and_line
        return []
Example #9
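# Replaces every todolist node with the collected todo entries, each followed
# by a paragraph linking back to the original todo location; if todo output is
# disabled, the todo and todolist nodes are dropped instead.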
def process_todo_nodes(app, doctree, fromdocname):
    # type: (Sphinx, nodes.Node, unicode) -> None
    if not app.config['todo_include_todos']:
        for node in doctree.traverse(todo_node):
            node.parent.remove(node)

    # Replace all todolist nodes with a list of the collected todos.
    # Augment each todo with a backlink to the original location.
    env = app.builder.env

    if not hasattr(env, 'todo_all_todos'):
        env.todo_all_todos = []  # type: ignore

    for node in doctree.traverse(todolist):
        if node.get('ids'):
            content = [nodes.target()]
        else:
            content = []

        if not app.config['todo_include_todos']:
            node.replace_self(content)
            continue

        for todo_info in env.todo_all_todos:  # type: ignore
            para = nodes.paragraph(classes=['todo-source'])
            if app.config['todo_link_only']:
                description = _('<<original entry>>')
            else:
                description = (
                    _('(The <<original entry>> is located in %s, line %d.)') %
                    (todo_info['source'], todo_info['lineno']))
            desc1 = description[:description.find('<<')]
            desc2 = description[description.find('>>') + 2:]
            para += nodes.Text(desc1, desc1)

            # Create a reference
            newnode = nodes.reference('', '', internal=True)
            innernode = nodes.emphasis(_('original entry'),
                                       _('original entry'))
            try:
                newnode['refuri'] = app.builder.get_relative_uri(
                    fromdocname, todo_info['docname'])
                newnode['refuri'] += '#' + todo_info['target']['refid']
            except NoUri:
                # ignore if no URI can be determined, e.g. for LaTeX output
                pass
            newnode.append(innernode)
            para += newnode
            para += nodes.Text(desc2, desc2)

            todo_entry = todo_info['todo']
            # Remove targetref from the (copied) node to avoid emitting a
            # duplicate label of the original entry when we walk this node.
            if 'targetref' in todo_entry:
                del todo_entry['targetref']

            # (Recursively) resolve references in the todo content
            env.resolve_references(todo_entry, todo_info['docname'],
                                   app.builder)

            # Insert into the todolist
            content.append(todo_entry)
            content.append(para)

        node.replace_self(content)
Example #10
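    # Builds an image_node from the directive options (group, size, title,
    # caption, alignment), registering remote URIs for download when requested.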
    def run(self):
        env = self.state.document.settings.env
        conf = env.app.config.images_config

        #TODO get defaults from config
        group = self.options.get(
            'group',
            conf['default_group'] if conf['default_group'] else uuid.uuid4())
        classes = self.options.get('class', '')
        width = self.options.get('width', conf['default_image_width'])
        height = self.options.get('height', conf['default_image_height'])
        alt = self.options.get('alt', '')
        title = self.options.get('title',
                                 '' if conf['default_show_title'] else None)
        align = self.options.get('align', '')
        show_caption = self.options.get('show_caption', False)
        legacy_classes = self.options.get('legacy_class', '')

        #TODO get default from config
        download = self.options.get('download', conf['download'])

        # parse nested content
        #TODO: something is broken here, not parsed as expected
        description = nodes.paragraph()
        content = nodes.paragraph()
        content += [nodes.Text(u"%s" % x) for x in self.content]
        self.state.nested_parse(content, 0, description)

        img = image_node()

        if self.is_remote(self.arguments[0]):
            img['remote'] = True
            if download:
                img['uri'] = os.path.join(
                    '_images',
                    hashlib.sha1(self.arguments[0].encode()).hexdigest())
                img['remote_uri'] = self.arguments[0]
                env.remote_images[img['remote_uri']] = img['uri']
                env.images.add_file('', img['uri'])
            else:
                img['uri'] = self.arguments[0]
                img['remote_uri'] = self.arguments[0]
        else:
            img['uri'] = self.arguments[0]
            img['remote'] = False
            env.images.add_file('', img['uri'])

        img['content'] = description.astext()

        if title is None:
            img['title'] = ''
        elif title:
            img['title'] = title
        else:
            img['title'] = img['content']
            img['content'] = ''

        img['show_caption'] = show_caption
        img['legacy_classes'] = legacy_classes
        img['group'] = group
        img['size'] = (width, height)
        img['classes'] += classes
        img['alt'] = alt
        img['align'] = align
        return [img]
Example #11
 def visit_number_reference(self, node):
     # type: (addnodes.number_reference) -> None
     text = nodes.Text(node.get('title', '#'))
     self.visit_Text(text)
     raise nodes.SkipNode
Example #12
    def transform(self, node):
        """Transform a single field list *node*."""
        typemap = self.typemap

        entries = []
        groupindices = {}
        types = {}

        # step 1: traverse all fields and collect field types and content
        for field in node:
            fieldname, fieldbody = field
            try:
                # split into field type and argument
                fieldtype, fieldarg = fieldname.astext().split(None, 1)
            except ValueError:
                # maybe an argument-less field type?
                fieldtype, fieldarg = fieldname.astext(), ''
            typedesc, is_typefield = typemap.get(fieldtype, (None, None))

            # sort out unknown fields
            if typedesc is None or typedesc.has_arg != bool(fieldarg):
                # either the field name is unknown, or the argument doesn't
                # match the spec; capitalize field name and be done with it
                new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
                if fieldarg:
                    new_fieldname += ' ' + fieldarg
                fieldname[0] = nodes.Text(new_fieldname)
                entries.append(field)
                continue

            typename = typedesc.name

            # collect the content, trying not to keep unnecessary paragraphs
            if _is_single_paragraph(fieldbody):
                content = fieldbody.children[0].children
            else:
                content = fieldbody.children

            # if the field specifies a type, put it in the types collection
            if is_typefield:
                # filter out only inline nodes; others will result in invalid
                # markup being written out
                content = [
                    n for n in content if isinstance(n, nodes.Inline)
                    or isinstance(n, nodes.Text)
                ]
                if content:
                    types.setdefault(typename, {})[fieldarg] = content
                continue

            # also support syntax like ``:param type name:``
            if typedesc.is_typed:
                try:
                    argtype, argname = fieldarg.split(None, 1)
                except ValueError:
                    pass
                else:
                    types.setdefault(typename, {})[argname] = \
                        [nodes.Text(argtype)]
                    fieldarg = argname

            translatable_content = nodes.inline(fieldbody.rawsource,
                                                translatable=True)
            translatable_content.source = fieldbody.parent.source
            translatable_content.line = fieldbody.parent.line
            translatable_content += content

            # grouped entries need to be collected in one entry, while others
            # get one entry per field
            if typedesc.is_grouped:
                if typename in groupindices:
                    group = entries[groupindices[typename]]
                else:
                    groupindices[typename] = len(entries)
                    group = [typedesc, []]
                    entries.append(group)
                entry = typedesc.make_entry(fieldarg, [translatable_content])
                group[1].append(entry)
            else:
                entry = typedesc.make_entry(fieldarg, [translatable_content])
                entries.append([typedesc, entry])

        # step 2: all entries are collected, construct the new field list
        new_list = nodes.field_list()
        for entry in entries:
            if isinstance(entry, nodes.field):
                # pass-through old field
                new_list += entry
            else:
                fieldtype, content = entry
                fieldtypes = types.get(fieldtype.name, {})
                new_list += fieldtype.make_field(fieldtypes, self.domain,
                                                 content)

        node.replace_self(new_list)
Example #13
    def run(self):
        """Include a file as part of the content of this reST file."""
        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        source_dir = os.path.dirname(os.path.abspath(source))
        path = directives.path(self.arguments[0])
        if path.startswith('<') and path.endswith('>'):
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))
        path = utils.relative_path(None, path)
        path = nodes.reprunicode(path)
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler = self.state.document.settings.input_encoding_error_handler
        tab_width = self.options.get(
            'tab-width', self.state.document.settings.tab_width)
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
        except UnicodeEncodeError as error:
            raise self.severe('Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                      (self.name, ErrorString(error)))
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError as error:
            raise self.severe('Problem with "%s" directive:\n%s' %
                              (self.name, ErrorString(error)))
        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]

        include_lines = statemachine.string2lines(rawtext, tab_width,
                                                  convert_whitespace=True)
        if 'literal' in self.options:
            # Convert tabs to spaces, if `tab_width` is positive.
            if tab_width >= 0:
                text = rawtext.expandtabs(tab_width)
            else:
                text = rawtext
            literal_block = nodes.literal_block(rawtext, source=path,
                                    classes=self.options.get('class', []))
            literal_block.line = 1
            self.add_name(literal_block)
            if 'number-lines' in self.options:
                try:
                    startline = int(self.options['number-lines'] or 1)
                except ValueError:
                    raise self.error(':number-lines: with non-integer '
                                     'start value')
                endline = startline + len(include_lines)
                if text.endswith('\n'):
                    text = text[:-1]
                tokens = NumberLines([([], text)], startline, endline)
                for classes, value in tokens:
                    if classes:
                        literal_block += nodes.inline(value, value,
                                                      classes=classes)
                    else:
                        literal_block += nodes.Text(value, value)
            else:
                literal_block += nodes.Text(text, text)
            return [literal_block]
        if 'code' in self.options:
            self.options['source'] = path
            codeblock = CodeBlock(self.name,
                                  [self.options.pop('code')], # arguments
                                  self.options,
                                  include_lines, # content
                                  self.lineno,
                                  self.content_offset,
                                  self.block_text,
                                  self.state,
                                  self.state_machine)
            return codeblock.run()
        self.state_machine.insert_input(include_lines, path)
        return []
Example #14
 def apply(self, **kwargs):
     for node in self.document.traverse(exercise):
         node.children[0][0] = nodes.Text(get_exercise_title(
             self.env, node))
Example #15
 def visit_text(self, mdnode):
     self.current_node.append(nodes.Text(mdnode.literal, mdnode.literal))
Example #16
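# For each section that directly contains autodoc object descriptions, builds a
# two-column summary table (linked object name plus the first paragraph of its
# description) and inserts it just before the first of those descriptions; for
# classes, a "Members" paragraph linking documented methods and attributes is
# injected into the class body as well.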
def write_autosummaries(app, doctree):
    for idx, node in enumerate(doctree.traverse(nodes.section)):

        immediate_autodoc_nodes = [
            n
            for n in node.traverse(addnodes.desc)
            if n.parent is node
            and n.attributes.get("objtype", None)
            in ("attribute", "data", "class", "function")
        ]
        if not immediate_autodoc_nodes:
            continue
        where = node.index(immediate_autodoc_nodes[0])

        immediate_autodoc_nodes = sorted(
            immediate_autodoc_nodes,
            key=lambda node: node[0].attributes["fullname"].lower(),
        )

        table = nodes.table("", classes=["longtable"])
        group = nodes.tgroup("", cols=2)

        table.append(group)
        group.append(nodes.colspec("", colwidth=10))
        group.append(nodes.colspec("", colwidth=90))

        header = nodes.thead("")
        header.append(
            nodes.row(
                "",
                nodes.entry("", nodes.Text("Object Name", "Object Name")),
                nodes.entry("", nodes.Text("Description", "Description")),
            )
        )
        group.append(header)

        body = nodes.tbody("")
        group.append(body)

        for ad_node in immediate_autodoc_nodes:

            # what = ad_node.attributes["objtype"]
            sig = ad_node.children[0]

            ids = sig.attributes.get("ids", [None])
            if not ids:
                continue

            refid = ids[0]
            if not refid:
                continue

            row = nodes.row("")

            obj = _track_autodoced.get(refid, None)

            if inspect.isfunction(obj):
                param_str = _quick_inspect_sig(*inspect.getfullargspec(obj))
            else:
                param_str = ""

            name_node = list(sig.traverse(addnodes.desc_name))
            if name_node:
                name_node = name_node[0]
            else:
                continue

            name_node = name_node.deepcopy()

            # nodes.literal(
            #    "", *[c.copy() for c in name_node.children]
            # )

            p = nodes.paragraph(
                "",
                "",
                # nodes.Text(what + " ", what + " "),
                nodes.reference(
                    "",
                    "",
                    name_node,
                    refid=refid,
                    classes=["reference", "internal"],
                ),
                nodes.Text(param_str, param_str),
            )

            row.append(
                nodes.entry("", p, classes=["nowrap", "autosummary-name"])
            )
            try:
                para = ad_node[1][0]
                if isinstance(para, nodes.paragraph):
                    text = para.deepcopy()
                else:
                    text = nodes.Text("", "")
            except IndexError:
                text = nodes.Text("", "")

            if ad_node.attributes.get("objtype") == "class":
                member_nodes = []

                for attr_desc in ad_node.traverse(addnodes.desc):
                    objtype = attr_desc.attributes.get("objtype")
                    if objtype not in ("classmethod", "method", "attribute"):
                        continue

                    attr_sig = attr_desc.children[0]

                    attr_ids = attr_sig.attributes.get("ids", [None])
                    if not attr_ids:
                        continue

                    attr_ref_id = attr_ids[0]
                    if not attr_ref_id:
                        continue

                    attr_name_node = list(
                        attr_desc.traverse(addnodes.desc_name)
                    )[0]
                    attr_name_node = attr_name_node.deepcopy()

                    if objtype in ("classmethod", "method"):
                        attr_name_node.append(nodes.Text("()"))

                    attr_ref = nodes.reference(
                        "",
                        "",
                        attr_name_node,
                        refid=attr_ref_id,
                        classes=["reference", "internal"],
                    )

                    member_nodes.append(attr_ref)

                if member_nodes:
                    method_list = nodes.paragraph("", "", member_nodes[0])

                    for ref in member_nodes[1:]:
                        method_list.append(nodes.Text(", "))
                        method_list.append(ref)

                    method_box = nodes.container(
                        "",
                        nodes.paragraph(
                            "", "", nodes.strong("", nodes.Text("Members"))
                        ),
                        method_list,
                        classes=["class-members"],
                    )

                    content = ad_node.traverse(addnodes.desc_content)
                    if content:
                        content = list(content)[0]
                        for i, n in enumerate(content.children):
                            if isinstance(n, (addnodes.index, addnodes.desc)):
                                content.insert(i - 1, method_box)
                                break

            entry = nodes.entry("", text)

            row.append(entry)

            body.append(row)

        if where > 0:
            node.insert(where, table)
Example #17
 def visit_softbreak(self, _):
     self.current_node.append(nodes.Text('\n'))
Example #18
def missing_reference(app, env, node, contnode):
    # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> None
    """Attempt to resolve a missing reference via intersphinx references."""
    target = node['reftarget']
    inventories = InventoryAdapter(env)
    objtypes = None  # type: List[unicode]
    if node['reftype'] == 'any':
        # we search anything!
        objtypes = [
            '%s:%s' % (domain.name, objtype)
            for domain in env.domains.values()
            for objtype in domain.object_types
        ]
        domain = None
    else:
        domain = node.get('refdomain')
        if not domain:
            # only objects in domains are in the inventory
            return
        objtypes = env.get_domain(domain).objtypes_for_role(node['reftype'])
        if not objtypes:
            return
        objtypes = ['%s:%s' % (domain, objtype) for objtype in objtypes]
    if 'std:cmdoption' in objtypes:
        # until Sphinx-1.6, cmdoptions are stored as std:option
        objtypes.append('std:option')
    to_try = [(inventories.main_inventory, target)]
    if domain:
        full_qualified_name = env.get_domain(domain).get_full_qualified_name(
            node)
        if full_qualified_name:
            to_try.append((inventories.main_inventory, full_qualified_name))
    in_set = None
    if ':' in target:
        # first part may be the foreign doc set name
        setname, newtarget = target.split(':', 1)
        if setname in inventories.named_inventory:
            in_set = setname
            to_try.append((inventories.named_inventory[setname], newtarget))
            if domain:
                node['reftarget'] = newtarget
                full_qualified_name = env.get_domain(
                    domain).get_full_qualified_name(node)
                if full_qualified_name:
                    to_try.append((inventories.named_inventory[setname],
                                   full_qualified_name))
    for inventory, target in to_try:
        for objtype in objtypes:
            if objtype not in inventory or target not in inventory[objtype]:
                continue
            proj, version, uri, dispname = inventory[objtype][target]
            if '://' not in uri and node.get('refdoc'):
                # get correct path in case of subdirectories
                uri = path.join(relative_path(node['refdoc'], '.'), uri)
            if version:
                reftitle = _('(in %s v%s)') % (proj, version)
            else:
                reftitle = _('(in %s)') % (proj, )
            newnode = nodes.reference('',
                                      '',
                                      internal=False,
                                      refuri=uri,
                                      reftitle=reftitle)
            if node.get('refexplicit'):
                # use whatever title was given
                newnode.append(contnode)
            elif dispname == '-' or \
                    (domain == 'std' and node['reftype'] == 'keyword'):
                # use whatever title was given, but strip prefix
                title = contnode.astext()
                if in_set and title.startswith(in_set + ':'):
                    newnode.append(
                        contnode.__class__(title[len(in_set) + 1:],
                                           title[len(in_set) + 1:]))
                else:
                    newnode.append(contnode)
            else:
                # else use the given display name (used for :ref:)
                newnode.append(contnode.__class__(dispname, dispname))
            return newnode
    # at least get rid of the ':' in the target if no explicit title given
    if in_set is not None and not node.get('refexplicit', True):
        if len(contnode) and isinstance(contnode[0], nodes.Text):
            contnode[0] = nodes.Text(newtarget, contnode[0].rawsource)
Example #19
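    # Runs the PDF translator over the document, optionally prepends a generated
    # contents topic and a rendered cover page, then feeds the doctree to
    # createpdf.RstToPdf() and stores the resulting PDF bytes in self.output.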
    def translate(self):
        visitor = PDFTranslator(self.document, self.builder)
        self.document.walkabout(visitor)
        lang = self.config.language or 'en'
        langmod = get_language_available(lang)[2]
        self.docutils_languages = {lang: langmod}

        # Generate Contents topic manually
        if self.use_toc:
            contents = nodes.topic(classes=['contents'])
            contents += nodes.title('')
            contents[0] += nodes.Text(langmod.labels['contents'])
            contents['ids'] = ['Contents']
            pending = nodes.topic()
            contents.append(pending)
            pending.details = {}
            self.document.insert(
                0, nodes.raw(text='SetPageCounter 1 arabic', format='pdf'))
            self.document.insert(
                0,
                nodes.raw(text='OddPageBreak %s' % self.page_template,
                          format='pdf'))
            self.document.insert(0, contents)
            self.document.insert(
                0, nodes.raw(text='SetPageCounter 1 lowerroman', format='pdf'))
            contTrans = PDFContents(self.document)
            contTrans.toc_depth = self.toc_depth
            contTrans.startnode = pending
            contTrans.apply()

        if self.use_coverpage:
            # Generate cover page

            # FIXME: duplicate from createpdf, refactor!

            # Find cover template, save it in cover_file
            def find_cover(name):
                cover_path = [
                    self.srcdir,
                    os.path.expanduser('~/.rst2pdf'),
                    os.path.join(self.PATH, 'templates')
                ]

                # Add the Sphinx template paths
                def add_template_path(path):
                    return os.path.join(self.srcdir, path)

                cover_path.extend(
                    map(add_template_path, self.config.templates_path))

                cover_file = None
                for d in cover_path:
                    if os.path.exists(os.path.join(d, name)):
                        cover_file = os.path.join(d, name)
                        break
                return cover_file

            cover_file = find_cover(self.config.pdf_cover_template)
            if cover_file is None:
                log.error("Can't find cover template %s, using default" %
                          self.custom_cover)
                cover_file = find_cover('sphinxcover.tmpl')

            # This is what's used in the python docs because
            # Latex does a manual linebreak. This sucks.
            authors = self.document.settings.author.split('\\')

            # Feed data to the template, get restructured text.
            cover_text = createpdf.renderTemplate(
                tname=cover_file,
                title=self.document.settings.title
                or visitor.elements['title'],
                subtitle='%s %s' % (_('version'), self.config.version),
                authors=authors,
                date=ustrftime(self.config.today_fmt or _('%B %d, %Y')))

            cover_tree = docutils.core.publish_doctree(cover_text)
            self.document.insert(0, cover_tree)

        sio = StringIO()

        if self.invariant:
            createpdf.patch_PDFDate()
            createpdf.patch_digester()

        createpdf.RstToPdf(
            sphinx=True,
            stylesheets=self.stylesheets,
            language=self.__language,
            breaklevel=self.breaklevel,
            breakside=self.breakside,
            fit_mode=self.fitmode,
            font_path=self.fontpath,
            inline_footnotes=self.inline_footnotes,
            highlightlang=self.highlightlang,
            splittables=self.splittables,
            style_path=self.style_path,
            basedir=self.srcdir,
            def_dpi=self.default_dpi,
            real_footnotes=self.real_footnotes,
            numbered_links=self.use_numbered_links,
            background_fit_mode=self.fit_background_mode,
            baseurl=self.baseurl,
            section_header_depth=self.section_header_depth).createPdf(
                doctree=self.document, output=sio, compressed=self.compressed)
        self.output = sio.getvalue()
Example #20
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Type hints configs ------------------------------------------------------

autodoc_typehints = "signature"

# -- A patch that turns off cross refs for type annotations ------------------

import sphinx.domains.python
from docutils import nodes
from sphinx import addnodes

# replaces pending_xref node with desc_type for type annotations
sphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type(
    "", nodes.Text(t))

# -- Autosummary patch to get list of a classes, funcs automatically ----------

from importlib import import_module
from inspect import getmembers, isclass, isfunction
import sphinx.ext.autosummary
from sphinx.ext.autosummary import Autosummary
from docutils.parsers.rst import directives
from docutils.statemachine import StringList


class BetterAutosummary(Autosummary):
    """Autosummary with autolisting for modules.

    By default it tries to import all public names (__all__),
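The class above is meant to stand in for the stock Autosummary directive so
that listing a module expands to its public classes and functions. A minimal
sketch, assuming the overrides are completed elsewhere in the file, of how such
a patch is commonly applied in conf.py:

# Hedged sketch: rebind the module-level name so autosummary's own setup()
# registers the subclass; this assignment is not part of the excerpt above.
sphinx.ext.autosummary.Autosummary = BetterAutosummary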
Example #21
0
 def apply(self):
     if not len(self.document):
         # @@@ replace these DataErrors with proper system messages
         raise DataError('Document tree is empty.')
     header = self.document[0]
     if not isinstance(header, nodes.field_list) or \
           'rfc2822' not in header['classes']:
         raise DataError('Document does not begin with an RFC-2822 '
                         'header; it is not a CSEP.')
     csep = None
     for field in header:
         if field[0].astext().lower() == 'csep':  # should be the first field
             value = field[1].astext()
             try:
                 csep = int(value)
                 cvs_url = self.csep_cvs_url % csep
             except ValueError:
                 csep = value
                 cvs_url = None
                 msg = self.document.reporter.warning(
                     '"CSEP" header must contain an integer; "%s" is an '
                     'invalid value.' % csep,
                     base_node=field)
                 msgid = self.document.set_id(msg)
                 prb = nodes.problematic(value,
                                         value or '(none)',
                                         refid=msgid)
                 prbid = self.document.set_id(prb)
                 msg.add_backref(prbid)
                 if len(field[1]):
                     field[1][0][:] = [prb]
                 else:
                     field[1] += nodes.paragraph('', '', prb)
             break
     if csep is None:
         raise DataError('Document does not contain an RFC-2822 "CSEP" '
                         'header.')
     if csep == 0:
         # Special processing for CSEP 0.
         pending = nodes.pending(CSEPZero)
         self.document.insert(1, pending)
         self.document.note_pending(pending)
     if len(header) < 2 or header[1][0].astext().lower() != 'title':
         raise DataError('No title!')
     for field in header:
         name = field[0].astext().lower()
         body = field[1]
         if len(body) > 1:
             raise DataError('CSEP header field body contains multiple '
                             'elements:\n%s' % field.pformat(level=1))
         elif len(body) == 1:
             if not isinstance(body[0], nodes.paragraph):
                 raise DataError('CSEP header field body may only contain '
                                 'a single paragraph:\n%s' %
                                 field.pformat(level=1))
         elif name == 'last-modified':
             date = time.strftime(
                 '%d-%b-%Y',
                 time.localtime(os.stat(self.document['source'])[8]))
             if cvs_url:
                 body += nodes.paragraph(
                     '', '', nodes.reference('', date, refuri=cvs_url))
         else:
             # empty
             continue
         para = body[0]
         if name == 'author':
             for node in para:
                 if isinstance(node, nodes.reference):
                     node.parent.replace(node, mask_email(node))
         elif name == 'discussions-to':
             for node in para:
                 if isinstance(node, nodes.reference):
                     node.parent.replace(node, mask_email(node, csep))
         elif name in ('replaces', 'replaced-by', 'requires'):
             newbody = []
             space = nodes.Text(' ')
             for refcsep in re.split(r',?\s+', body.astext()):
                 csepno = int(refcsep)
                 newbody.append(
                     nodes.reference(
                         refcsep,
                         refcsep,
                         refuri=(self.document.settings.csep_base_url +
                                 self.csep_url % csepno)))
                 newbody.append(space)
             para[:] = newbody[:-1]  # drop trailing space
         elif name == 'last-modified':
             utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
             if cvs_url:
                 date = para.astext()
                 para[:] = [nodes.reference('', date, refuri=cvs_url)]
         elif name == 'content-type':
             csep_type = para.astext()
             uri = self.document.settings.csep_base_url + self.csep_url % 3
             para[:] = [nodes.reference('', csep_type, refuri=uri)]
         elif name == 'version' and len(body):
             utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
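The transform above expects an RFC-2822-style field list at the very top of the
document. A minimal sketch, with purely hypothetical field values, of the kind
of header it parses (field names are taken from the branches above):

csep_source = """\
:CSEP: 42
:Title: Example enhancement proposal
:Author: Jane Doe <jane@example.org>
:Discussions-To: list@example.org
:Content-Type: text/x-rst

The abstract paragraph follows the header.
"""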
Example #22
0
def append_synopsis_section(state, section_synopsis, search_node, title, role, optional=False):
    env = state.document.settings.env
    clsname = env._kaa_current_class_name
    cls = env._kaa_current_class
    # Crawl through the nodes of the section with the given title ('Methods',
    # 'Properties', etc.) and look for all the <desc> nodes, which contain
    # methods or attributes.  Build a list called members whose entries start
    # with the member name and end with the first paragraph node of its
    # description.
    members = []
    subsection_node = find_subsection_node(search_node, title)
    if subsection_node and subsection_node.children:
        desc_nodes = subsection_node.children[0].traverse(sphinx.addnodes.desc, descend=0, siblings=1)
    else:
        desc_nodes = []

    for node in desc_nodes:
        sig = node.first_child_matching_class(sphinx.addnodes.desc_signature)
        content = node.first_child_matching_class(sphinx.addnodes.desc_content)
        pidx = node.children[content].first_child_matching_class(nodes.paragraph)
        # The member name is the <desc_name> child of the signature node.
        nm = node.children[sig].first_child_matching_class(sphinx.addnodes.desc_name)
        name = node.children[sig].children[nm].astext()
        desc = nodes.Text('')
        if pidx is not None:
            desc = node.children[content].children[pidx].deepcopy()
        
        if subsection_node['title'] == 'Properties':
            prop = getattr(cls, name.split('.')[-1], None)
            perm = 'unknown'
            if prop:
                if prop.fset and not prop.fget:
                    perm = 'write-only'
                elif prop.fget and not prop.fset:
                    perm = 'read-only'
                else:
                    perm = 'read/write'
            desc = nodes.Text('')
            members.append((name, nodes.Text(perm), desc))
        else:
            # TODO: parse only the first sentence of desc
            desc = nodes.Text('')
            members.append((name, desc))

    # If no members found and this section is optional (Class Attributes),
    # we're done.
    if not members and optional:
        return

    # Create a new synopsis section with the given title.
    syn = synopsis(title=title, has_members=bool(members))
    section_synopsis.append(syn)

    # Loop through all members and add rows to the synopsis section table.
    for info in members:
        row = nodes.row()
        syn.append(row)

        # The first column is a <th> with the member name, cross-referenced
        # to the actual member on this page.
        name = info[0]
        col = td(heading=True)
        row.append(col)
        list = ViewList()
        if title == 'Signals':
            name = 'signals.' + name
        list.append(':%s:`~%s`' % (role, clsname + '.' + name), '')
        state.nested_parse(list, 0, col)

        # Add remaining columns from member info.
        for col_info in info[1:]:
            col = td()
            col.append(col_info)
            row.append(col)

        # Last column has 'desc' class (disables nowrap).
        col['classes'] = ['desc']
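The synopsis and td names used above are custom docutils nodes defined
elsewhere in this extension; their real definitions are not part of the
excerpt. A minimal sketch of what such node classes typically look like (the
base classes are an assumption):

from docutils import nodes

class synopsis(nodes.General, nodes.Element):
    """Container for a synopsis table, filled by append_synopsis_section (assumed)."""

class td(nodes.Part, nodes.Element):
    """Table cell node; heading=True marks a header cell (assumed)."""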
Example #23
0
 def visit_image(self, node):
     if node.hasattr('alt'):
         self.parent.append(nodes.Text(node['alt']))
     raise nodes.SkipNode
Example #24
0
def inline_code(inline):
    literal_node = nodes.literal()
    literal_node.append(nodes.Text(inline.c))
    return literal_node
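A short usage sketch, assuming inline is a pandoc-style element whose c
attribute carries the literal text (the stand-in class below is purely
illustrative):

class FakeInline:
    def __init__(self, text):
        self.c = text

# inline_code wraps the text in a docutils literal node.
node = inline_code(FakeInline("print('hi')"))
print(node.astext())  # -> print('hi')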
Example #25
0
 def visit_br(self, node):
     return nodes.Text('\n')
Example #26
0
 def attach_type_suffixes(self, node, suffixes):
     for suffix in suffixes:
         node += nodes.Text(unicode(suffix))
Example #27
0
def no_needs_found_paragraph():
    nothing_found = "No needs passed the filters"
    para = nodes.line()
    nothing_found_node = nodes.Text(nothing_found, nothing_found)
    para += nothing_found_node
    return para
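A short sketch of how such a fallback node is typically appended when a filter
matches nothing (the surrounding names are illustrative, not from the excerpt):

filtered_needs = []  # imagine the needs that survived the directive's filters
content = []
if not filtered_needs:
    content.append(no_needs_found_paragraph())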
Example #28
0
         try:
             startline = int(self.options['number-lines'] or 1)
         except ValueError:
             raise self.error(':number-lines: with non-integer '
                              'start value')
         endline = startline + len(include_lines)
         if text.endswith('\n'):
             text = text[:-1]
         tokens = NumberLines([([], text)], startline, endline)
         for classes, value in tokens:
             if classes:
                 literal_block += nodes.inline(value,
                                               value,
                                               classes=classes)
             else:
                 literal_block += nodes.Text(value, value)
     else:
         literal_block += nodes.Text(text, text)
     return [literal_block]
 if 'code' in self.options:
     self.options['source'] = path
     codeblock = CodeBlock(
         self.name,
         [self.options.pop('code')],  # arguments
         self.options,
         include_lines,  # content
         self.lineno,
         self.content_offset,
         self.block_text,
         self.state,
         self.state_machine)
     return codeblock.run()
Example #29
0
    def run(self):
        env = self.state.document.settings.env
        extcode_config = env.app.config.extcode

        if not extcode_config:
            if all(opt not in self.options for opt in self.extra_option_spec):
                return super(ExtCode, self).run()  # nothing to do special

        rendered_block = self.options.get('rendered-block',
                                          extcode_config.get('rendered-block'))

        line_annotations = {}
        annotations = self.options.get('annotations', [])
        annotationsmap = dict((k.astext(), v) for k, v in annotations)
        for i, c in enumerate(self.content):
            match = annotation_matcher(c)
            if match:
                self.content[i], label = match.groups()
                if label in annotationsmap:
                    line_annotations[i] = (label, annotationsmap[label])
                else:
                    #TODO: warning
                    line_annotations[i] = (label, None)

        # get literal from modified self.content
        literal = super(ExtCode, self).run()[0]
        # line_annotations attribute will be used for writer (not yet)
        literal['line_annotations'] = line_annotations

        wrapper = extcode(classes=['extcode'])
        set_source_info(self, wrapper)

        # Check whether the content parses as reST and supports a partial build.
        try:
            partial_doc = sandbox_rst_parser(u'\n'.join(self.content),
                                             env.doc2path(env.docname),
                                             env.settings)
            partial_out = sandbox_partial_builder(partial_doc, env)
        except Exception as e:
            env.warn(env.docname,
                     u'extcode: partial build failed: %s' % str(e),
                     lineno=self.lineno)
            partial_doc = None
            partial_out = None

        if literal['language'] == 'rst' and rendered_block:
            wrapper['classes'].append('extcode-layout-' + rendered_block)

            rendered = nodes.container()
            set_source_info(self, rendered)

            only_html = addnodes.only(expr='html')
            set_source_info(self, only_html)
            only_html += nodes.raw(partial_out,
                                   partial_out,
                                   format='html',
                                   classes=['extcode-rendered'])
            rendered += only_html

            if 'rendered-image' in self.options:
                only_xml = addnodes.only(expr='xml')
                set_source_info(self, only_xml)
                only_xml += nodes.image(self.options['rendered-image'],
                                        uri=self.options['rendered-image'])
                rendered += only_xml

            #FIXME: need translation support
            make_text = lambda t: nodes.inline(t, t)

            if rendered_block == 'horizontal':
                table = build_table(
                    [[make_text('literal'),
                      make_text('rendered')], [literal, rendered]], [1, 1],
                    head_rows=1,
                    attrs={'classes': ['extcode-layout']})
                table.setdefault('classes', []).append('extcode-layout')
                wrapper.append(table)

            elif rendered_block == 'vertical':
                table = build_table([[make_text('literal'), literal],
                                     [make_text('rendered'), rendered]],
                                    [2, 8],
                                    stub_columns=1,
                                    attrs={'classes': ['extcode-layout']})
                table.setdefault('classes', []).append('extcode-layout')
                wrapper.append(table)

            else:  # toggle, tab
                wrapper.append(literal)
                wrapper.append(rendered)
        else:
            wrapper.append(literal)

        if line_annotations and 'annotate-inline' in self.options:
            prefix = '... '  # TODO: prefix customization
            contents = []
            for i in range(0, len(self.content)):
                label, value = line_annotations.get(i, ('', None))
                line = nodes.line()
                if label and value:
                    #FIXME: label and explanation need translation support
                    # TODO: label customization (e.g. render with a number)
                    abbr = nodes.abbreviation(label, label)
                    abbr['explanation'] = value.astext()
                    line.append(nodes.inline(prefix, prefix))
                    line.append(abbr)
                elif label:
                    line.append(nodes.inline(prefix, prefix))
                    line.append(nodes.Text(label, label))
                contents.append(line)
            overlay = nodes.line_block(classes=['extcode-overlay'])
            set_source_info(self, overlay)
            overlay.extend(contents)
            wrapper.append(overlay)

        if annotations and 'annotate-block' in self.options:
            annotations['classes'] = ['extcode-annotations']
            set_source_info(self, annotations)
            wrapper.append(annotations)

        return [wrapper]
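The directive relies on a custom extcode container node and is presumably
registered through the extension's setup(); neither appears in the excerpt. A
minimal sketch of what that wiring usually looks like (the visitor functions
are placeholders):

from docutils import nodes

class extcode(nodes.General, nodes.Element):
    """Wrapper node produced by ExtCode (assumed definition)."""

def setup(app):
    # Register the wrapper node with simple pass-through HTML rendering and
    # expose the directive plus the 'extcode' config value read above.
    app.add_node(extcode,
                 html=(lambda self, node: self.body.append(self.starttag(node, 'div')),
                       lambda self, node: self.body.append('</div>\n')))
    app.add_directive('extcode', ExtCode)
    app.add_config_value('extcode', {}, 'env')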
Example #30
0
def missing_reference(app, env, node, contnode):
    """Attempt to resolve a missing reference via intersphinx references."""
    target = node['reftarget']
    if node['reftype'] == 'any':
        # we search anything!
        objtypes = [
            '%s:%s' % (domain.name, objtype)
            for domain in env.domains.values()
            for objtype in domain.object_types
        ]
        domain = None  # no single domain in the 'any' case; checked further down
    elif node['reftype'] == 'doc':
        domain = 'std'  # special case
        objtypes = ['std:doc']
    else:
        domain = node.get('refdomain')
        if not domain:
            # only objects in domains are in the inventory
            return
        objtypes = env.domains[domain].objtypes_for_role(node['reftype'])
        if not objtypes:
            return
        objtypes = ['%s:%s' % (domain, objtype) for objtype in objtypes]
    to_try = [(env.intersphinx_inventory, target)]
    in_set = None
    if ':' in target:
        # first part may be the foreign doc set name
        setname, newtarget = target.split(':', 1)
        if setname in env.intersphinx_named_inventory:
            in_set = setname
            to_try.append(
                (env.intersphinx_named_inventory[setname], newtarget))
    for inventory, target in to_try:
        for objtype in objtypes:
            if objtype not in inventory or target not in inventory[objtype]:
                continue
            proj, version, uri, dispname = inventory[objtype][target]
            if '://' not in uri and node.get('refdoc'):
                # get correct path in case of subdirectories
                uri = path.join(relative_path(node['refdoc'], env.srcdir), uri)
            newnode = nodes.reference('',
                                      '',
                                      internal=False,
                                      refuri=uri,
                                      reftitle=_('(in %s v%s)') %
                                      (proj, version))
            if node.get('refexplicit'):
                # use whatever title was given
                newnode.append(contnode)
            elif dispname == '-' or \
                    (domain == 'std' and node['reftype'] == 'keyword'):
                # use whatever title was given, but strip prefix
                title = contnode.astext()
                if in_set and title.startswith(in_set + ':'):
                    newnode.append(
                        contnode.__class__(title[len(in_set) + 1:],
                                           title[len(in_set) + 1:]))
                else:
                    newnode.append(contnode)
            else:
                # else use the given display name (used for :ref:)
                newnode.append(contnode.__class__(dispname, dispname))
            return newnode
    # at least get rid of the ':' in the target if no explicit title given
    if in_set is not None and not node.get('refexplicit', True):
        if len(contnode) and isinstance(contnode[0], nodes.Text):
            contnode[0] = nodes.Text(newtarget, contnode[0].rawsource)
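A handler with this signature is normally attached to Sphinx's standard
missing-reference event; the setup() below is a sketch and not part of the
excerpt:

def setup(app):
    # Let Sphinx call the resolver whenever a cross-reference cannot be
    # resolved locally.
    app.connect('missing-reference', missing_reference)
    return {'parallel_read_safe': True}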