def _create_array_items_node(self, items, path):
    """Build docutils nodes describing an array's ``items`` schema.

    ``items`` may be a positional list of schemas (tuple validation) or a
    single schema applying to every element.  A combiner key at this level
    takes precedence over both forms.
    """
    path = self._append_to_path(path, 'items')
    # A combiner at the items level overrides normal processing.
    for combiner in ['anyOf', 'allOf', 'oneOf']:
        if combiner in items:
            return self._create_combiner(items, combiner, array=True,
                                         path=path)
    node_list = nodes.compound()
    if isinstance(items, list):
        # Positional items: document each schema in order.
        text = "The first {} item{} in the list must be the following types:"
        node_list.append(
            nodes.line(text=text.format(len(items),
                                        's' if len(items) > 1 else '')))
        item_list = nodes.bullet_list()
        for i, it in enumerate(items):
            item_path = self._append_to_path(path, i)
            item_list.append(
                self._process_properties(it, top=True, path=item_path))
        node_list.append(item_list)
    else:
        # Single schema: every element of the array is constrained by it.
        node_list.append(
            nodes.line(
                text='Items in the array are restricted to the following types:'
            ))
        node_list.append(
            self._process_properties(items, top=True, path=path))
    return node_list
def run(self):
    """Build a container holding the data-type line and, when the boolean
    ``required`` option is present, an extra condition line."""
    data_type = self.arguments[0]
    # Map the boolean ``required`` option onto a condition label.
    condition = None
    if "required" in self.options:
        flag = self.options["required"]
        if flag is True:
            condition = "required"
        elif flag is False:
            condition = "optional"
    container = nodes.container()
    if condition is not None:
        container["classes"].extend([self.name, condition])
    data_type_line = nodes.line(text=data_type)
    css_classes = [self.name, "datatype"]
    # Primitive types carry an extra class for styling.
    if self.is_primitive_type(data_type):
        css_classes.append("primitive")
    data_type_line["classes"].extend(css_classes)
    if condition is None:
        container += [data_type_line]
    else:
        condition_line = nodes.line(text=condition)
        condition_line["classes"].extend([self.name, "condition"])
        container += [data_type_line, condition_line]
    return [container]
def run(self):
    """Build a two-column (Function, Description) table from Doxygen XML.

    Each content line names a memberdef in the parsed doxyxml tree; a
    pending C++ cross-reference plus the brief description is rendered.
    """
    tree = self.state.document.settings.env.app.doxyxml
    table = nodes.table()
    tgroup = nodes.tgroup(cols = 2)
    tgroup += nodes.colspec(colwidth = 50)
    tgroup += nodes.colspec(colwidth = 50)
    # header
    tgroup += nodes.thead('', nodes.row('', *[nodes.entry('', nodes.line(text = c))
                                              for c in ["Function", "Description"]]))
    # rows
    tbody = nodes.tbody()
    for c in self.content:
        name = c.strip()
        # Doxygen stores "&" with a leading space in member names.
        query = name.replace("&", " &")
        for elem in tree.findall("./compounddef/sectiondef/memberdef/[name='%s']" % query):
            args = ', '.join(e.text for e in elem.findall("./param/declname"))
            # Cross-reference resolved later by the Sphinx C++ domain.
            ref = addnodes.pending_xref('', refdomain = 'cpp', refexplicit = False,
                                        reftype = 'func', reftarget = 'kitty::' + name)
            ref += nodes.literal(text = '%s(%s)' % (name, args))
            reft = nodes.paragraph()
            reft.extend([ref])
            func = nodes.entry('', reft)
            desc = nodes.entry('', nodes.line(text = elem.findtext("./briefdescription/para")))
            tbody += nodes.row('', func, desc)
    tgroup += tbody
    table += tgroup
    return [table]
def run(self): ans_class = 'answer' # ANSWER MODE if 'ANS' in os.environ or self.options.get('force', None): set_classes(self.options) self.assert_has_content() container = nodes.Element() self.add_name(container) self.state.nested_parse(self.content, self.content_offset, container) childs = [] + container.children for node in childs: try: node['classes'].append(ans_class) except TypeError: pass childs.extend(node.children) score = nodes.line(text="Max points: %s" % self.options.get('points', 1)) return [nodes.line(), score, nodes.line()] + container.children # EMPTY ANSWER ns = [ nodes.line(), nodes.line(text=self.options.get('text', 'Solution:')) ] ns.extend(nodes.paragraph() for x in xrange(int(self.options.get('height', 20)))) return ns
def run(self):
    """Build a bullet list from "path rest-of-line" content lines, plus a
    sub-list of view links (normal / presentation mode).

    NOTE(review): ``path`` and ``item`` escape the first loop, so the
    sub-list is attached only to the *last* list item and uses the last
    ``path`` seen; raises NameError if content is empty — confirm this is
    the intended behavior (it looks like one-entry content is assumed).
    """
    l = nodes.bullet_list('ul-name')
    for line in self.content:
        path, rest = line.split(' ', 1)
        line_text = rest
        text_nodes, messages = self.state.inline_text(line_text, self.lineno)
        line = nodes.line(line_text, '', *text_nodes)
        item = nodes.list_item(line_text, line)
        l.append(item)
    # Make the sub-list
    l2 = nodes.bullet_list('ul-name')
    for ext, title in [('.html', 'Normal text'),
                       ('-big.html', 'Presentation mode'),
                       #('-s5.html', 'Slide Show'),
                       ]:
        # Inline reST link: `title <path+ext>`__
        line_text = '`%s <%s%s>`__'%(title.strip(), path, ext)
        text_nodes, messages = self.state.inline_text(line_text, self.lineno)
        line = nodes.line(line_text, '', *text_nodes)
        item2 = nodes.list_item(line_text, line)
        l2.append(item2)
    item.append(l2)
    return [l, nodes.paragraph('hi')]
def meta_links_all(self, prefix="", postfix="", exclude=None):
    """
    Documents all used link types for the current need automatically.

    :param prefix: prefix string
    :param postfix: postfix string
    :param exclude: list of extra link type names, which are excluded from output
    :return: docutils nodes
    """
    exclude = exclude or []
    data_container = []
    for link_type in self.app.config.needs_extra_links:
        # Outgoing links are stored under the plain option name.
        type_key = link_type["option"]
        if self.need[type_key] and type_key not in exclude:
            outgoing_line = nodes.line()
            outgoing_label = prefix + "{}:".format(link_type["outgoing"]) + postfix + " "
            outgoing_line += self._parse(outgoing_label)
            outgoing_line += self.meta_links(link_type["option"], incoming=False)
            data_container.append(outgoing_line)
        # Incoming (back) links are stored under "<option>_back".
        type_key = link_type["option"] + "_back"
        if self.need[type_key] and type_key not in exclude:
            incoming_line = nodes.line()
            incoming_label = prefix + "{}:".format(link_type["incoming"]) + postfix + " "
            incoming_line += self._parse(incoming_label)
            incoming_line += self.meta_links(link_type["option"], incoming=True)
            data_container.append(incoming_line)
    return data_container
def run(self):
    """Build a container with the data-type line and, when the boolean
    ``required`` option is given, an extra condition line
    ("required"/"optional")."""
    data_type = self.arguments[0]
    condition = None
    if "required" in self.options:
        if self.options["required"] is True:
            condition = "required"
        elif self.options["required"] is False:
            condition = "optional"
    container = nodes.container()
    if condition is not None:
        container["classes"].extend([self.name, condition])
    data_type_line = nodes.line(text=data_type)
    # Primitive types get an extra CSS class for styling.
    if self.is_primitive_type(data_type):
        data_type_line["classes"].extend(
            [self.name, "datatype", "primitive"])
    else:
        data_type_line["classes"].extend([self.name, "datatype"])
    if condition is not None:
        condition_line = nodes.line(text=condition)
        condition_line["classes"].extend([self.name, "condition"])
        container += [data_type_line, condition_line]
    else:
        container += [data_type_line]
    return [container]
def run(self):
    """Build a two-column table from the Doxygen XML file named by the
    first argument; the name column header is configurable via the
    ``column`` option (default "Function")."""
    doc = ET.parse("doxyxml/xml/{}.xml".format(self.arguments[0]))
    table = nodes.table()
    tgroup = nodes.tgroup(cols = 2)
    tgroup += nodes.colspec(colwidth = 50)
    tgroup += nodes.colspec(colwidth = 50)
    # header
    colname = self.options.get('column', "Function")
    tgroup += nodes.thead('', nodes.row('', *[nodes.entry('', nodes.line(text = c))
                                              for c in [colname, "Description"]]))
    # rows
    tbody = nodes.tbody()
    for target in self.content:
        for elem in doc.findall("./compounddef/sectiondef/memberdef/[name='%s']" % target):
            # Intra-page anchor link to the Doxygen member id.
            ref = nodes.reference('', target, internal = True)
            ref['refuri'] = '#{}'.format( elem.attrib["id"] )
            reft = nodes.paragraph()
            reft.extend([ref])
            func = nodes.entry('', reft)
            desc = nodes.entry('', nodes.line(text = elem.findtext("./briefdescription/para")))
            tbody += nodes.row('', func, desc)
    tgroup += tbody
    table += tgroup
    return [table]
def reference_group(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Docutils role: expand *text* into an enumerated list of references.

    Looks up references registered for the current document (relative to
    DOCS_SRC) in the REFERENCES mapping.  Each reference contributes its
    optional link/section/user/license nodes, with a quote rendered on a
    second line when present.

    :returns: the standard role tuple ``(nodes, system_messages)``.
    """
    path = inliner.document.current_source
    path = str(pathlib.Path(path).relative_to(DOCS_SRC))
    _refs = REFERENCES.get(path, {}).get(text, [])
    if not _refs:
        return ([], [])
    container = nodes.enumerated_list()
    for ref in _refs:
        # Each optional part contributes its nodes when present.
        output = [
            *ref.get("link", Null()).as_nodes(),
            *ref.get("section", Null()).as_nodes(),
            *ref.get("user", Null()).as_nodes(),
            *ref.get("license", Null()).as_nodes(),
        ]
        if Null() == (quote := ref.get("quote", Null())):
            message = nodes.paragraph()
            for node in output:
                message.append(node)
        else:
            # With a quote, metadata and quote go on separate lines.
            message = nodes.line_block(
                "",
                nodes.line("", "", *output),
                nodes.line("", "", *quote.as_nodes()),
            )
        container.append(nodes.list_item('', message))
    # BUG FIX: the original fell off the end and returned None; a docutils
    # role must always return a (nodes, messages) tuple.
    return ([container], [])
def _process_properties(self, schema, top=False, path=''):
    """Dispatch a schema node to the appropriate renderer: combiner,
    object properties, plain type, $ref/tag reference, or unrestricted."""
    # Combiners take precedence over every other schema key.
    for combiner in ['anyOf', 'allOf', 'oneOf']:
        if combiner in schema:
            return self._create_combiner(schema, combiner, top=top, path=path)
    if 'properties' in schema:
        treenodes = asdf_tree()
        required = schema.get('required', [])
        for key, node in schema['properties'].items():
            new_path = self._append_to_path(path, key)
            treenodes.append(
                self._create_property_node(key, node, key in required,
                                           path=new_path))
        comment = nodes.line(
            text='This type is an object with the following properties:')
        return schema_properties(None, *[comment, treenodes], id=path)
    elif 'type' in schema:
        details = self._process_top_type(schema, path=path)
        return schema_properties(None, details, id=path)
    elif '$ref' in schema:
        ref = self._create_ref_node(schema['$ref'])
        return schema_properties(None, *[ref], id=path)
    elif 'tag' in schema:
        ref = self._create_ref_node(schema['tag'])
        return schema_properties(None, *[ref], id=path)
    else:
        # No recognizable schema keys: the node accepts any value.
        text = nodes.emphasis(
            text='This node has no type definition (unrestricted)')
        return schema_properties(None, text, id=path)
def create_rows(self, content):
    """Normalize *content* into table rows: one single-cell row per entry.

    ``nodes.line`` entries are flattened to the string form of their first
    child (or the empty string when childless); every value is re-wrapped
    in a fresh ``nodes.line``.

    :param content: iterable of docutils nodes or strings
    :returns: list of single-element lists (one cell row each)
    """
    result = []
    for entry in content:
        # FIX: isinstance instead of exact type comparison; also drops the
        # unused `current_type` local and dead commented-out code.
        if isinstance(entry, nodes.line):
            entry = str(entry.children[0]) if entry.children else ""
        result.append([nodes.line(text=entry)])
    return result
def run(self):
    """Render the fields related to an action as a docutils field list.

    Blocks up to 30 seconds on a future holding asynchronously-fetched
    field metadata.  Python 2 code (``Queue``, ``dict.iteritems``).
    """
    try:
        fields = self.future_fields.get(timeout=30)
    except Queue.Empty:
        return [self.state_machine.reporter.error(
            "Timed out while fetching fields related to action [%s]" % self.arguments[0]
        )]
    if fields is None:
        return [self.state_machine.reporter.warning(
            "Could not find any field related to the action [%s]" % self.arguments[0]
        )]
    # Optional ``:only:`` option restricts which fields are rendered.
    whitelist = set(self.options.get('only', '').split())
    return [nodes.field_list('', *(
        nodes.field('',
            nodes.field_name(text=v['string'] or k),
            nodes.field_body('',
                # keep help formatting around (e.g. newlines for lists)
                nodes.line_block('', *(
                    nodes.line(text=line)
                    for line in v['help'].split('\n')
                ))
            )
        )
        for k, v in fields.iteritems()
        # if there's a whitelist, only display whitelisted fields
        if not whitelist or k in whitelist
        # only display if there's a help text
        if v.get('help')
    ))]
def _build_table(self, section, title, headers, description=None):
    """Creates a table with given title, headers and description

    :param section: parent node the new table section is attached to
    :param title: section title
    :param headers: list of column header strings
    :param description: optional text rendered above the table
    :returns: Table body node
    """
    table_section = self._create_section(section, title, title=title)
    if description:
        field = nodes.line('', description)
        table_section.append(field)

    table = nodes.table()
    # BUG FIX: ``cols`` must be passed as an attribute; the original passed
    # len(headers) as the rawsource positional argument.
    tgroup = nodes.tgroup(cols=len(headers))
    table += tgroup
    table_section.append(table)

    # Equal-width column for every header.
    for _ in headers:
        tgroup.append(nodes.colspec(colwidth=1))

    # create header
    thead = nodes.thead()
    tgroup += thead
    self._create_table_row(headers, thead)

    # create body consisting of targets
    # BUG FIX: the original created and attached a second, duplicate tbody,
    # producing an invalid table structure.
    tbody = nodes.tbody()
    tgroup += tbody
    return tbody
def run(self):
    """Render each feature argument's test segments as (title, highlighted
    source, output) node triples."""
    doc_nodes = []
    for feature in self.arguments:
        # Each segment is a (title, source, output) triple.
        for title, src, output in get_test_source_code_for_feature(feature):
            # Title goes into a plain line node.
            title_node = nodes.line(title, title)
            # Test source is rendered as a Python-highlighted literal block.
            body = nodes.literal_block(src, src)
            body['language'] = 'python'
            # Captured output gets the same literal-block treatment.
            output_node = nodes.literal_block(output, output)
            doc_nodes.extend((title_node, body, output_node))
    return doc_nodes
def build_toc(descinfo, env):
    """Return a desc table of contents node tree"""
    separator = "—"
    child_ids = descinfo["children"]
    if not child_ids:
        return None
    max_fullname_len = 0
    max_summary_len = 0
    rows = []
    for fullname, refid, summary in ichild_ids(child_ids, env):
        # Track the widest cell of each column to size colspecs below.
        max_fullname_len = max(max_fullname_len, len(fullname))
        max_summary_len = max(max_summary_len, len(summary))
        # Row layout: <ref> — <summary>
        reference_node = toc_ref(fullname, refid)
        ref_entry_node = entry("", line("", "", reference_node))
        sep_entry_node = entry("", Text(separator, ""))
        sum_entry_node = entry("", Text(summary, ""))
        row_node = row("", ref_entry_node, sep_entry_node, sum_entry_node)
        rows.append(row_node)
    col0_len = max_fullname_len + 2  # add error margin
    col1_len = len(separator)  # no padding
    col2_len = max_summary_len + 10  # add error margin
    tbody_node = tbody("", *rows)
    col0_colspec_node = colspec(colwidth=col0_len)
    col1_colspec_node = colspec(colwidth=col1_len)
    col2_colspec_node = colspec(colwidth=col2_len)
    tgroup_node = tgroup(
        "", col0_colspec_node, col1_colspec_node, col2_colspec_node,
        tbody_node, cols=3
    )
    return TocTable("", tgroup_node, classes=["toc"])
def run(self):
    """Image-directive wrapper that attaches Indigo renderer metadata to
    the image node and optionally emits the source code block and
    download-reference links before it."""
    self.arguments = ['']
    # Keep only the options this directive itself understands.
    indigorenderer_options = dict([(k,v) for k,v in self.options.items()
                                   if k in self.own_option_spec])
    text = '\n'.join(self.content)
    (image_node,) = directives.images.Image.run(self)
    if isinstance(image_node, nodes.system_message):
        return [image_node, ]
    # Stashed for the later doctree pass (render_indigorenderer_images).
    image_node.indigorenderer = dict(text=text, options=indigorenderer_options)
    if 'nocode' in self.options or len(text.strip()) == 0:
        return [image_node, ]
    blocks = []
    if indigorenderer_options['indigoobjecttype'] == 'code':
        literal = nodes.literal_block(text, text, line=self.lineno)
        #literal['linenos'] = True
        literal['language'] = 'python'
        blocks = [literal]
    if 'downloads' in self.options:
        # One download reference per comma-separated filename.
        blocks.append(nodes.Text('Input: '))
        for file in self.options['downloads'].split(','):
            download = addnodes.download_reference("", "")
            download += nodes.literal(file, file)
            download['reftarget'] = file
            blocks.append(download)
            blocks.append(nodes.Text(' '))
        blocks.append(nodes.line())
    blocks.append(image_node)
    return blocks
def render_indigorenderer_images(app, doctree):
    """Doctree post-processing pass: replace Indigo image placeholders with
    the rendered image(s) and, optionally, the captured script output.

    On renderer failure the placeholder is replaced by a literal block of
    the source text and a warning is emitted.
    """
    for img in doctree.traverse(nodes.image):
        if not hasattr(img, 'indigorenderer'):
            continue
        text = img.indigorenderer['text']
        options = img.indigorenderer['options']
        try:
            relative_paths, output = render_indigorenderer(
                app, text, options,
                os.path.dirname(doctree.attributes['source']),
                os.path.abspath(os.curdir))
            imgnodes = []
            if 'noimage' not in options:
                # One node per produced image, scaled to share the row.
                for relative_path in relative_paths:
                    newimg = img.copy()
                    newimg['uri'] = relative_path.replace('\\', '/')
                    newimg['scale'] = 1.0 / float(len(relative_paths))
                    imgnodes.append(newimg)
                # Spacer image keeps the row layout intact.
                span = img.copy()
                span['uri'] = relative_uri(app.builder.env.docname, '_static') + '/span.png'
                imgnodes.append(span)
            if output:
                if 'noimage' not in options:
                    imgnodes.append(nodes.line())
                if 'nooutputtitle' not in options:
                    imgnodes.append(nodes.Text('Output:'))
                literal = nodes.literal_block(output, output)
                literal['classes'] += ['output']
                imgnodes.append(literal)
            img.replace_self(imgnodes)
        # FIX: Python-2-only "except X, exc" syntax replaced with the
        # portable "except X as exc" (valid since 2.6); the redundant
        # trailing `continue` at the end of the loop body is dropped.
        except IndigoRendererError as exc:
            app.builder.warn('indigorenderer error: ' + str(exc))
            img.replace_self(nodes.literal_block(text, text))
def process_needlist(app, doctree, fromdocname):
    """
    Replace all needlist nodes with a list of the collected needs.
    Augment each need with a backlink to the original location.
    """
    env = app.builder.env

    for node in doctree.traverse(Needlist):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                node[att] = []
            node.replace_self([])
            continue

        id = node.attributes["ids"][0]
        current_needfilter = env.need_all_needlists[id]
        all_needs = env.needs_all_needs
        content = []
        all_needs = list(all_needs.values())
        found_needs = procces_filters(all_needs, current_needfilter)

        line_block = nodes.line_block()
        for need_info in found_needs:
            para = nodes.line()
            # Base description: "<id>: <title>", optionally with status/tags.
            description = "%s: %s" % (need_info["id"], need_info["title"])
            if current_needfilter["show_status"] and need_info["status"] is not None:
                description += " (%s)" % need_info["status"]
            if current_needfilter["show_tags"] and need_info["tags"] is not None:
                description += " [%s]" % "; ".join(need_info["tags"])
            title = nodes.Text(description, description)

            # Create a reference
            if not need_info["hide"]:
                ref = nodes.reference('', '')
                ref['refdocname'] = need_info['docname']
                ref['refuri'] = app.builder.get_relative_uri(
                    fromdocname, need_info['docname'])
                ref['refuri'] += '#' + need_info['target_node']['refid']
                ref.append(title)
                para += ref
            else:
                # Hidden needs are rendered as plain text without a link.
                para += title
            line_block.append(para)
        content.append(line_block)

        # NOTE(review): content always contains the line_block at this
        # point, so this branch can never fire — confirm intent.
        if len(content) == 0:
            content.append(no_needs_found_paragraph())
        if current_needfilter["show_filters"]:
            content.append(used_filter_paragraph(current_needfilter))

        node.replace_self(content)
def run(self):
    """Build a four-column (Function, Description, Expects, Returns) table
    of tweedledum synthesis functions from Doxygen XML."""
    doc = ET.parse("doxyxml/xml/namespacetweedledum.xml")
    # Members tagged with the custom "synthesis" xrefsect (note the
    # trailing space in the matched paragraph text).
    members = doc.findall("compounddef/sectiondef[@kind='func']/memberdef/detaileddescription/para/xrefsect/xrefdescription/[para='synthesis ']/../../../..")
    table = nodes.table()
    tgroup = nodes.tgroup(cols = 4)
    tgroup += nodes.colspec(colwidth = 50)
    tgroup += nodes.colspec(colwidth = 100)
    tgroup += nodes.colspec(colwidth = 50)
    tgroup += nodes.colspec(colwidth = 50)
    # header
    tgroup += nodes.thead('', nodes.row('', *[nodes.entry('', nodes.line(text = c))
                                              for c in ["Function", "Description", "Expects", "Returns"]]))
    # body
    tbody = nodes.tbody()
    for member in members:
        text = member.find('name').text.strip()
        brief = member.find('briefdescription/para').text.strip()
        # Placeholder defaults used when the alg* xrefsects are missing.
        expects = "foo"
        returns = "bar"
        for e in member.findall('detaileddescription/para/xrefsect'):
            key = e.find('xreftitle').text.strip()
            value = e.find('xrefdescription/para').text.strip()
            if key == "algexpects":
                expects = value
            elif key == "algreturns":
                returns = value
        # Link target: per-file page ([:-4] strips ".xml") plus member id.
        filename = os.path.basename(member.find('location').attrib['file'])[:-4]
        ref = nodes.reference('', text, internal = True)
        ref['refuri'] = 'synthesis/{}.html#{}'.format(filename, member.attrib["id"])
        reft = nodes.paragraph()
        reft.extend([ref])
        function = nodes.entry('', reft)
        description = nodes.entry('', nodes.line(text = brief))
        expects = nodes.entry('', nodes.line(text = expects))
        returns = nodes.entry('', nodes.line(text = returns))
        tbody += nodes.row('', function, description, expects, returns)
    tgroup += tbody
    table += tgroup
    return [table]
def transform_content(self, contentnode):
    """Prepend a debug paragraph (raw signatures, declared names and
    placeholders) to the content when "add_debug_content" is enabled;
    otherwise defer to the base implementation."""
    if not self._debug_enabled("add_debug_content"):
        return super().transform_content(contentnode)

    dbg_info = nodes.paragraph()
    contentnode.insert(0, dbg_info)

    # Raw signature source line(s).
    src_line = nodes.line(text="Src: ")
    dbg_info += src_line
    lb = nodes.literal(text='\n'.join(self.get_signatures()))
    lb["language"] = "text"
    src_line += lb

    def list_join(joiner, values):
        # Interleave *joiner* between consecutive values.
        new_values = [joiner] * (2 * len(values) - 1)
        new_values[0::2] = values
        return new_values

    # Flatten the (names, placeholders) pairs collected during parsing.
    names = []
    placeholders = []
    for n, p in self.names:
        names.extend(n)
        placeholders.extend(p)

    if names:
        decls = nodes.line(text="Declares: ")
        dbg_info += decls
        if not self.refname:
            decls += list_join(
                nodes.Text(","),
                [nodes.literal(text=n.strip()) for n in names])
        else:
            # With a refname only the first declared name is shown,
            # aliased to the refname and qualified with the enclosing
            # namespace when one exists.
            decls += nodes.literal(text=names[0].strip())
            decls += nodes.Text(" as ")
            decls += nodes.literal(text=self.refname)
            namespace = VerilogQualifiedIdentifier(
                self.parent_object.qualified_name[1:])
            if len(namespace) > 0:
                decls += nodes.Text(" in ")
                decls += nodes.literal(text=str(namespace))

    if placeholders:
        refs = nodes.line(text="Placeholders: ")
        dbg_info += refs
        refs += list_join(
            nodes.Text(","),
            [nodes.literal(text=n.strip()) for n in placeholders])
def _create_enum_node(self, enum_values):
    """Build a compound node listing the allowed enumerated values as a
    bold markdown bullet list under an explanatory line."""
    container = nodes.compound()
    container.append(nodes.line(
        text='Only the following values are valid for this node:'))
    # Render each value as a bold markdown bullet, then convert.
    bullet_md = '\n'.join('* **{}**'.format(value) for value in enum_values)
    container.extend(self._markdown_to_nodes(bullet_md, ''))
    return container
def make_item(id, tags):
    """Build a list item linking to the Code Review Meta post *id*, with an
    optional second line of tag nodes."""
    name = extracters.get_post_title(id)
    post = Post(
        f'https://codereview.meta.stackexchange.com/q/{id}/42401',
        name,
        name,
    )
    if tags:
        # Post link on the first line, tags on the second.
        inner = nodes.line_block(
            '',
            nodes.line('', '', post),
            nodes.line('', '', *[MTag(tag) for tag in tags]),
        )
    else:
        inner = nodes.paragraph()
        inner.append(post)
    return nodes.list_item('', inner)
def addDownloadsNodes():
    """Append an "Input:" label plus one download reference per file from
    the ``downloads`` option, followed by a blank line.

    NOTE(review): relies on ``self`` and ``blocks`` from an enclosing
    scope; this reads as a closure extracted from a directive's run() —
    confirm the surrounding context provides both.
    """
    if 'downloads' in self.options:
        blocks.append(nodes.Text('Input: '))
        for file in self.options['downloads'].split(','):
            download = addnodes.download_reference("", "")
            download += nodes.literal(file, file)
            download['reftarget'] = file
            blocks.append(download)
            blocks.append(nodes.Text(' '))
        blocks.append(nodes.line())
def addDownloadsNodes ():
    """Append an "Input:" label plus one download reference per file from
    the ``downloads`` option, followed by a blank line.

    NOTE(review): duplicate of the closure above; relies on ``self`` and
    ``blocks`` from an enclosing scope — confirm context.
    """
    if 'downloads' in self.options:
        blocks.append(nodes.Text('Input: '))
        for file in self.options['downloads'].split(','):
            download = addnodes.download_reference("", "")
            download += nodes.literal(file, file)
            download['reftarget'] = file
            blocks.append(download)
            blocks.append(nodes.Text(' '))
        blocks.append(nodes.line())
def run(self):
    """Image directive variant that registers named code snippets, then
    emits code block, download links and the image — ordering depends on
    whether a code block is shown."""
    self.arguments = ['']
    indigorenderer_options = dict([(k, v) for k, v in self.options.items()
                                   if k in self.own_option_spec])
    text = '\n'.join(self.content)
    # Named snippets are stored for reuse by other directives.
    if 'codename' in indigorenderer_options:
        registerCodeDict(indigorenderer_options['codename'], text)
    (image_node, ) = directives.images.Image.run(self)
    if isinstance(image_node, nodes.system_message):
        return [
            image_node,
        ]
    # Stashed for the later doctree render pass.
    image_node.indigorenderer = dict(text=text, options=indigorenderer_options)
    blocks = []

    def addImagesNodes():
        blocks.append(image_node)

    # Show the code block unless suppressed, empty, or not code type.
    need_code = 'nocode' not in self.options and len(text.strip(
    )) > 0 and indigorenderer_options['indigoobjecttype'] == 'code'

    def addCodeNodes():
        if need_code:
            literal = nodes.literal_block(text, text, line=self.lineno)
            #literal['linenos'] = True
            literal['language'] = 'python'
            blocks.append(literal)

    def addDownloadsNodes():
        if 'downloads' in self.options:
            blocks.append(nodes.Text('Input: '))
            for file in self.options['downloads'].split(','):
                download = addnodes.download_reference("", "")
                download += nodes.literal(file, file)
                download['reftarget'] = file
                blocks.append(download)
                blocks.append(nodes.Text(' '))
            blocks.append(nodes.line())

    # Code first when shown; otherwise image first, then downloads.
    if need_code:
        addCodeNodes()
        addDownloadsNodes()
        addImagesNodes()
    else:
        addImagesNodes()
        blocks.append(nodes.line())
        addDownloadsNodes()
    return blocks
def add_bullet_point(app, fromdocname, docname, ref_name):
    """Return a line node: a bullet character followed by an emphasized
    reference link from *fromdocname* to *docname*."""
    bullet_line = nodes.line()
    bullet_line += nodes.Text(' • ', ' • ')
    # Build the cross-document reference wrapping the emphasized label.
    ref = nodes.reference('', '')
    ref['refdocname'] = docname
    ref['refuri'] = app.builder.get_relative_uri(fromdocname, docname)
    ref.append(nodes.emphasis(_(ref_name), _(ref_name)))
    bullet_line += ref
    return bullet_line
def run(self):
    """Render a checkbox-style answer option: a filled square when in
    answer mode and marked ``correct``, an empty square otherwise."""
    self.assert_has_content()
    set_classes(self.options)
    container = nodes.Element()
    self.add_name(container)
    self.state.nested_parse(self.content, self.content_offset, container)
    # ANSWER MODE
    if 'ANS' in os.environ or self.options.get('force', None):
        if 'correct' in self.options:
            # U+25A3 (filled square) marks the correct option.
            pfx = nodes.line(text=u'\u00A0\u25A3\u00A0\u00A0')
        else:
            # U+25A2 (empty square) for incorrect options.
            pfx = nodes.line(text=u'\u00A0\u25A2\u00A0\u00A0')
    else:
        # EMPTY ANSWER
        pfx = nodes.line(text=u'\u00A0\u25A2\u00A0\u00A0')
    # The box is prepended to the first parsed child node.
    container.children[0].insert(0, pfx)
    return container.children
def _build_properties(self, k, v, definition):
    """Build schema property documentation

    :returns: None
    """
    if isinstance(v, schema.Map):
        newdef = self._create_section(definition, k, term=k)
        if v.schema is None:
            # if it's a map for arbritary values, only include description
            field = nodes.line('', v.description)
            newdef.append(field)
            return
        newdeflist = self._create_def_list(newdef)
        sorted_schema = sorted(v.schema.items(),
                               key=cmp_to_key(self._sort_by_type))
        for key, value in sorted_schema:
            # Recurse into each nested map entry.
            self._build_properties(key, value, newdeflist)
    elif isinstance(v, schema.List):
        newdef = self._create_section(definition, k, term=k)
        # identify next section as list properties
        field = nodes.line()
        emph = nodes.emphasis('', 'List properties:')
        field.append(emph)
        newdef.append(field)
        newdeflist = self._create_def_list(newdef)
        # '*' is the wildcard schema applying to every list element.
        self._build_properties('**', v.schema['*'], newdeflist)
    else:
        newdef = self._create_section(definition, k, term=k)
        if 'description' in v:
            field = nodes.line('', v['description'])
            newdef.append(field)
        else:
            # NOTE(review): '++' appears to be a placeholder for a missing
            # description — confirm this is the intended output.
            field = nodes.line('', '++')
            newdef.append(field)
def add_name_node(self, container, name):
    """Append a styled parameter-name line (raw HTML <code>) to
    *container* and return the new line node."""
    line = nodes.line()
    line["classes"] = ["parameter__name"]
    # The name is emitted as raw HTML so it renders as inline code.
    html = '<code class="docutils">' + name + '</code>'
    line += [nodes.raw(name, html, format="html")]
    container += [line]
    return line
def run(self):
    """Image directive variant (duplicate of the reformatted version above
    in spirit): registers named code, then emits code block, download
    links and the image in an order depending on ``nocode``."""
    self.arguments = ['']
    indigorenderer_options = dict([(k,v) for k,v in self.options.items()
                                   if k in self.own_option_spec])
    text = '\n'.join(self.content)
    # Named snippets are stored for reuse by other directives.
    if 'codename' in indigorenderer_options:
        registerCodeDict(indigorenderer_options['codename'], text)
    (image_node,) = directives.images.Image.run(self)
    if isinstance(image_node, nodes.system_message):
        return [image_node, ]
    # Stashed for the later doctree render pass.
    image_node.indigorenderer = dict(text=text, options=indigorenderer_options)
    blocks = []

    def addImagesNodes ():
        blocks.append(image_node)

    # Show the code block unless suppressed, empty, or not code type.
    need_code = 'nocode' not in self.options and len(text.strip()) > 0 and indigorenderer_options['indigoobjecttype'] == 'code'

    def addCodeNodes ():
        if need_code:
            literal = nodes.literal_block(text, text, line=self.lineno)
            #literal['linenos'] = True
            literal['language'] = 'python'
            blocks.append(literal)

    def addDownloadsNodes ():
        if 'downloads' in self.options:
            blocks.append(nodes.Text('Input: '))
            for file in self.options['downloads'].split(','):
                download = addnodes.download_reference("", "")
                download += nodes.literal(file, file)
                download['reftarget'] = file
                blocks.append(download)
                blocks.append(nodes.Text(' '))
            blocks.append(nodes.line())

    # Code first when shown; otherwise image first, then downloads.
    if need_code:
        addCodeNodes()
        addDownloadsNodes()
        addImagesNodes()
    else:
        addImagesNodes()
        blocks.append(nodes.line())
        addDownloadsNodes()
    return blocks
def run(self):
    """Render the API name as a heading line with an optional inline
    version badge taken from the ``version`` option."""
    heading = nodes.line(text=self.arguments[0] + " ")
    heading["classes"].extend(["api-name", "h2"])
    if "version" in self.options:
        # Version is rendered inline, e.g. "v2".
        badge = nodes.inline(text="v" + str(self.options["version"]))
        badge["classes"].append("api-name__version")
        heading += [badge]
    return [heading]
def run(self):
    """Directive implementing a line block; each non-blank line records
    its original leading-whitespace width in ``line.indent`` for nested
    rendering."""
    self.assert_has_content()
    block = nodes.line_block(classes=self.options.get("class", []))
    node_list = [block]
    for line_text in self.content:
        # Inline markup is parsed on the stripped text.
        text_nodes, messages = self.state.inline_text(
            line_text.strip(), self.lineno + self.content_offset)
        line = nodes.line(line_text, "", *text_nodes)
        if line_text.strip():
            # Remember the indentation width of the original line.
            line.indent = len(line_text) - len(line_text.lstrip())
        block += line
        node_list.extend(messages)
        self.content_offset += 1
    # Convert recorded indents into nested line blocks.
    self.state.nest_line_block_lines(block)
    return node_list
def _build_backend_detail(self, matrix, content):
    """Append a "Backend Details" subtitle and one detail table per
    backend (each followed by a blank line) to *content*; returns it."""
    content.append(nodes.subtitle(text="Backend Details"))
    # Iterating the dict yields its keys, same as six.iterkeys().
    for backend_key in matrix.backends:
        detail_table = self._build_backend_detail_table(
            matrix.backends[backend_key], matrix)
        content.append(detail_table)
        content.append(nodes.line())
    return content
def visit_H2Node(self, node):
    """Translator visit: capture the node's text, then rewrap its children
    as strong text inside a line block, replacing the children in place."""
    self.h2Text = node.astext()
    self.h2Text = sphinxEncode(self.h2Text)
    strong = nodes.strong("")
    # Move (not copy) the existing children under the strong node.
    strong.children = node.children
    line = nodes.line("")
    line.append(strong)
    line_block = nodes.line_block("")
    line_block.append(line)
    # Replace the original children with the wrapped structure.
    node.children = []
    node.append(line_block)
def run(self):
    """Directive run(): build a line_block node, preserving each content
    line's original indentation via the ``indent`` attribute."""
    self.assert_has_content()
    block = nodes.line_block(classes=self.options.get('class', []))
    result = [block]
    for raw_line in self.content:
        stripped = raw_line.strip()
        inline_nodes, sys_messages = self.state.inline_text(
            stripped, self.lineno + self.content_offset)
        line_node = nodes.line(raw_line, '', *inline_nodes)
        if stripped:
            # Record the width of the original leading whitespace.
            line_node.indent = len(raw_line) - len(raw_line.lstrip())
        block += line_node
        result.extend(sys_messages)
        self.content_offset += 1
    # Turn recorded indents into nested line blocks.
    self.state.nest_line_block_lines(block)
    return result
def meta_links_all(self, prefix='', postfix='', exclude=None):
    """
    Documents all used link types for the current need automatically.

    :param prefix: prefix string
    :param postfix: postfix string
    :param exclude: list of extra link type names, which are excluded from output
    :return: docutils nodes
    """
    if exclude is None:
        exclude = []
    data_container = []
    for link_type in self.app.config.needs_extra_links:
        # Outgoing links are stored under the plain option name.
        type_key = link_type['option']
        if self.need[type_key] is not None and len(
                self.need[type_key]) > 0 and type_key not in exclude:
            outgoing_line = nodes.line()
            outgoing_label = prefix + '{}:'.format(
                link_type['outgoing']) + postfix + ' '
            outgoing_line += self._parse(outgoing_label)
            outgoing_line += self.meta_links(link_type['option'], incoming=False)
            data_container.append(outgoing_line)
        # Incoming (back) links are stored under "<option>_back".
        type_key = link_type['option'] + '_back'
        if self.need[type_key] is not None and len(
                self.need[type_key]) > 0 and type_key not in exclude:
            incoming_line = nodes.line()
            incoming_label = prefix + '{}:'.format(
                link_type['incoming']) + postfix + ' '
            incoming_line += self._parse(incoming_label)
            incoming_line += self.meta_links(link_type['option'], incoming=True)
            data_container.append(incoming_line)
    return data_container
def new_workflow_entry_section(self, workflow, ids) -> nodes.section:
    """Create a titled section node for *workflow*, optionally followed by
    its multi-line description rendered as a line block."""
    self.logger.verbose("Generating entry for {}".format(workflow.name))
    normalized = nodes.fully_normalize_name(workflow.name)
    section = nodes.section(ids=[ids], names=[normalized])
    section.append(nodes.title(text=workflow.name, ids=[ids]))
    # Description is skipped when absent or explicitly suppressed.
    if "nodescription" not in self.options and workflow.description:
        description = nodes.line_block()
        for text in workflow.description.split("\n"):
            description += nodes.line(text=text)
        section.append(description)
    return section
def _build_backend_detail(self, matrix, content):
    """Append a "Backend Details" subtitle plus one detail table per
    backend (each followed by a blank line) to *content*; returns it."""
    detailstitle = nodes.subtitle(text="Backend Details")
    content.append(detailstitle)
    for key in six.iterkeys(matrix.backends):
        content.append(
            self._build_backend_detail_table(
                matrix.backends[key], matrix))
        # Blank line separates consecutive backend tables.
        content.append(nodes.line())
    return content
def line_block(name, arguments, options, content, lineno, content_offset,
               block_text, state, state_machine):
    """Old-style (function-based) docutils directive implementing a line
    block; records each line's original indentation for nesting."""
    if not content:
        warning = state_machine.reporter.warning(
            'Content block expected for the "%s" directive; none found.' % name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return [warning]
    block = nodes.line_block(classes=options.get('class', []))
    node_list = [block]
    for line_text in content:
        # Inline markup is parsed on the stripped text.
        text_nodes, messages = state.inline_text(line_text.strip(),
                                                 lineno + content_offset)
        line = nodes.line(line_text, '', *text_nodes)
        if line_text.strip():
            # Remember the indentation width of the original line.
            line.indent = len(line_text) - len(line_text.lstrip())
        block += line
        node_list.extend(messages)
        content_offset += 1
    # Convert recorded indents into nested line blocks.
    state.nest_line_block_lines(block)
    return node_list
def handle_signature(self, sig, signode):
    """
    Transform an event signature into RST nodes.
    Returns the name of the event
    """
    m = event_sig_re.match(sig)
    if m is None:
        # todo: add warning
        raise ValueError
    (name, arglist) = m.groups()
    line0 = nodes.line('', u"Event: ")
    line0 += sphinx.addnodes.desc_name(name, '"' + name + '"')
    if arglist is None:
        # No argument list: only the name is registered, nothing rendered.
        return name
    signode += line0
    signode += nodes.Text(u'callback')
    paramlist = addnodes.desc_parameterlist()
    arglist = arglist.replace('`', '').replace('\\ ', '')  # remove markup
    args = arglist.split(',')
    # Every callback implicitly receives these two extra parameters.
    args.insert(0, u'std::string eventName')
    args.append(u'std::string subscriberIdentifier')
    for arg in args:
        arg = arg.strip()
        if not arg:
            continue
        param = addnodes.desc_parameter('', '', noemph=True)
        try:
            # "type name" — the last space splits type from name.
            argtype, argname = arg.rsplit(' ', 1)
        except ValueError:
            # no argument name given, only the type
            argtype = arg
            _parse_type(param, argtype)
        else:
            _parse_type(param, argtype)
            param += nodes.emphasis(argname, ' '+argname)
        paramlist += param
    signode += paramlist
    return name
def contribute_property(self, parent, prop_key, prop, upd_para=None,
                        id_pattern_prefix=None, sub_prop=False):
    """Render one property (and, recursively, its sub-schema) as nodes.

    :param parent: node to attach the property section to
    :param prop_key: property name
    :param prop: property schema object
    :param upd_para: update-policy line inherited from the parent
        property, reused for sub-properties
    :param id_pattern_prefix: prefix for generated section ids;
        '%s-prop' is used when not given (the %s is filled in later)
    :param sub_prop: True when rendering a nested schema entry
    """
    if not id_pattern_prefix:
        id_pattern_prefix = '%s-prop'
    id_pattern = id_pattern_prefix + '-' + prop_key

    definition = self._prop_section(parent, prop_key, id_pattern)

    self._status_str(prop.support_status, definition)

    if not prop.implemented:
        para = nodes.line('', _('Not implemented.'))
        note = nodes.note('', para)
        definition.append(note)
        return

    # Only leaf sub-properties get an explicit Required/Optional line.
    if sub_prop and prop.type not in (properties.Schema.LIST,
                                      properties.Schema.MAP):
        if prop.required:
            para = nodes.line('', _('Required.'))
            definition.append(para)
        else:
            para = nodes.line('', _('Optional.'))
            definition.append(para)

    if prop.description:
        para = nodes.line('', prop.description)
        definition.append(para)

    # Renamed from ``type`` to avoid shadowing the builtin.
    type_line = nodes.line('', _('%s value expected.') % prop.type)
    definition.append(type_line)

    if upd_para is not None:
        # Sub-properties inherit the parent's update-policy line.
        definition.append(upd_para)
    else:
        if prop.update_allowed:
            upd_para = nodes.line(
                '', _('Can be updated without replacement.'))
            definition.append(upd_para)
        elif prop.immutable:
            upd_para = nodes.line('', _('Updates are not supported. '
                                        'Resource update will fail on '
                                        'any attempt to update this '
                                        'property.'))
            definition.append(upd_para)
        else:
            upd_para = nodes.line('', _('Updates cause replacement.'))
            definition.append(upd_para)

    if prop.default is not None:
        para = nodes.line('', _('Defaults to '))
        default = nodes.literal('', json.dumps(prop.default))
        para.append(default)
        definition.append(para)

    for constraint in prop.constraints:
        para = nodes.line('', str(constraint))
        definition.append(para)

    sub_schema = None
    if prop.schema and prop.type == properties.Schema.MAP:
        para = nodes.line()
        emph = nodes.emphasis('', _('Map properties:'))
        para.append(emph)
        definition.append(para)
        sub_schema = prop.schema
    elif prop.schema and prop.type == properties.Schema.LIST:
        para = nodes.line()
        emph = nodes.emphasis('', _('List contents:'))
        para.append(emph)
        definition.append(para)
        sub_schema = prop.schema

    if sub_schema:
        indent = nodes.definition_list()
        definition.append(indent)
        for _key, _prop in sorted(sub_schema.items(),
                                  key=cmp_to_key(self.cmp_prop)):
            if _prop.support_status.status != support.HIDDEN:
                self.contribute_property(
                    indent, _key, _prop, upd_para, id_pattern,
                    sub_prop=True)
def process(app, doctree, fromdocname):
    """Replace ``change_list`` nodes with the collected version changes.

    Each collected change is rendered as a line carrying a backlink
    to its original location in the documentation.
    """
    env = app.builder.env

    changelists = doctree.traverse(change_list)
    if not changelists:
        return

    version_changes = getattr(env, 'changelist_versionchanges', {})

    # Prefix label for each directive type; other types get no label.
    label_by_type = {
        'versionchanged': '[Changed] ',
        'versionadded': '[New] ',
        'deprecated': '[Deprecated] ',
    }

    content = {}
    for version, changes in version_changes.items():
        for target, change in changes:
            para = nodes.line(classes=['changes-source'])

            # Create a reference back to the original change location.
            newnode = nodes.reference('', '', internal=True)
            label = label_by_type.get(change['type'])
            if label is not None:
                para.append(nodes.Text(label))

            # Walk forward to the first Text node, without descending
            # into inline nodes.
            next_node = change.next_node(ascend=True)
            while next_node is not None and not isinstance(next_node,
                                                           nodes.Text):
                descend = not isinstance(next_node, nodes.inline)
                next_node = next_node.next_node(descend=descend,
                                                ascend=True)
            if next_node:
                newnode.append(next_node)

            try:
                newnode['refuri'] = app.builder.get_relative_uri(
                    fromdocname, change['docname'])
                newnode['refuri'] += '#' + target['refid']
            except NoUri:
                # ignore if no URI can be determined, e.g. for LaTeX output
                pass
            para.append(newnode)
            content.setdefault(version, []).append(para)

    for node in changelists:
        sorted_content = []
        for ver, comment, items in node.versions:
            heading = '%s %s' % (ver, comment)
            sorted_content.append(nodes.title(heading, heading))
            for item in items:
                item_line = nodes.line()
                item_line.append(nodes.Text(item))
                sorted_content.append(item_line)
            sorted_content.extend(content.get(ver, []))
        node.replace_self(sorted_content)
def create_hoverlist(app, doctree, fromdocname):
    """Replace ``hoverlist`` nodes with a sorted list of translations.

    Reads the ``LIST_OF_HOVER_TERMS`` file and renders one line (mini
    list) or paragraph (full list) per unique term.
    """
    # If translation lists are disabled, drop every hoverlist node and
    # stop. (The original re-checked the config inside both loops; the
    # inner checks were redundant after this guard.)
    if not app.config.hover_translationList:
        for node in doctree.traverse(hoverlist):
            node.replace_self([])
        return

    # words maps each translated term (citation form) to its translation.
    words = {}
    content = []

    # Fix: use a context manager so the file is closed even if reading
    # raises. NOTE(review): the commented-out codecs.open in the
    # original suggests the file may be UTF-8; confirm whether an
    # explicit encoding argument is needed here.
    with open("LIST_OF_HOVER_TERMS", 'r') as listfile:
        listcontents = listfile.readlines()

    for line in listcontents:
        # Fields are ';'-separated; strip everything up to and
        # including the first single quote in each field.
        fields = line.split(";")
        for idx, entry in enumerate(fields):
            beginindex = entry.find("'")
            fields[idx] = entry[beginindex + 1:]
        citationform = fields[2]
        translation = fields[3]
        # Keep only the first occurrence of each term.
        if citationform in words:
            continue
        words[citationform] = translation

    # Add words and translations (sorted) to nodes.
    for key, value in sorted(words.items()):
        wordnode = nodes.emphasis(key, key)
        translationstring = " : " + value
        if app.config.hover_miniTranslationList:
            # Smaller list: one line per term, with an explicit break.
            translationstring += "\n"
            para = nodes.line()
        else:
            # Larger list: one paragraph per term.
            para = nodes.paragraph()
        para += wordnode
        para += nodes.Text(translationstring)
        content.append(para)

    # Replace all hoverlist nodes with the translations.
    for node in doctree.traverse(hoverlist):
        node.replace_self(content)
    return
def process_postlist(app, doctree, docname):
    """Replace `PostList` nodes with lists of posts.  Also, register all
    posts if they have not been registered yet."""
    blog = Blog()
    if not blog:
        register_posts(app)
    for node in doctree.traverse(PostList):
        # Collect the catalog collections named by the node's options.
        colls = []
        for cat in ['tags', 'author', 'category', 'location', 'language']:
            for coll in node[cat]:
                if coll in blog.catalogs[cat].collections:
                    colls.append(blog.catalogs[cat].collections[coll])
        if colls:
            # Intersect the selected collections, newest first, then
            # truncate to the requested length.
            posts = set(blog.posts)
            for coll in colls:
                posts = posts & set(coll)
            posts = list(posts)
            posts.sort(reverse=True)
            posts = posts[:node.attributes['length']]
        else:
            # No filters given: fall back to the most recent posts.
            posts = list(blog.recent(node.attributes['length'], docname,
                                     **node.attributes))
        if node.attributes['sort']:
            posts.sort()  # in reverse chronological order, so no reverse=True
        # Validate the format string's replacement fields up front.
        # NOTE(review): Formatter().parse yields key=None for a trailing
        # literal segment, which this membership test would reject with
        # a KeyError — confirm whether such formats are expected.
        fmts = list(Formatter().parse(node.attributes['format']))
        for text, key, __, __ in fmts:
            if key not in {'date', 'title', 'author', 'location',
                           'language', 'category', 'tags'}:
                raise KeyError('{} is not recognized in postlist format'
                               .format(key))
        excerpts = node.attributes['excerpts']
        date_format = node.attributes['date'] or _(
            blog.post_date_format_short)
        bl = nodes.bullet_list()
        bl.attributes['classes'].append('post-list-style-' +
                                        node['list-style'])
        for post in posts:
            bli = nodes.list_item()
            bl.append(bli)
            par = nodes.paragraph()
            bli.append(par)
            # Render each format segment: literal text, then the value
            # for its replacement field.
            for text, key, __, __ in fmts:
                if text:
                    par.append(nodes.Text(text))
                if key == 'date':
                    par.append(nodes.Text(post.date.strftime(date_format)))
                else:
                    if key == 'title':
                        items = [post]
                    else:
                        items = getattr(post, key)
                    for i, item in enumerate(items, start=1):
                        if key == 'title':
                            # Link the title to the post's document.
                            ref = nodes.reference()
                            ref['refuri'] = app.builder.get_relative_uri(
                                docname, item.docname)
                            ref['ids'] = []
                            ref['backrefs'] = []
                            ref['dupnames'] = []
                            ref['classes'] = []
                            ref['names'] = []
                            ref['internal'] = True
                            emp = nodes.emphasis()
                            ref.append(emp)
                            emp.append(nodes.Text(text_type(item)))
                        else:
                            # Cross-reference into the catalog page.
                            ref = _missing_reference(app, item.xref, docname)
                        par.append(ref)
                        # Comma-separate all but the last item.
                        if i < len(items):
                            par.append(nodes.Text(', '))
            if excerpts and post.excerpt:
                # Resolve and append a copy of the post's excerpt nodes.
                for enode in post.excerpt:
                    enode = enode.deepcopy()
                    revise_pending_xrefs(enode, docname)
                    app.env.resolve_references(enode, docname, app.builder)
                    enode.parent = bli.parent
                    bli.append(enode)
                bli.append(nodes.line())
        node.replace_self(bl)