def build_toc(descinfo, env):
    """Return a desc table of contents node tree"""
    separator = EMDASH
    child_ids = descinfo['children']
    # No children means no TOC is needed at all.
    if not child_ids:
        return None
    max_fullname_len = 0
    max_summary_len = 0
    rows = []
    # One 3-cell row per child: reference | separator | summary.
    # Track the widest name/summary to size the columns afterwards.
    for fullname, refid, summary in ichild_ids(child_ids, env):
        max_fullname_len = max(max_fullname_len, len(fullname))
        max_summary_len = max(max_summary_len, len(summary))
        reference_node = toc_ref(fullname, refid)
        ref_entry_node = entry('', paragraph('', '', reference_node))
        sep_entry_node = entry('', paragraph('', separator))
        sum_entry_node = entry('', paragraph('', summary))
        row_node = row('', ref_entry_node, sep_entry_node, sum_entry_node)
        rows.append(row_node)
    # Column widths derived from the longest content in each column.
    col0_len = max_fullname_len + 2  # add error margin
    col1_len = len(separator)  # no padding
    col2_len = max_summary_len + 10  # add error margin
    tbody_node = tbody('', *rows)
    col0_colspec_node = colspec(colwidth=col0_len)
    col1_colspec_node = colspec(colwidth=col1_len)
    col2_colspec_node = colspec(colwidth=col2_len)
    tgroup_node = tgroup('', col0_colspec_node, col1_colspec_node,
                         col2_colspec_node, tbody_node, cols=3)
    return TocTable('', tgroup_node, classes=['toc'])
def create_cross_table(self, app, docname, node, matrix, options):
    """Build a cross-reference matrix table wrapped in a
    ``traceable_matrix_crosstable`` container.

    Rows are ``matrix.primaries``, columns are ``matrix.secondaries``;
    a checkmark marks each related (primary, secondary) pair.
    """
    table = nodes.table()
    table["classes"].append("traceables-crosstable")
    tgroup = nodes.tgroup(cols=len(matrix.secondaries), colwidths="auto")
    table += tgroup
    # Add column specifications: one stub column plus one per secondary.
    tgroup += nodes.colspec(colwidth=1)
    for column in matrix.secondaries:
        tgroup += nodes.colspec(colwidth=1)
    # Add heading row.
    thead = nodes.thead()
    tgroup += thead
    row = nodes.row()
    thead += row
    entry = nodes.entry()  # empty top-left corner cell
    row += entry
    for secondary in matrix.secondaries:
        entry = nodes.entry()
        row += entry
        container = nodes.container()
        entry += container
        inline = nodes.inline()
        container += inline
        paragraph = nodes.paragraph()
        inline += paragraph
        paragraph += secondary.make_reference_node(app.builder, docname)
    # Add table body.
    tbody = nodes.tbody()
    tgroup += tbody
    for primary in matrix.primaries:
        row = nodes.row()
        tbody += row
        entry = nodes.entry()
        row += entry
        paragraph = nodes.paragraph()
        entry += paragraph
        paragraph += primary.make_reference_node(app.builder, docname)
        for is_related in matrix.get_boolean_row(primary):
            entry = nodes.entry()
            row += entry
            # Related pairs get a checkmark; unrelated cells stay empty.
            # (BUGFIX: removed dead `else: continue` no-op branch and
            # stale commented-out attribute assignments.)
            if is_related:
                checkmark = traceable_checkmark()
                entry += checkmark
                checkmark += nodes.inline(u"\u2714", u"\u2714")
    container = traceable_matrix_crosstable()
    container += table
    container["traceables-matrix"] = matrix
    return container
def build_table(row_nodes, colwidth_list, headrow_data=None):
    """Assemble an rst table node tree from prepared row nodes.

    Args:
        row_nodes (list): docutils.nodes.row nodes holding the table body.
        colwidth_list (list): per-column width percentages, e.g. [10, 90]
            for two columns where the first takes 10% of the width.
        headrow_data: optional data handed to build_row() for a header row.

    Returns:
        docutils.nodes.table: rst table node tree containing the rows.
    """
    table = nodes.table()
    tgroup = nodes.tgroup(cols=len(colwidth_list))
    table += tgroup
    # One colspec per column, in declaration order.
    for width in colwidth_list:
        tgroup += nodes.colspec(colwidth=width)
    # Optional heading row.
    if headrow_data is not None:
        thead = nodes.thead()
        tgroup += thead
        thead += build_row(headrow_data)
    body = nodes.tbody()
    tgroup += body
    for row_node in row_nodes:
        body += row_node
    return table
def _description_table(self, descriptions, widths, headers):
    """Build a table node: a header row plus one body row per description,
    with each cell parsed as nested rst."""
    # Table skeleton.
    tgroup = nodes.tgroup(cols=len(widths))
    for w in widths:
        tgroup += nodes.colspec(colwidth=w)
    table = nodes.table()
    table += tgroup
    # Header row.
    thead = nodes.thead()
    header_row = nodes.row()
    for caption in headers:
        cell = nodes.entry()
        cell += nodes.paragraph(text=caption)
        header_row += cell
    thead += header_row
    tgroup += thead
    # Body: every attribute value is rendered through the rst parser.
    tbody = nodes.tbody()
    for desc in descriptions:
        body_row = nodes.row()
        for attr in desc:
            cell = nodes.entry()
            if not isinstance(attr, string_types):
                attr = str(attr)
            self.state.nested_parse(ViewList([attr], source=attr), 0, cell)
            body_row += cell
        tbody += body_row
    tgroup += tbody
    return table
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
    """Build a docutils table from a 2D list of already-built cell nodes.

    The first `header_rows` rows go into thead, the rest into tbody; the
    first `stub_columns` columns are flagged as stub columns.
    """
    table = nodes.table()
    tgroup = nodes.tgroup(cols=len(col_widths))
    table += tgroup
    # Column specs, consuming the stub-column budget left to right.
    for width in col_widths:
        spec = nodes.colspec(colwidth=width)
        if stub_columns:
            spec.attributes['stub'] = 1
            stub_columns -= 1
        tgroup += spec
    # Wrap every cell in an entry node and collect the rows.
    all_rows = []
    for record in table_data:
        row_node = nodes.row()
        for cell in record:
            wrapper = nodes.entry()
            wrapper += cell
            row_node += wrapper
        all_rows.append(row_node)
    # Split the rows between header and body.
    if header_rows:
        thead = nodes.thead()
        thead.extend(all_rows[:header_rows])
        tgroup += thead
    tbody = nodes.tbody()
    tbody.extend(all_rows[header_rows:])
    tgroup += tbody
    return table
def get_tablespec(self):
    """Return (table, table_spec, append_row) for a two-column summary table.

    ``append_row`` is a closure that parses each cell text as rst and
    appends one row to the table body.
    """
    table_spec = addnodes.tabular_col_spec()
    table_spec['spec'] = 'll'
    table = autosummary_table('')
    real_table = nodes.table('', classes=['longtable'])
    table.append(real_table)
    group = nodes.tgroup('', cols=2)
    real_table.append(group)
    group.append(nodes.colspec('', colwidth=10))
    group.append(nodes.colspec('', colwidth=90))
    body = nodes.tbody('')
    group.append(body)

    def append_row(*column_texts):
        # Parse each cell text as rst and append one row to the body.
        row = nodes.row('')
        for text in column_texts:
            node = nodes.paragraph('')
            vl = ViewList()
            vl.append(text, '<autosummary>')
            self.state.nested_parse(vl, 0, node)
            try:
                # Unwrap a lone top-level paragraph so the cell stays compact.
                if isinstance(node[0], nodes.paragraph):
                    node = node[0]
            except IndexError:
                pass
            row.append(nodes.entry('', node))
        body.append(row)
    return table, table_spec, append_row
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
    """Build a docutils table from pre-built cell nodes, honouring the
    directive's ``self.widths`` option for column-width classes."""
    table = nodes.table()
    # Record how column widths were determined so writers can style them.
    if self.widths == 'auto':
        table['classes'] += ['colwidths-auto']
    elif self.widths:  # "grid" or list of integers
        table['classes'] += ['colwidths-given']
    tgroup = nodes.tgroup(cols=len(col_widths))
    table += tgroup
    # Column specs; a None width leaves colwidth unset.
    for width in col_widths:
        spec = nodes.colspec()
        if width is not None:
            spec.attributes['colwidth'] = width
        if stub_columns:
            spec.attributes['stub'] = 1
            stub_columns -= 1
        tgroup += spec
    # Wrap every cell in an entry node and collect the rows.
    all_rows = []
    for record in table_data:
        row_node = nodes.row()
        for cell in record:
            wrapper = nodes.entry()
            wrapper += cell
            row_node += wrapper
        all_rows.append(row_node)
    # Split the rows between header and body.
    if header_rows:
        thead = nodes.thead()
        thead.extend(all_rows[:header_rows])
        tgroup += thead
    tbody = nodes.tbody()
    tbody.extend(all_rows[header_rows:])
    tgroup += tbody
    return table
def create_table(self, data, num_headers=1):
    """Build a table from a 2D list; the first `num_headers` rows become
    header rows. An empty `data` yields a bare table node."""
    table_node = nodes.table()
    if len(data) > 0:
        ncols = len(data[0])
        tgroup_node = nodes.tgroup(cols=ncols)
        table_node += tgroup_node
        # Distribute the available width evenly over all columns.
        width = 100 // ncols
        for _ in range(ncols):
            tgroup_node += nodes.colspec(colwidth=width)
        thead = nodes.thead()
        tgroup_node += thead
        tbody = nodes.tbody()
        tgroup_node += tbody
        for row_index, row in enumerate(data):
            row_node = nodes.row()
            is_header = row_index < num_headers
            for col_index, cell_item in enumerate(row):
                row_node += self.create_cell(col_index, cell_item, is_header)
            if is_header:
                thead += row_node
            else:
                tbody += row_node
    return table_node
def run(self):
    """Render a bullet list as an hlist-style table with N columns.

    The directive content must be a single bullet list; its items are
    distributed as evenly as possible across ``:columns:`` (default 2),
    with the leftmost columns absorbing any remainder.
    """
    ncolumns = self.options.get('columns', 2)
    node = nodes.Element()
    node.document = self.state.document
    self.state.nested_parse(self.content, self.content_offset, node)
    if len(node.children) != 1 or not isinstance(node.children[0],
                                                 nodes.bullet_list):
        return [self.state.document.reporter.warning(
            '.. hlist content is not a list', line=self.lineno)]
    fulllist = node.children[0]
    # create a hlist node where the items are distributed
    npercol, nmore = divmod(len(fulllist), ncolumns)
    index = 0
    table = nodes.table()
    # BUGFIX: tgroup was created without the required `cols` attribute,
    # which table writers (e.g. the LaTeX writer) rely on.
    tg = nodes.tgroup(cols=ncolumns)
    table += tg
    row = nodes.row()
    tbody = nodes.tbody()
    for column in range(ncolumns):
        # The first `nmore` columns take one extra item.
        endindex = index + (column < nmore and (npercol + 1) or npercol)
        colspec = nodes.colspec()
        colspec.attributes['stub'] = 0
        colspec.attributes['colwidth'] = 100. / ncolumns
        col = nodes.entry()
        col += nodes.bullet_list()
        col[0] += fulllist.children[index:endindex]
        index = endindex
        tg += colspec
        row += col
    tbody += row
    tg += tbody
    table['classes'].append('hlist')
    return [table]
def run(self):
    """Build a table whose body rows come from input lines matching
    ``:regex:``.

    The ``:header:`` option is a comma-separated list of column titles;
    each line that matches the regex contributes one row built from the
    match's groups.
    """
    header = self.options.get('header').split(',')
    lines = self._get_lines()
    regex = self.options.get('regex')
    max_cols = len(header)
    table = nodes.table()
    # BUGFIX: `max_cols` was passed positionally, landing in Element's
    # `rawsource` slot; it must be the `cols` attribute of the tgroup.
    tgroup = nodes.tgroup(cols=max_cols)
    table += tgroup
    col_widths = self.get_column_widths(max_cols)
    tgroup.extend(nodes.colspec(colwidth=col_width)
                  for col_width in col_widths)
    thead = nodes.thead()
    tgroup += thead
    thead += self.create_table_row(header)
    tbody = nodes.tbody()
    tgroup += tbody
    # One body row per input line that matches the regex.
    for row in lines:
        matched = re.search(regex, row)
        if matched:
            tbody += self.create_table_row(matched.groups())
    return [table]
def description_table(descriptions, widths, headers):
    """Build a table: a header row from `headers` and one plain-text body
    row per entry in `descriptions`."""
    # Table skeleton with one colspec per column.
    tgroup = nodes.tgroup(cols=len(widths))
    for w in widths:
        tgroup += nodes.colspec(colwidth=w)
    table = nodes.table()
    table += tgroup
    # Header row.
    thead = nodes.thead()
    head_row = nodes.row()
    for caption in headers:
        cell = nodes.entry()
        cell += nodes.paragraph(text=caption)
        head_row += cell
    thead += head_row
    tgroup += thead
    # Body rows: non-string values are stringified, then rendered as text.
    tbody = nodes.tbody()
    for desc in descriptions:
        body_row = nodes.row()
        for col in desc:
            cell = nodes.entry()
            if not isinstance(col, basestring):
                col = str(col)
            text_par = nodes.paragraph()
            text_par += nodes.Text(col)
            cell += text_par
            body_row += cell
        tbody += body_row
    tgroup += tbody
    return table
def build_table(self):
    """Assemble a table node from self.headers / self.col_widths and the
    rows produced by self.collect_rows()."""
    table = nodes.table()
    tgroup = nodes.tgroup(cols=len(self.headers))
    table += tgroup
    # TODO(sdague): it would be really nice to figure out how not
    # to have this stanza, it kind of messes up all of the table
    # formatting because it doesn't let tables just be the right
    # size.
    for idx, width in enumerate(self.col_widths):
        tgroup += nodes.colspec(colwidth=width, colname='c' + str(idx))
    # Header row.
    thead = nodes.thead()
    tgroup += thead
    header_row = nodes.row()
    thead += header_row
    for title in self.headers:
        header_row += nodes.entry(title, nodes.paragraph(text=title))
    # Body rows, plus any extra group nodes appended after the tgroup.
    tbody = nodes.tbody()
    tgroup += tbody
    rows, groups = self.collect_rows()
    tbody.extend(rows)
    table.extend(groups)
    return table
def run(self):
    """Render the collected values either as a bullet list (when no
    descriptions are available) or as a two-column value/description table.
    """
    if ValueTableDirective.values[0].description is None:
        # No descriptions: a simple bullet list of literal values.
        # (Renamed from `list`, which shadowed the builtin.)
        bullet_list = nodes.bullet_list()
        for v in ValueTableDirective.values:
            item = nodes.list_item()
            item += nodes.literal(v.value, v.value)
            bullet_list += item
        return [bullet_list]
    table = nodes.table()
    # BUGFIX: tgroup was created without the required `cols` attribute;
    # the table has two columns (value, description).
    tgroup = nodes.tgroup(cols=2)
    tbody = nodes.tbody()
    for v in ValueTableDirective.values:
        row = nodes.row()
        entry = nodes.entry()
        entry += nodes.literal(v.value, v.value)
        row += entry
        entry = nodes.entry()
        entry += nodes.paragraph(text=v.description)
        row += entry
        tbody += row
    tgroup += nodes.colspec(colwidth=10)
    tgroup += nodes.colspec(colwidth=90)
    tgroup += tbody
    table += tgroup
    return [table]
def gen_table(columns, data):
    """Build a one-element node list holding a table whose header comes
    from column.header() and whose body cells come from column.data(obj)."""
    table = nodes.table()
    tgroup = nodes.tgroup(cols=len(columns))
    table += tgroup
    # Equal-weight columns.
    for _ in columns:
        tgroup += nodes.colspec(colwidth=1)
    # Header row.
    thead = nodes.thead()
    tgroup += thead
    headrow = nodes.row()
    for column in columns:
        cell = nodes.entry()
        para = nodes.paragraph()
        cell += para
        title = column.header()
        para += nodes.Text(title, title)
        headrow += cell
    thead += headrow
    # Body: one row per object, one cell per column.
    tbody = nodes.tbody()
    tgroup += tbody
    for obj in data:
        body_row = nodes.row()
        for column in columns:
            cell = nodes.entry()
            para = nodes.paragraph()
            cell += para
            para += column.data(obj)
            body_row += cell
        tbody += body_row
    return [table]
def build_table(self, table_data):
    """Assemble a table node from self.headers / self.col_widths and the
    rows produced by self.get_rows(table_data)."""
    table = nodes.table()
    tgroup = nodes.tgroup(cols=len(self.headers))
    table += tgroup
    # Named colspecs (c0, c1, ...) with the configured widths.
    for idx, width in enumerate(self.col_widths):
        tgroup += nodes.colspec(colwidth=width, colname='c' + str(idx))
    # Header row.
    thead = nodes.thead()
    tgroup += thead
    header_row = nodes.row()
    thead += header_row
    for title in self.headers:
        header_row += nodes.entry(title, nodes.paragraph(text=title))
    # Body rows, plus any extra group nodes appended after the tgroup.
    tbody = nodes.tbody()
    tgroup += tbody
    rows, groups = self.get_rows(table_data)
    tbody.extend(rows)
    table.extend(groups)
    return table
def create_progtable(self, **attrs):
    """Create an empty two-column progress table: a thead filled with
    self.create_headrow() and an empty tbody. Keyword overrides are merged
    over the default classes/colwidths."""
    settings = {
        'classes': ['progress', 'outer', 'docutils', 'field-list'],
        'colwidths': [20, 80],
    }
    settings.update(attrs)
    # create container elements
    node = nodes.table(classes=settings['classes'])
    tgroup = nodes.tgroup(cols=2)
    thead = nodes.thead()
    thead += self.create_headrow()
    tbody = nodes.tbody()
    # tgroup receives, in order: colspecs, thead, tbody
    for width in settings['colwidths']:
        tgroup += nodes.colspec(colwidth=width)
    tgroup += thead
    tgroup += tbody
    node += tgroup
    return node
def build_table_from_list(self, table_data, num_cols, col_widths, header_rows, stub_columns):
    """Build a docutils table, padding short rows with column spans.

    Depending on ``self.bias``, a short row's first ("left") or last
    ("right") cell absorbs the missing columns via the ``morecols``
    attribute.
    """
    table = nodes.table()
    tgroup = nodes.tgroup(cols=len(col_widths))
    table += tgroup
    # Column specs; the first `stub_columns` columns are marked as stubs.
    for col_width in col_widths:
        colspec = nodes.colspec(colwidth=col_width)
        if stub_columns:
            colspec.attributes['stub'] = 1
            stub_columns -= 1
        tgroup += colspec
    rows = []
    for row in table_data:
        row_node = nodes.row()
        for cell_index, cell in enumerate(row):
            entry = nodes.entry()
            entry += cell
            row_node += entry
            # Left bias: the first cell spans any missing columns.
            if self.bias == "left" and not cell_index:
                remainder = num_cols - len(row)
                if remainder:
                    entry["morecols"] = remainder
            # Right bias: the last cell spans any missing columns.
            if self.bias == "right" and cell_index == len(row) - 1:
                remainder = num_cols - (cell_index + 1)
                if remainder:
                    entry["morecols"] = remainder
        rows.append(row_node)
    # Split rows between header and body.
    if header_rows:
        thead = nodes.thead()
        thead.extend(rows[:header_rows])
        tgroup += thead
    tbody = nodes.tbody()
    tbody.extend(rows[header_rows:])
    tgroup += tbody
    return table
def run(self): table = nodes.table('') ## Create table group = nodes.tgroup('', cols=3) table.append(group) for colwidth in 10,40,5: group.append(nodes.colspec('', colwidth=colwidth)) head = nodes.thead('') group.append(head) body = nodes.tbody('') group.append(body) def add_row(target, *column_texts): row = nodes.row('') for text in column_texts: if text == None: text = "" node = nodes.paragraph('') vl = ViewList() vl.append(text, '<autosummary>') self.state.nested_parse(vl, 0, node) try: if isinstance(node[0], nodes.paragraph): node = node[0] except IndexError: pass row.append(nodes.entry('', node)) target.append(row) def get_symbol(s): parametertable_path = s.split('.') for i in reversed(range(len(parametertable_path))): module = '.'.join(parametertable_path[:i]) symbol = parametertable_path[i:] try: m = __import__(str(module), fromlist='true') except ImportError: continue else: break parent = m for sym in symbol: parent = getattr(parent, sym) return parent add_row(head, 'Parameter', 'Description' , 'Unit') for param in get_symbol(self.arguments[0]): add_row(body, param.name, param.desc, param.unit) return [table]
def envy_resolve(app, doctree, fromdocname): objects = app.env.domaindata['envy']['objects'] # add uplink info for holder in doctree.traverse(uplink_placeholder): obj = objects[holder.name] links = [] for sp, pos, name, variants in obj.uplinks: signode = addnodes.desc_signature('', '') signode['first'] = False signode += make_refnode(app.builder, fromdocname, sp.docname, sp.iname + '-' + sp.name, addnodes.desc_addname(sp.name, sp.name), sp.name) text = ' {}: {}'.format(pos, name) signode += addnodes.desc_name(text, text) if variants is not None: text = ' [{}]'.format(variants) signode += addnodes.desc_annotation(text, text) links.append(signode) holder.replace_self(links) # add subnode list for holder in doctree.traverse(sub_placeholder): obj = objects[holder.name] add_variant = False for pos, name, child, variants in obj.subs: if variants is not None: add_variant = True table = nodes.table() headers = [(1, 'Address'), (1, 'Name'), (10, 'Description')] if add_variant: headers.insert(1, (1, 'Variants')) tgroup = nodes.tgroup(cols=len(headers)) table += tgroup for colwidth, header in headers: tgroup += nodes.colspec(colwidth=colwidth) thead = nodes.thead() tgroup += thead headrow = nodes.row() for colwidth, header in headers: entry = nodes.entry() para = nodes.paragraph() entry += para para += nodes.Text(header, header) headrow += entry thead += headrow tbody = nodes.tbody() tgroup += tbody for pos, name, child, variants in obj.subs: row = nodes.row() row += wrap_text_entry(pos) if add_variant: row += wrap_text_entry('all' if variants is None else variants) row += wrap_text_entry(name) entry = nodes.entry() para = nodes.paragraph() entry += para para += make_refnode(app.builder, fromdocname, child.docname, child.iname + '-' + child.name, nodes.Text(child.brief, child.brief), obj.brief) row += entry tbody += row holder.replace_self([table])
def build_links_table(self, resource):
    """Build a Name/Method/Resource table of the links exposed by `resource`."""
    is_list = "is-list" in self.options
    table = nodes.table()
    tgroup = nodes.tgroup(cols=3)
    table += tgroup
    tgroup += nodes.colspec(colwidth=25)
    tgroup += nodes.colspec(colwidth=15)
    tgroup += nodes.colspec(colwidth=60)
    thead = nodes.thead()
    tgroup += thead
    append_row(thead, ["Name", "Method", "Resource"])
    tbody = nodes.tbody()
    tgroup += tbody
    request = DummyRequest()
    if is_list:
        child_resources = resource.list_child_resources
    else:
        child_resources = resource.item_child_resources
    names_to_resource = {}
    for child in child_resources:
        names_to_resource[child.name_plural] = (child, True)
    # For item resources backed by a model, fetch a representative object
    # so related links can be computed against a real instance.
    if not is_list and resource.model:
        child_keys = {}
        create_fake_resource_path(request, resource, child_keys, True)
        obj = resource.get_queryset(request, **child_keys)[0]
    else:
        obj = None
    related_links = resource.get_related_links(request=request, obj=obj)
    for key, info in related_links.iteritems():
        if "resource" in info:
            names_to_resource[key] = (info["resource"],
                                      info.get("list-resource", False))
    links = resource.get_links(child_resources, request=DummyRequest(),
                               obj=obj)
    # One row per link, sorted by name; names without a known resource
    # fall back to the current resource itself.
    for linkname in sorted(links.iterkeys()):
        info = links[linkname]
        child, is_child_link = names_to_resource.get(linkname,
                                                     (resource, is_list))
        paragraph = nodes.paragraph()
        paragraph += get_ref_to_resource(child, is_child_link)
        append_row(tbody,
                   [nodes.strong(text=linkname), info["method"], paragraph])
    return table
def create_list_table(self, matrix, options, docname):
    """Build a two-column table relating each primary traceable to its
    relatives.

    The first column uses ``morerows`` so one primary cell spans all of
    that primary's relative rows; the second column holds one relative
    per row.
    """
    table = nodes.table()
    tgroup = nodes.tgroup(cols=2, colwidths="auto")
    table += tgroup
    # Add column specifications.
    tgroup += nodes.colspec(colwidth=50)
    tgroup += nodes.colspec(colwidth=50)
    # Add heading row.
    thead = nodes.thead()
    tgroup += thead
    row = nodes.row()
    thead += row
    entry = nodes.entry()
    row += entry
    backward_relationship = matrix.backward_relationship.capitalize()
    entry += nodes.paragraph(backward_relationship, backward_relationship)
    entry = nodes.entry()
    row += entry
    forward_relationship = matrix.forward_relationship.capitalize()
    entry += nodes.paragraph(forward_relationship, forward_relationship)
    # Add table body.
    tbody = nodes.tbody()
    tgroup += tbody
    for traceable in matrix.primaries:
        relatives = matrix.get_relatives(traceable)
        # Create first row with a first column spanning all of this
        # primary's relative rows.
        row = nodes.row()
        # BUGFIX: the first row of each primary was never appended to the
        # table body, silently dropping the primary and its first relative.
        tbody += row
        entry = nodes.entry(morerows=len(relatives) - 1)
        row += entry
        paragraph = nodes.paragraph()
        entry += paragraph
        paragraph += traceable.make_reference_node(
            self.app.builder, docname)
        for relative in relatives:
            if not row:
                # Create subsequent rows without a first column.
                row = nodes.row()
                tbody += row
            entry = nodes.entry()
            row += entry
            paragraph = nodes.paragraph()
            entry += paragraph
            paragraph += relative.make_reference_node(
                self.app.builder, docname)
            # Force a fresh row on the next iteration.
            row = None
    return table
def get_table(self, items_set):
    """Generate a proper list of table nodes for errorsummary:: directive.

    *items* is a list produced by :meth:`get_items`.
    """
    table_spec = addnodes.tabular_col_spec()
    table_spec['spec'] = 'll'
    table = autosummary_table('')
    real_table = nodes.table('', classes=['longtable'])
    table.append(real_table)
    group = nodes.tgroup('', cols=3)
    real_table.append(group)
    group.append(nodes.colspec('', colwidth=70))
    group.append(nodes.colspec('', colwidth=20))
    group.append(nodes.colspec('', colwidth=90))
    body = nodes.tbody('')
    group.append(body)

    def append_row(*column_texts):
        # Parse each cell text as rst and append one row to the body.
        row = nodes.row('')
        for text in column_texts:
            node = nodes.paragraph('')
            vl = ViewList()
            vl.append(text, '<autosummary>')
            self.state.nested_parse(vl, 0, node)
            try:
                # Unwrap a lone paragraph so the cell stays compact.
                if isinstance(node[0], nodes.paragraph):
                    node = node[0]
            except IndexError:
                pass
            row.append(nodes.entry('', node))
        body.append(row)
    # Bold pseudo-header row (the table has no thead).
    col1 = u"**Name**"
    col2 = u"**Code**"
    col3 = u"**Message**"
    append_row(col1, col2, col3)
    for class_name, items in sorted(items_set.items()):
        for name, sig, summary, real_name, code in items:
            if 'nosignatures' not in self.options:
                col1 = '_%s' % name
            col2 = '{}'.format(code)
            # Show the Russian translation alongside the original when
            # they differ; otherwise just the summary itself.
            translated = translate(summary, "ru") if summary else ""
            if translated != summary:
                col3 = u"{} / {}".format(translated, summary)
            else:
                col3 = summary
            append_row(col1, col2, col3)
    return [table_spec, table]
def get_autosummary(names, state, no_signatures=False):
    """
    Generate a proper table node for autosummary:: directive.

    *names* is a list of names of Python objects to be imported
    and added to the table. *document* is the Docutils document object.
    """
    document = state.document
    real_names = {}
    warnings = []
    # Resolution order: current module first, then the bare name.
    # NOTE(review): if env.currmodule is None, None is inserted as a
    # prefix — presumably import_by_name tolerates that; confirm.
    prefixes = ['']
    prefixes.insert(0, document.settings.env.currmodule)
    table = nodes.table('')
    group = nodes.tgroup('', cols=2)
    table.append(group)
    group.append(nodes.colspec('', colwidth=30))
    group.append(nodes.colspec('', colwidth=70))
    body = nodes.tbody('')
    group.append(body)

    def append_row(*column_texts):
        # Parse each cell text as rst and append one row to the body.
        row = nodes.row('')
        for text in column_texts:
            node = nodes.paragraph('')
            vl = ViewList()
            vl.append(text, '<autosummary>')
            state.nested_parse(vl, 0, node)
            row.append(nodes.entry('', node))
        body.append(row)
    for name in names:
        try:
            obj, real_name = import_by_name(name, prefixes=prefixes)
        except ImportError:
            # Record a warning but still emit a (link-less) row.
            warnings.append(document.reporter.warning(
                'failed to import %s' % name))
            append_row(':obj:`%s`' % name, '')
            continue
        real_names[name] = real_name
        title = ''
        qualifier = 'obj'
        col1 = ':' + qualifier + ':`%s <%s>`' % (name, real_name)
        col2 = title
        append_row(col1, col2)
    return table, warnings, real_names
def build_details_table(self, error_obj):
    """Build a two-column details table describing an API error object."""
    table = nodes.table()
    tgroup = nodes.tgroup(cols=2)
    table += tgroup
    tgroup += nodes.colspec(colwidth=20)
    tgroup += nodes.colspec(colwidth=80)
    tbody = nodes.tbody()
    tgroup += tbody
    # API Error Code
    append_detail_row(tbody, 'API Error Code',
                      nodes.literal(text=error_obj.code))
    # HTTP Status Code
    ref = parse_text(self, ':http:`%s`' % error_obj.http_status)
    append_detail_row(tbody, 'HTTP Status Code', ref)
    # Error Text
    append_detail_row(tbody, 'Error Text',
                      nodes.literal(text=error_obj.msg))
    if error_obj.headers:
        # NOTE(review): if error_obj.headers is truthy but not callable,
        # `headers` is never bound and the code below raises NameError —
        # confirm callers always supply a callable here.
        if callable(error_obj.headers):
            headers = error_obj.headers(DummyRequest())
        # HTTP Headers: a single header renders inline, several as a list.
        if len(headers) == 1:
            content = nodes.literal(text=headers.keys()[0])
        else:
            content = nodes.bullet_list()
            for header in headers.iterkeys():
                item = nodes.list_item()
                content += item
                literal = nodes.literal(text=header)
                item += literal
        append_detail_row(tbody, 'HTTP Headers', content)
    # Description
    append_detail_row(
        tbody, 'Description',
        parse_text(self, '\n'.join(self.content),
                   where='API error %s description' % error_obj.code))
    return table
def process_indigo_option_nodes(app, doctree, fromdocname):
    """Replace each optionslist placeholder node with a 4-column table of
    the indigo options collected in the build environment."""
    env = app.builder.env
    for node in doctree.traverse(optionslist):
        content = []
        tbl = nodes.table()
        tgroup = nodes.tgroup(cols=4)
        tbl += tgroup
        tgroup += nodes.colspec(colwidth=35)
        tgroup += nodes.colspec(colwidth=9)
        tgroup += nodes.colspec(colwidth=9)
        tgroup += nodes.colspec(colwidth=73)
        thead = nodes.thead()
        tgroup += thead
        row = nodes.row()
        thead += row
        row += createRowEntryText('Name')
        row += createRowEntryText('Type')
        row += createRowEntryText('Default')
        row += createRowEntryText('Short description')
        tbody = nodes.tbody()
        tgroup += tbody
        content.append(tbl)
        # One row per option, sorted alphabetically by name.
        sorted_options = sorted(env.indigo_options, key=lambda o: o['name'])
        for opt_info in sorted_options:
            row = nodes.row()
            tbody += row
            # Create a reference linking to the option's anchor in its
            # defining document.
            newnode = nodes.reference('', '')
            innernode = nodes.Text(opt_info['name'], opt_info['name'])
            newnode['refdocname'] = opt_info['docname']
            newnode['refuri'] = app.builder.get_relative_uri(
                fromdocname, opt_info['docname'])
            newnode['refuri'] += '#' + normalize_name(opt_info['name'])
            newnode.append(innernode)
            row += createRowEntry(newnode)
            row += createRowEntryText(opt_info['type'])
            row += createRowEntryText(opt_info['default'])
            row += createRowEntryText(opt_info['short'])
        node.replace_self(content)
def format(self, app, docname, node, traceables, options):
    """Render `traceables` as a list-table with tag and title columns plus
    one column per extra attribute requested via options."""
    additional_attributes = options.get("attributes") or []
    columns = ["tag", "title"] + additional_attributes
    table = nodes.table()
    table["classes"].append("traceables-listtable")
    tgroup = nodes.tgroup(cols=len(columns), colwidths="auto")
    table += tgroup
    # Add column specifications.
    for attribute_name in columns:
        tgroup += nodes.colspec(colwidth=1)
    # Add heading row.
    thead = nodes.thead()
    tgroup += thead
    row = nodes.row()
    thead += row
    for attribute_name in columns:
        entry = nodes.entry()
        row += entry
        container = nodes.container()
        entry += container
        text = attribute_name.capitalize()
        inline = nodes.inline(text, text)
        container += inline
    # Add table body.
    tbody = nodes.tbody()
    tgroup += tbody
    for traceable in traceables:
        row = nodes.row()
        tbody += row
        for attribute_name in columns:
            entry = nodes.entry()
            row += entry
            if attribute_name == "tag":
                # Tag cells link back to the traceable's definition.
                inline = nodes.inline()
                inline += traceable.make_reference_node(
                    app.builder, docname)
            elif attribute_name == "title":
                text = traceable.title if traceable.has_title else ""
                inline = nodes.inline(text, text)
            else:
                # Missing attributes render as empty cells.
                text = traceable.attributes.get(attribute_name, "")
                inline = nodes.inline(text, text)
            entry += inline
    return table
def _build_markup(self, notifications):
    """Build a 3-column table listing each notification's class, payload
    class and a link to its sample file.

    Returns a one-element node list (the table) for doctree inclusion.
    """
    content = []
    cols = ['Notification class', 'Payload class', 'Sample file link']
    table = nodes.table()
    content.append(table)
    group = nodes.tgroup(cols=len(cols))
    table.append(group)
    # BUGFIX: colspecs must precede thead inside the tgroup for the
    # doctree to be well-formed; they were previously appended after it.
    for i in range(len(cols)):
        group.append(nodes.colspec(colwidth=1))
    head = nodes.thead()
    group.append(head)
    body = nodes.tbody()
    group.append(body)
    # fill the table header
    # BUGFIX: the header row was appended to the body, leaving the thead
    # empty and rendering the column titles as an ordinary body row.
    row = nodes.row()
    head.append(row)
    for col_name in cols:
        col = nodes.entry()
        row.append(col)
        text = nodes.strong(text=col_name)
        col.append(text)
    # fill the table content, one notification per row
    for name, payload, sample in notifications:
        row = nodes.row()
        body.append(row)
        col = nodes.entry()
        row.append(col)
        text = nodes.literal(text=name)
        col.append(text)
        col = nodes.entry()
        row.append(col)
        text = nodes.literal(text=payload)
        col.append(text)
        col = nodes.entry()
        row.append(col)
        ref = nodes.reference(refuri=self.LINK_PREFIX +
                              self.SAMPLE_ROOT + sample)
        txt = nodes.inline()
        col.append(txt)
        txt.append(ref)
        ref.append(nodes.literal(text=sample))
    return content
def _build_grade_listing(self, matrix, content):
    """Append a Grades heading and a two-column Grade/Description table
    to `content`, returning the mutated list."""
    summarytitle = nodes.subtitle(text="Grades")
    content.append(nodes.raw(text="Grades", attributes={'tagname': 'h2'}))
    content.append(summarytitle)
    table = nodes.table()
    table.set_class("table")
    table.set_class("table-condensed")
    grades = matrix.grades
    tablegroup = nodes.tgroup(cols=2)
    summarybody = nodes.tbody()
    summaryhead = nodes.thead()
    for _ in range(2):
        tablegroup.append(nodes.colspec(colwidth=1))
    tablegroup.append(summaryhead)
    tablegroup.append(summarybody)
    table.append(tablegroup)
    content.append(table)
    # Header row: Grade | Description.
    header = nodes.row()
    for caption in ("Grade", "Description"):
        cell = nodes.entry()
        cell.append(nodes.strong(text=caption))
        header.append(cell)
    summaryhead.append(header)
    # One row per grade: a css-labelled title plus its notes.
    for grade in grades:
        item = nodes.row()
        namecol = nodes.entry()
        status_text = nodes.paragraph(text=grade.title)
        status_text.set_class("label-%s" % grade.css_class)
        status_text.set_class("label")
        namecol.append(status_text)
        item.append(namecol)
        notescol = nodes.entry()
        notescol.append(nodes.paragraph(text=grade.notes))
        item.append(notescol)
        summarybody.append(item)
    return content
def get_table(self):
    """Return (table_spec, table, body) for an empty two-column longtable."""
    table_spec = addnodes.tabular_col_spec()
    table_spec['spec'] = 'll'
    table = autosummary_table('')
    real_table = nodes.table('', classes=['longtable'])
    table.append(real_table)
    group = nodes.tgroup('', cols=2)
    real_table.append(group)
    # 10/90 split between the two columns.
    for width in (10, 90):
        group.append(nodes.colspec('', colwidth=width))
    body = nodes.tbody('')
    group.append(body)
    return table_spec, table, body
def get_table(self, items):
    """Generate a proper list of table nodes for autosummary:: directive.

    *items* is a list produced by :meth:`get_items`.
    """
    table_spec = addnodes.tabular_col_spec()
    table_spec['spec'] = 'LL'
    table = autosummary_table('')
    real_table = nodes.table('')
    if "title" in self.options:
        # Optional caption, inserted before the tgroup.
        title_node = self.make_title(self.options["title"])
        real_table.insert(0, title_node)
    table.append(real_table)
    group = nodes.tgroup('', cols=2)
    real_table.append(group)
    group.append(nodes.colspec('', colwidth=10))
    group.append(nodes.colspec('', colwidth=90))
    body = nodes.tbody('')
    group.append(body)

    def append_row(*column_texts):
        # Parse each cell text as rst and append one row to the body.
        row = nodes.row('')
        for text in column_texts:
            node = nodes.paragraph('')
            vl = ViewList()
            vl.append(text, '<doxybridge-autosummary>')
            self.sphinx_directive.state.nested_parse(vl, 0, node)
            try:
                # Unwrap a lone paragraph so the cell stays compact.
                if isinstance(node[0], nodes.paragraph):
                    node = node[0]
            except IndexError:
                pass
            row.append(nodes.entry('', node))
        body.append(row)
    # One row per item: cross-reference (with optional signature) + summary.
    for name, sig, summary, real_name in items:
        qualifier = self.get_qualifier(name)
        if 'nosignatures' not in self.options:
            col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, sig)
        else:
            col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
        col2 = summary
        append_row(col1, col2)
    return [table_spec, table]
def run(self):
    """Render the ot.ResourceMap as a two-column key/value table.

    stdout is redirected for the duration so library chatter does not
    leak into the build output; any failure is reported as an error node
    naming the directive's source location.
    """
    oldStdout, sys.stdout = sys.stdout, StringIO()
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    try:
        table = nodes.table()
        tgroup = nodes.tgroup(cols=2)
        table += tgroup
        tgroup += nodes.colspec(colwidth=25, classes=['key'])
        tgroup += nodes.colspec(colwidth=8, classes=['value'])
        thead = nodes.thead()
        tgroup += thead
        # Add headers
        row = nodes.row()
        thead += row
        entry = nodes.entry()
        row += entry
        entry += nodes.paragraph(text='Key')
        entry = nodes.entry()
        row += entry
        entry += nodes.paragraph(text='Value')
        # Add body
        tbody = nodes.tbody()
        tgroup += tbody
        # BUGFIX: an empty row was previously appended to the body before
        # this loop, producing a spurious blank first row in the output.
        for key in ot.ResourceMap.GetKeys():
            row = nodes.row()
            tbody += row
            entry = nodes.entry()
            row += entry
            entry += nodes.paragraph(text=key)
            entry = nodes.entry()
            row += entry
            value = ot.ResourceMap.Get(key)
            # Make whitespace-only values and tabs visible in the docs.
            if not len(value):
                value = ' '.__repr__()
            if '\t' in value:
                value = value.replace('\t', '\\t')
            entry += nodes.paragraph(text=value)
        return [table]
    except Exception:
        return [
            nodes.error(
                None,
                nodes.paragraph(
                    text="Unable to execute python code at %s:%d:"
                    % (basename(source), self.lineno)),
                nodes.paragraph(text=str(sys.exc_info()[1])))
        ]
    finally:
        sys.stdout = oldStdout
def run(self):
    """Build a 3-wide grid of image+caption cells from csv directive content.

    Each csv row is ``name, page, image``. Items are laid out three per
    table row, with an image row followed by a matching caption row.
    """
    env = self.state.document.settings.env
    items = []
    data = list(csv.reader(self.content))
    for row in data:
        if not row:
            continue
        name, page, image = row
        link = page.strip()
        # Normalize relative page links to site-absolute .html URLs.
        if not link.startswith('http') and not link.startswith('/'):
            link = '/{}'.format(link)
        if '.html' not in link:
            link += '.html'
        items.append({
            'name': name.strip(),
            'link': link,
            'image': '/images/{}'.format(image.strip()),
        })
    col_widths = self.get_column_widths(3)
    title, messages = self.make_title()
    table = nodes.table()
    table['classes'].append('table-center')
    # Set up column specifications based on widths
    tgroup = nodes.tgroup(cols=3)
    table += tgroup
    tgroup.extend(
        nodes.colspec(colwidth=col_width) for col_width in col_widths)
    tbody = nodes.tbody()
    tgroup += tbody
    rows = []
    # Process items three at a time; grouper pads the last group with None.
    for value in grouper(3, items):
        # Image row: each cell is a linked, styled image.
        trow = nodes.row()
        for cell in value:
            entry = nodes.entry()
            if cell is None:
                # Padding cell for an incomplete final group.
                entry += nodes.paragraph()
                trow += entry
                continue
            name = cell['name']
            link = cell['link']
            image = cell['image']
            reference_node = nodes.reference(refuri=link)
            img = nodes.image(uri=directives.uri(image), alt=name)
            img['classes'].append('component-image')
            reference_node += img
            para = nodes.paragraph()
            para += reference_node
            entry += para
            trow += entry
        rows.append(trow)
        # Caption row: plain text links under the images.
        trow = nodes.row()
        for cell in value:
            entry = nodes.entry()
            if cell is None:
                entry += nodes.paragraph()
                trow += entry
                continue
            name = cell['name']
            link = cell['link']
            ref = nodes.reference(name, name, refuri=link)
            para = nodes.paragraph()
            para += ref
            entry += para
            trow += entry
        rows.append(trow)
    tbody.extend(rows)
    self.add_name(table)
    if title:
        table.insert(0, title)
    return [table] + messages
def build_details_table(self, resource):
    """Build the two-column details table for *resource*.

    Rows: Name, URI, HTTP Methods, Parent Resource, Child Resources,
    and Anonymous Access.  The "is-list" directive option switches
    between item and list variants of the resource.
    """
    is_list = 'is-list' in self.options

    table = nodes.table(classes=['resource-info'])
    tgroup = nodes.tgroup(cols=2)
    table += tgroup
    tgroup += nodes.colspec(colwidth=30, classes=['field'])
    tgroup += nodes.colspec(colwidth=70, classes=['value'])
    tbody = nodes.tbody()
    tgroup += tbody

    # Name
    if is_list:
        resource_name = resource.name_plural
    else:
        resource_name = resource.name
    append_detail_row(tbody, "Name", nodes.literal(text=resource_name))

    # URI
    uri_template = get_resource_uri_template(resource, not is_list)
    append_detail_row(tbody, "URI", nodes.literal(text=uri_template))

    # URI Parameters
    #append_detail_row(tbody, "URI Parameters", '')

    # HTTP Methods: bullet list of method name + first sentence of the
    # handler's docstring.
    allowed_http_methods = self.get_http_methods(resource, is_list)
    bullet_list = nodes.bullet_list()
    for http_method in allowed_http_methods:
        item = nodes.list_item()
        bullet_list += item
        paragraph = nodes.paragraph()
        item += paragraph
        ref = nodes.reference(text=http_method, refid=http_method)
        paragraph += ref
        doc_summary = self.get_doc_for_http_method(resource, http_method)
        i = doc_summary.find('.')
        if i != -1:
            # Keep only the first sentence of the handler docstring.
            doc_summary = doc_summary[:i + 1]
        paragraph += nodes.inline(text=" - ")
        paragraph += parse_text(self, doc_summary, nodes.inline,
                                where='HTTP %s handler summary for %s' %
                                (http_method, self.options['classname']))
    append_detail_row(tbody, "HTTP Methods", bullet_list)

    # Parent Resource
    if is_list or resource.uri_object_key is None:
        parent_resource = resource._parent_resource
        is_parent_list = False
    else:
        parent_resource = resource
        is_parent_list = True
    if parent_resource:
        paragraph = nodes.paragraph()
        paragraph += get_ref_to_resource(parent_resource, is_parent_list)
    else:
        # append_detail_row also accepts a plain string (see
        # "Anonymous Access" below).
        paragraph = 'None.'
    append_detail_row(tbody, "Parent Resource", paragraph)

    # Child Resources: rendered as a hidden-less toctree when present.
    if is_list:
        child_resources = list(resource.list_child_resources)
        if resource.name != resource.name_plural:
            if resource.uri_object_key:
                child_resources.append(resource)
            are_children_lists = False
        else:
            are_children_lists = True
    else:
        child_resources = resource.item_child_resources
        are_children_lists = True
    if child_resources:
        tocnode = addnodes.toctree()
        tocnode['glob'] = None
        tocnode['maxdepth'] = 1
        tocnode['hidden'] = False
        docnames = sorted([
            docname_join(
                self.state.document.settings.env.docname,
                get_resource_docname(child_resource, are_children_lists))
            for child_resource in child_resources
        ])
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docname) for docname in docnames]
    else:
        tocnode = nodes.paragraph(text="None")
    append_detail_row(tbody, "Child Resources", tocnode)

    # Anonymous Access: derived from flags set on the GET handler.
    if is_list and not resource.singleton:
        getter = resource.get_list
    else:
        getter = resource.get
    if getattr(getter, 'login_required', False):
        anonymous_access = 'No'
    elif getattr(getter, 'checks_login_required', False):
        anonymous_access = 'Yes, if anonymous site access is enabled'
    else:
        anonymous_access = 'Yes'
    append_detail_row(tbody, "Anonymous Access", anonymous_access)

    return table
def run(self) -> List[nodes.Element]:
    """Build a two-column Project/Revision table from the manifest.

    The "show-first" option names projects that must appear at the top
    of the table, in the given order; all remaining projects follow.
    """
    # Parse the "show-first" option into a clean list of names.
    show_first_raw = self.options.get("show-first", None)
    show_first = (
        [entry.strip() for entry in show_first_raw.split(",")]
        if show_first_raw
        else []
    )

    manifest = Manifest.from_file(
        self.env.config.manifest_revisions_table_manifest)

    # Reserve one slot per show-first entry; other projects are appended.
    projects = [None] * len(show_first)
    for project in manifest.projects:
        # The manifest repository itself is never listed.
        if project.name == "manifest":
            continue
        try:
            projects[show_first.index(project.name)] = project
        except ValueError:
            projects.append(project)

    # Any unfilled slot means a show-first name was not in the manifest.
    if not all(projects):
        raise ExtensionError(
            f"Invalid show-first option: {show_first_raw}")

    # Table skeleton: two equal-width columns.
    table = nodes.table()
    tgroup = nodes.tgroup(cols=2)
    for _ in range(2):
        tgroup += nodes.colspec(colwidth=1)
    table += tgroup

    # Header row.
    thead = nodes.thead()
    tgroup += thead
    header = nodes.row()
    thead.append(header)
    for heading in ("Project", "Revision"):
        cell = nodes.entry()
        cell += nodes.paragraph(text=heading)
        header += cell

    # One body row per project: name, then revision linked to its URL.
    body_rows = []
    for project in projects:
        row = nodes.row()
        body_rows.append(row)

        name_cell = nodes.entry()
        name_cell += nodes.paragraph(text=project.name)
        row += name_cell

        rev_cell = nodes.entry()
        rev_par = nodes.paragraph()
        rev_par += nodes.reference(
            project.revision,
            project.revision,
            internal=False,
            refuri=ManifestRevisionsTable.rev_url(project.url,
                                                  project.revision),
        )
        rev_cell += rev_par
        row += rev_cell

    tbody = nodes.tbody()
    tbody.extend(body_rows)
    tgroup += tbody

    return [table]
def run(self):
    """Render the CSV directive content as a centered grid table.

    Each CSV row is ``name, page, image[, category]``.  The grid has
    ``columns`` cells per row (default 3); each group of items produces
    two stacked rows: linked images, then linked captions with an
    optional category line.
    """
    cols = self.options.get("columns", 3)
    items = []
    data = list(csv.reader(self.content))
    for row in data:
        if not row:
            continue
        # Only the first three fields are required; extras are ignored
        # except for the optional category in column four.
        name, page, image = row[0:3]
        link = page.strip()
        if link.startswith("http"):
            # Absolute URLs are used verbatim.
            pass
        else:
            # Site-relative page: normalise to "/<page>.html".
            if not link.startswith("/"):
                link = "/{}".format(link)
            if ".html" not in link:
                link += ".html"
        items.append({
            "name": name.strip(),
            "link": link,
            "image": "/images/{}".format(image.strip()),
            "category": row[3] if len(row) >= 4 else None
        })

    col_widths = self.get_column_widths(cols)
    title, messages = self.make_title()
    table = nodes.table()
    table["classes"].append("table-center")

    # Set up column specifications based on widths
    tgroup = nodes.tgroup(cols=cols)
    table += tgroup
    tgroup.extend(
        nodes.colspec(colwidth=col_width) for col_width in col_widths)

    tbody = nodes.tbody()
    tgroup += tbody

    rows = []
    for value in grouper(cols, items):
        # First row of the pair: images linking to the page.
        trow = nodes.row()
        for cell in value:
            entry = nodes.entry()
            if cell is None:
                # grouper() pads the last group with None: empty cell.
                entry += nodes.paragraph()
                trow += entry
                continue
            name = cell["name"]
            link = cell["link"]
            image = cell["image"]
            reference_node = nodes.reference(refuri=link)
            img = nodes.image(uri=directives.uri(image), alt=name)
            img["classes"].append("component-image")
            reference_node += img
            para = nodes.paragraph()
            para += reference_node
            entry += para
            trow += entry
        rows.append(trow)

        # Second row of the pair: text links plus optional category.
        trow = nodes.row()
        for cell in value:
            entry = nodes.entry()
            if cell is None:
                entry += nodes.paragraph()
                trow += entry
                continue
            name = cell["name"]
            link = cell["link"]
            ref = nodes.reference(name, name, refuri=link)
            para = nodes.paragraph()
            para += ref
            entry += para
            cat_text = cell["category"]
            if cat_text:
                cat = nodes.paragraph(text=cat_text)
                entry += cat
            trow += entry
        rows.append(trow)

    tbody.extend(rows)

    self.add_name(table)
    if title:
        table.insert(0, title)
    return [table] + messages
def run(self):
    """Render a release timeline table from a Ceph releases YAML file.

    ``arguments[0]`` is the YAML path; the remaining arguments name the
    release series to display, one column each, plus a leading Date
    column.  Rows are sorted newest-first.
    """
    filename = self.arguments[0]
    document = self.state.document
    env = document.settings.env
    rel_filename, filename = env.relfn2path(filename)
    env.note_dependency(filename)
    try:
        with open(filename, 'r') as fp:
            releases = yaml.safe_load(fp)
    except Exception as e:
        return [
            document.reporter.warning(
                "Failed to open Ceph releases file {}: {}".format(
                    filename, e),
                line=self.lineno)
        ]

    display_releases = self.arguments[1:]

    # Collect (date, series, version, skip_ref) for requested series.
    timeline = []
    for code_name, info in releases["releases"].items():
        if code_name in display_releases:
            for release in info.get("releases", []):
                released = release["released"]
                timeline.append((released, code_name, release["version"],
                                 release.get("skip_ref", False)))

    # "development" is a reserved pseudo-series kept outside "releases".
    assert "development" not in releases["releases"]
    if "development" in display_releases:
        for release in releases["development"]["releases"]:
            released = release["released"]
            timeline.append((released, "development", release["version"],
                             release.get("skip_ref", False)))

    # Newest release first.
    timeline = sorted(timeline, key=lambda t: t[0], reverse=True)

    table = nodes.table()
    columns = ["Date"] + display_releases
    # BUG FIX: cols was hard-coded to 3, but the column count depends on
    # how many release series were requested.
    tgroup = nodes.tgroup(cols=len(columns))
    table += tgroup
    tgroup.extend(
        nodes.colspec(colwidth=30, colname='c' + str(idx))
        for idx, _ in enumerate(range(len(columns))))
    thead = nodes.thead()
    tgroup += thead
    row_node = nodes.row()
    thead += row_node
    for col in columns:
        entry = nodes.entry()
        if col.lower() in ["date", "development"]:
            para = nodes.paragraph(text=col.title())
        else:
            # reST reference to the release page.  BUG FIX: removed a
            # dead `.format(col)` chained after the f-string (the
            # f-string had already interpolated everything).
            para = nodes.paragraph(text=f"`{col.title()} <{col}>`_")
        sphinx.util.nodes.nested_parse_with_titles(self.state, para, entry)
        row_node += entry

    tbody = nodes.tbody()
    tgroup += tbody
    rows = []
    for row_info in timeline:
        trow = nodes.row()
        # Date cell.
        entry = nodes.entry()
        para = nodes.paragraph(text=row_info[0])
        entry += para
        trow += entry
        # One cell per displayed series; "--" where not applicable.
        for release in display_releases:
            entry = nodes.entry()
            if row_info[1] == release:
                if row_info[3]:  # if skip ref
                    para = nodes.paragraph(text=row_info[2])
                else:
                    para = nodes.paragraph(
                        text="`{}`_".format(row_info[2]))
                    sphinx.util.nodes.nested_parse_with_titles(
                        self.state, para, entry)
            else:
                para = nodes.paragraph(text="--")
            entry += para
            trow += entry
        rows.append(trow)
    tbody.extend(rows)
    return [table]
def __init__(self, app, need, layout, node, style=None, fromdocname=None):
    """Layout handler: renders a need as a table following a grid layout.

    :param app: Sphinx application (source of all ``needs_*`` config).
    :param need: need data dictionary.
    :param layout: layout name; must be registered in
        ``app.config.needs_layouts``.
    :param node: docutils node of the need.
    :param style: style override; falls back to the need's own style,
        then to ``needs_default_style``.
    :param fromdocname: docname to resolve references from when the need
        is shown on another page; defaults to the need's own docname.
    :raises SphinxNeedLayoutException: if *layout* is not registered.
    """
    self.app = app
    self.need = need

    self.layout_name = layout
    available_layouts = app.config.needs_layouts
    if self.layout_name not in available_layouts.keys():
        raise SphinxNeedLayoutException(
            'Given layout "{}" is unknown for need {}. Registered layouts are: {}'
            .format(self.layout_name, need["id"], " ,".join(available_layouts.keys())))
    self.layout = available_layouts[self.layout_name]

    self.node = node

    # Used, if need is referenced from another page
    if fromdocname is None:
        self.fromdocname = need["docname"]
    else:
        self.fromdocname = fromdocname

    # For ReadTheDocs Theme we need to add 'rtd-exclude-wy-table'.
    classes = [
        "need",
        "needs_grid_" + self.layout["grid"],
        "needs_layout_" + self.layout_name
    ]
    classes.extend(app.config.needs_table_classes)

    # Style precedence: explicit argument > need's own style > global default.
    self.style = style or self.need["style"] or getattr(
        self.app.config, "needs_default_style", None)

    if self.style:
        # Multiple comma-separated style names are allowed.
        for style in self.style.strip().split(","):
            style = style.strip()
            classes.append("needs_style_" + style)
    else:
        classes.append("needs_style_none")

    classes.append("needs_type_" + "".join(self.need["type"].split()))

    self.node_table = nodes.table(classes=classes, ids=[self.need["id"]])
    self.node_tbody = nodes.tbody()

    # Registry of grid layouts: maps a grid name either to a dict of
    # builder function + configuration (column widths, side regions,
    # footer flag) or directly to a builder ("complex").
    self.grids = {
        "simple": {
            "func": self._grid_simple,
            "configs": {
                "colwidths": [100],
                "side_left": False,
                "side_right": False,
                "footer": False
            },
        },
        "simple_side_left": {
            "func": self._grid_simple,
            "configs": {
                "colwidths": [30, 70],
                "side_left": "full",
                "side_right": False,
                "footer": False
            },
        },
        "simple_side_right": {
            "func": self._grid_simple,
            "configs": {
                "colwidths": [70, 30],
                "side_left": False,
                "side_right": "full",
                "footer": False
            },
        },
        "simple_side_left_partial": {
            "func": self._grid_simple,
            "configs": {
                "colwidths": [20, 80],
                "side_left": "part",
                "side_right": False,
                "footer": False
            },
        },
        "simple_side_right_partial": {
            "func": self._grid_simple,
            "configs": {
                "colwidths": [80, 20],
                "side_left": False,
                "side_right": "part",
                "footer": False
            },
        },
        "complex": self._grid_complex,
        "content": {
            "func": self._grid_content,
            "configs": {
                "colwidths": [100],
                "side_left": False,
                "side_right": False,
                "footer": False
            },
        },
        "content_footer": {
            "func": self._grid_content,
            "configs": {
                "colwidths": [100],
                "side_left": False,
                "side_right": False,
                "footer": True
            },
        },
        "content_side_left": {
            "func": self._grid_content,
            "configs": {
                "colwidths": [5, 95],
                "side_left": True,
                "side_right": False,
                "footer": False
            },
        },
        "content_side_right": {
            "func": self._grid_content,
            "configs": {
                "colwidths": [95, 5],
                "side_left": False,
                "side_right": True,
                "footer": False
            },
        },
        "content_footer_side_left": {
            "func": self._grid_content,
            "configs": {
                "colwidths": [5, 95],
                "side_left": True,
                "side_right": False,
                "footer": True
            },
        },
        "content_footer_side_right": {
            "func": self._grid_content,
            "configs": {
                "colwidths": [95, 5],
                "side_left": False,
                "side_right": True,
                "footer": True
            },
        },
    }

    # Dummy Document setup
    self.doc_settings = OptionParser(
        components=(Parser, )).get_default_values()
    self.dummy_doc = new_document("dummy", self.doc_settings)
    self.doc_language = languages.get_language(
        self.dummy_doc.settings.language_code)
    self.doc_memo = Struct(
        document=self.dummy_doc,
        reporter=self.dummy_doc.reporter,
        language=self.doc_language,
        title_styles=[],
        section_level=0,
        section_bubble_up_kludge=False,
        inliner=None,
    )

    # Dynamic functions usable inside layout line definitions.
    self.functions = {
        "meta": self.meta,
        "meta_all": self.meta_all,
        "meta_links": self.meta_links,
        "meta_links_all": self.meta_links_all,
        "meta_id": self.meta_id,
        "image": self.image,
        "link": self.link,
        "collapse_button": self.collapse_button,
    }

    # Prepare string_links dict, so that regex and templates get not recompiled too often.
    #
    # Do not set needs_string_links here and update it.
    # This would lead to deepcopy()-errors, as needs_string_links gets some "pickled" and jinja Environment is
    # too complex for this.
    self.string_links = {}
    for link_name, link_conf in app.config.needs_string_links.items():
        self.string_links[link_name] = {
            "url_template": Environment(loader=BaseLoader).from_string(
                link_conf["link_url"]),
            "name_template": Environment(loader=BaseLoader).from_string(
                link_conf["link_name"]),
            "regex_compiled": re.compile(link_conf["regex"]),
            "options": link_conf["options"],
            "name": link_name,
        }
def run(self):
    """Output a table that follows a standard output format for attributes.

    Creates a JS-domain ``attribute`` object for the first argument,
    then appends a two-column details table holding the attribute's
    name/type, an optional "Required" marker, and the directive content
    as the description.

    :return: ``[node_index, node_desc]`` from the domain object.
    """
    required = 'required' in self.options
    attr_name = self.arguments[0]
    attr_type = self.options.get('type')
    # NOTE: removed an unused read of self.options.get('default').

    # Create domain indexed object
    dom_obj = JSObject('attribute', [attr_name], {}, nodes.Text('', ''),
                       self.lineno, self.content_offset, self.block_text,
                       self.state, self.state_machine)
    (node_index, node_desc) = dom_obj.run()

    # Pull out object name as rendered by the domain, falling back to
    # the raw argument text.
    node_attr_name = nodes.Text(attr_name, attr_name)
    node_desc_name = node_desc.next_node(addnodes.desc_name)
    if node_desc_name:
        node_attr_name = node_desc_name.next_node(nodes.Text)
    # NOTE(review): the original fetched desc_signature twice and then
    # only `del`eted the local variable, which does not detach the node
    # from the tree; both dead statements were removed.  If detaching
    # the signature was intended, use node.parent.remove(node) instead.

    table = nodes.table()
    node_desc.append(table)
    table['classes'] = ['details-table']
    # BUG FIX: the group declared cols=3 while only two colspecs are
    # appended and every row holds exactly two entries.
    tgroup = nodes.tgroup(cols=2)
    tgroup.append(nodes.colspec(colwidth=5))
    tgroup.append(nodes.colspec(colwidth=10))
    table.append(tgroup)
    tbody = nodes.tbody()
    tgroup.append(tbody)

    # Attribute name / type row
    row_head = nodes.row()
    entry_attribute = nodes.entry(classes=['details-table-name'])
    entry_attribute.append(nodes.paragraph('', '', node_attr_name))
    row_head.append(entry_attribute)
    entry_type = nodes.entry(classes=['details-table-type'])
    row_head.append(entry_type)
    tbody.append(row_head)
    if attr_type:
        # Type is rendered as a pending cross-reference into the JS domain.
        entry_type.append(
            nodes.paragraph(
                '', '',
                addnodes.pending_xref(
                    '',
                    nodes.literal(attr_type, attr_type),
                    refdomain='js',
                    reftype=None,
                    reftarget=attr_type,
                    #refdoc=self.env.docname,
                )))

    # Required
    if required:
        row_required = nodes.row()
        row_required.append(
            nodes.entry(
                '',
                nodes.paragraph(
                    '', '',
                    nodes.emphasis('', nodes.Text('Required', 'Required')))))
        row_required.append(nodes.entry())
        tbody.append(row_required)

    # Content: the directive body becomes the description cell.
    if self.content:
        row_desc = nodes.row()
        row_desc.append(
            nodes.entry('', nodes.paragraph('Description', 'Description')))
        entry_description = nodes.entry()
        row_desc.append(entry_description)
        self.state.nested_parse(self.content, self.content_offset,
                                entry_description)
        tbody.append(row_desc)

    return [node_index, node_desc]
def _build_summary(self, matrix, content):
    """Return a desc table of contents node tree.

    Constructs the docutils content for the summary of the support
    matrix: a giant table with one row per feature and one column per
    hypervisor driver, giving an 'at a glance' status overview.
    Appends the title and table to *content*.
    """
    summarytitle = nodes.subtitle(text="Summary")
    summary = nodes.table()
    # Two fixed columns (feature name, status) + one per driver.
    cols = len(matrix.targets.keys())
    cols += 2
    summarygroup = nodes.tgroup(cols=cols)
    summarybody = nodes.tbody()
    summaryhead = nodes.thead()

    for i in range(cols):
        summarygroup.append(nodes.colspec(colwidth=1))
    summarygroup.append(summaryhead)
    summarygroup.append(summarybody)
    summary.append(summarygroup)
    content.append(summarytitle)
    content.append(summary)

    # This sets up all the column headers - two fixed
    # columns for feature name & status
    header = nodes.row()
    blank = nodes.entry()
    blank.append(nodes.emphasis(text="Feature"))
    header.append(blank)
    blank = nodes.entry()
    blank.append(nodes.emphasis(text="Status"))
    header.append(blank)
    summaryhead.append(header)

    # then one column for each hypervisor driver
    # BUG FIX: dict.keys() returns a view with no .sort() on Python 3;
    # use sorted() instead (also hoisted out of the feature loop below,
    # where it was recomputed on every iteration).
    impls = sorted(matrix.targets.keys())
    for key in impls:
        target = matrix.targets[key]
        implcol = nodes.entry()
        header.append(implcol)
        implcol.append(nodes.strong(text=target.title))

    # We now produce the body of the table, one row for
    # each feature to report on
    for feature in matrix.features:
        item = nodes.row()

        # the hyperlink target name linking to details
        # (renamed from `id` to avoid shadowing the builtin)
        feature_id = re.sub("[^a-zA-Z0-9_]", "_", feature.key)

        # first the two fixed columns for title/status
        keycol = nodes.entry()
        item.append(keycol)
        keyref = nodes.reference(refid=feature_id)
        keytxt = nodes.inline()
        keycol.append(keytxt)
        keytxt.append(keyref)
        keyref.append(nodes.strong(text=feature.title))

        statuscol = nodes.entry()
        item.append(statuscol)
        statuscol.append(
            nodes.inline(text=feature.status,
                         classes=["sp_feature_" + feature.status]))

        # and then one column for each hypervisor driver
        for key in impls:
            impl = feature.implementations[key]
            implcol = nodes.entry()
            item.append(implcol)

            cell_id = re.sub("[^a-zA-Z0-9_]", "_", feature.key + "_" + key)
            implref = nodes.reference(refid=cell_id)
            impltxt = nodes.inline()
            implcol.append(impltxt)
            impltxt.append(implref)

            status = ""
            if impl.status == SupportMatrixImplementation.STATUS_COMPLETE:
                status = u"\u2714"
            elif impl.status == SupportMatrixImplementation.STATUS_MISSING:
                status = u"\u2716"
            elif impl.status == SupportMatrixImplementation.STATUS_PARTIAL:
                # NOTE(review): same check mark as COMPLETE; the CSS
                # class added below is what distinguishes partial
                # support visually — confirm this is intended.
                status = u"\u2714"
            elif impl.status == SupportMatrixImplementation.STATUS_UKNOWN:
                status = u"?"

            implref.append(
                nodes.literal(
                    text=status,
                    classes=["sp_impl_summary", "sp_impl_" + impl.status]))

        summarybody.append(item)
def _build_backend_detail_table(self, backend, matrix):
    """Build the per-backend detail table: grade, in-tree status,
    maintainers, repository and notes, one labelled row each."""

    def make_row(label, text):
        # One body row: bold label cell followed by a plain text cell.
        row = nodes.row()
        label_cell = nodes.entry()
        label_cell.append(nodes.strong(text=label))
        value_cell = nodes.entry()
        value_cell.append(nodes.paragraph(text=text))
        row.append(label_cell)
        row.append(value_cell)
        return row

    # Two-column skeleton with a head and a body.
    table = nodes.table()
    tgroup = nodes.tgroup(cols=2)
    thead = nodes.thead()
    tbody = nodes.tbody()
    for _ in range(2):
        tgroup.append(nodes.colspec(colwidth=1))
    tgroup.append(thead)
    tgroup.append(tbody)
    table.append(tgroup)

    # Header row: backend title in the first cell, second cell empty.
    header = nodes.row()
    title_cell = nodes.entry()
    title_cell.append(nodes.emphasis(text=backend.title))
    header.append(title_cell)
    header.append(nodes.entry())
    thead.append(header)

    # Check mark when the backend lives in-tree, cross otherwise.
    in_tree_mark = u"\u2714" if bool(backend.in_tree) else u"\u2716"

    tbody.append(make_row("Grade", matrix.grade_names[backend.status]))
    tbody.append(make_row("In Tree", in_tree_mark))
    tbody.append(make_row("Maintainers", backend.maintainers))
    tbody.append(make_row("Repository", backend.repository))
    tbody.append(make_row("Notes", backend.notes))

    return table
def _build_grade_table(self, matrix, content):
    """Construct the backends summary table.

    One row per backend, grouped by grade, with the fixed columns
    Backend / Status / Type / In Tree / Notes.  Appends the title and
    table to *content* and returns it.
    """
    summarytitle = nodes.title(text="Backends - Summary")
    summary = nodes.table()
    # BUG FIX: the column count was computed as len(backends) + 2
    # (copied from the per-driver summary table), but this table always
    # has exactly five fixed columns.
    cols = 5
    summarygroup = nodes.tgroup(cols=cols)
    summarybody = nodes.tbody()
    summaryhead = nodes.thead()

    for i in range(cols):
        summarygroup.append(nodes.colspec(colwidth=1))
    summarygroup.append(summaryhead)
    summarygroup.append(summarybody)
    summary.append(summarygroup)
    content.append(summarytitle)
    content.append(summary)

    # Header row.
    header = nodes.row()
    for label in ("Backend", "Status", "Type", "In Tree", "Notes"):
        cell = nodes.entry()
        cell.append(nodes.strong(text=label))
        header.append(cell)
    summaryhead.append(header)

    grades = matrix.grades
    # BUG FIX: dict.keys() returns a view with no .sort() on Python 3;
    # use sorted() instead.
    impls = sorted(matrix.backends.keys())
    for grade in grades:
        for backend in impls:
            if matrix.backends[backend].status != grade.key:
                continue
            item = nodes.row()

            namecol = nodes.entry()
            namecol.append(
                nodes.paragraph(text=matrix.backends[backend].title))
            item.append(namecol)

            statuscol = nodes.entry()
            statuscol.append(nodes.paragraph(text=grade.title))
            item.append(statuscol)

            typecol = nodes.entry()
            typecol.append(
                nodes.paragraph(text=matrix.backends[backend].type))
            item.append(typecol)

            # In-tree backends get a check mark, others a cross.
            if bool(matrix.backends[backend].in_tree):
                status = u"\u2714"
            else:
                status = u"\u2716"
            intreecol = nodes.entry()
            intreecol.append(nodes.paragraph(text=status))
            item.append(intreecol)

            notescol = nodes.entry()
            notescol.append(
                nodes.paragraph(text=matrix.backends[backend].notes))
            item.append(notescol)
            summarybody.append(item)

    return content
def make_stat_table(parent_docname: str,
                    metadata: DefaultDict[str, dict]) -> nodes.table:
    """Create a table of statistics on executed notebooks."""
    stat_table = nodes.table()
    stat_table["classes"] += ["colwidths-auto"]

    # One column for the document name plus one per statistic key.
    total_cols = len(_key2header) + 1
    group = nodes.tgroup(cols=total_cols)
    stat_table += group
    width = round(100 / total_cols, 2)
    for _ in range(total_cols):
        group += nodes.colspec(colwidth=width)

    # Header row.
    head = nodes.thead()
    group += head
    head_row = nodes.row()
    head += head_row
    head_row.extend(
        nodes.entry("", nodes.paragraph(text=label))
        for label in ["Document"] + list(_key2header.values())
    )

    # Body: one row per executed document, sorted by docname; documents
    # without execution data are skipped.
    body = nodes.tbody()
    group += body
    for docname in sorted(metadata):
        exec_data = metadata[docname].get("exec_data")
        if not exec_data:
            continue
        body_row = nodes.row()
        body += body_row

        # First cell: cross-reference to the document itself.
        xref = pending_xref(
            refdoc=parent_docname,
            reftarget=posixpath.relpath(
                docname, posixpath.dirname(parent_docname)),
            reftype="doc",
            refdomain="std",
            refexplicit=True,
            refwarn=True,
            classes=["xref", "doc"],
        )
        xref += nodes.inline(text=docname)
        doc_par = nodes.paragraph()
        doc_par += xref
        body_row.append(nodes.entry("", doc_par))

        # Remaining cells: one per statistic, in _key2header order.
        for key in _key2header.keys():
            cell_par = nodes.paragraph()
            if key == "succeeded" and exec_data[key] is False:
                # Failed runs carry the error message as a tooltip.
                cell_par += nodes.abbreviation(
                    text=_key2transform[key](exec_data[key]),
                    explanation=(exec_data["error"] or ""),
                )
            else:
                cell_par += nodes.Text(_key2transform[key](exec_data[key]))
            body_row.append(nodes.entry("", cell_par))

    return stat_table
def process_coverity_nodes(self, app, doctree, fromdocname):
    """
    This function should be triggered upon ``doctree-resolved event``

    Obtain information from Coverity server and generate a table.
    """
    env = app.builder.env

    if self.coverity_login_error:
        # Create failed topnode for every defect node when login failed.
        for node in doctree.traverse(CoverityDefect):
            top_node = create_top_node(
                "Failed to connect to Coverity Server")
            node.replace_self(top_node)
        report_warning(
            env, 'Connection failed: %s' % self.coverity_login_error_msg,
            fromdocname)
        return

    # Item matrix:
    # Create table with related items, printing their target references.
    # Only source and target items matching respective regexp shall be included
    for node in doctree.traverse(CoverityDefect):
        top_node = create_top_node(node['title'])

        table = nodes.table()
        # BUG FIX: tgroup was created without the required ``cols``
        # attribute; it must match the number of colspecs added below.
        tgroup = nodes.tgroup(cols=len(node['col']))
        for c in node['col']:
            tgroup += [nodes.colspec(colwidth=5)]
        tgroup += nodes.thead('', create_row(node['col']))
        tbody = nodes.tbody()
        tgroup += tbody
        table += tgroup

        # Setup counters
        # count_total = 0
        # count_covered = 0

        # Get items from server
        report_info(env, 'obtaining defects... ', True)
        try:
            defects = self.coverity_service.get_defects(
                self.project_name, app.config.coverity_credentials['stream'],  # noqa: E501
                checker=node['checker'], impact=node['impact'], kind=node['kind'],  # noqa: E501
                classification=node['classification'], action=node['action'],  # noqa: E501
                component=node['component'], cwe=node['cwe'], cid=node['cid'])  # noqa: E501
        except Exception as e:
            # NOTE: Exception already covers the URLError and
            # AttributeError the original listed redundantly alongside it.
            report_warning(env, 'failed with %s' % e, fromdocname)
            continue
        report_info(env, "%d received" % (defects['totalNumberOfRecords']))
        report_info(env, "building defects table... ", True)

        try:
            for defect in defects['mergedDefects']:
                row = nodes.row()

                # go through each col and decide if it is there or we print empty
                for item_col in node['col']:
                    if 'CID' == item_col:
                        # CID is default and even if it is in disregard
                        row += create_cell(
                            str(defect['cid']),
                            url=self.coverity_service.get_defect_url(
                                app.config.coverity_credentials['stream'],  # noqa: E501
                                str(defect['cid'])))
                    elif 'Category' == item_col:
                        row += create_cell(defect['displayCategory'])
                    elif 'Impact' == item_col:
                        row += create_cell(defect['displayImpact'])
                    elif 'Issue' == item_col:
                        row += create_cell(defect['displayIssueKind'])
                    elif 'Type' == item_col:
                        row += create_cell(defect['displayType'])
                    elif 'Checker' == item_col:
                        row += create_cell(defect['checkerName'])
                    elif 'Component' == item_col:
                        row += create_cell(defect['componentName'])
                    elif 'Comment' == item_col:
                        row += cov_attribute_value_to_col(defect, 'Comment')
                    elif 'Classification' == item_col:
                        row += cov_attribute_value_to_col(
                            defect, 'Classification')
                    elif 'Action' == item_col:
                        row += cov_attribute_value_to_col(defect, 'Action')
                    elif 'Status' == item_col:
                        row += cov_attribute_value_to_col(
                            defect, 'DefectStatus')
                    else:
                        # generic check which if it is missing prints empty cell anyway
                        row += cov_attribute_value_to_col(defect, item_col)
                tbody += row
            report_info(env, "done")
            top_node += table
        except AttributeError as e:
            report_info(
                env,
                'No issues matching your query or empty stream. %s' % e)
            top_node += table
            top_node += nodes.paragraph(
                text='No issues matching your query or empty stream')

        node.replace_self(top_node)
def process_needfilters(app, doctree, fromdocname):
    """Replace all Needfilter nodes with the collected needs, rendered
    as a list, a table, or a PlantUML diagram.

    Each rendered need links back to its original location.
    """
    env = app.builder.env

    # NEEDFILTER
    for node in doctree.traverse(Needfilter):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ("ids", "names", "classes", "dupnames"):
                node[att] = []
            node.replace_self([])
            continue

        # Renamed from `id` to avoid shadowing the builtin.
        node_id = node.attributes["ids"][0]
        current_needfilter = env.need_all_needfilters[node_id]
        all_needs = env.needs_all_needs

        if current_needfilter["layout"] == "list":
            content = []

        elif current_needfilter["layout"] == "diagram":
            content = []
            try:
                if "sphinxcontrib.plantuml" not in app.config.extensions:
                    raise ImportError
                from sphinxcontrib.plantuml import plantuml
            except ImportError:
                # PlantUML missing: emit an error node instead of a diagram.
                content = nodes.error()
                para = nodes.paragraph()
                text = nodes.Text("PlantUML is not available!",
                                  "PlantUML is not available!")
                para += text
                content.append(para)
                node.replace_self(content)
                continue

            plantuml_block_text = ".. plantuml::\n" "\n" " @startuml" " @enduml"
            puml_node = plantuml(plantuml_block_text)
            puml_node["uml"] = "@startuml\n"
            puml_connections = ""
        elif current_needfilter["layout"] == "table":
            content = nodes.table()
            # BUG FIX: tgroup was created without the required ``cols``
            # attribute; six columns are defined below.
            tgroup = nodes.tgroup(cols=6)
            id_colspec = nodes.colspec(colwidth=5)
            title_colspec = nodes.colspec(colwidth=15)
            type_colspec = nodes.colspec(colwidth=5)
            status_colspec = nodes.colspec(colwidth=5)
            links_colspec = nodes.colspec(colwidth=5)
            tags_colspec = nodes.colspec(colwidth=5)
            tgroup += [id_colspec, title_colspec, type_colspec,
                       status_colspec, links_colspec, tags_colspec]
            tgroup += nodes.thead(
                "",
                nodes.row(
                    "",
                    nodes.entry("", nodes.paragraph("", "ID")),
                    nodes.entry("", nodes.paragraph("", "Title")),
                    nodes.entry("", nodes.paragraph("", "Type")),
                    nodes.entry("", nodes.paragraph("", "Status")),
                    nodes.entry("", nodes.paragraph("", "Links")),
                    nodes.entry("", nodes.paragraph("", "Tags")),
                ),
            )
            tbody = nodes.tbody()
            tgroup += tbody
            content += tgroup

        all_needs = list(all_needs.values())
        found_needs = process_filters(app, all_needs, current_needfilter)

        line_block = nodes.line_block()
        for need_info in found_needs:
            if current_needfilter["layout"] == "list":
                para = nodes.line()
                description = "{}: {}".format(need_info["id"],
                                              need_info["title"])

                if current_needfilter["show_status"] and need_info["status"]:
                    description += " (%s)" % need_info["status"]

                if current_needfilter["show_tags"] and need_info["tags"]:
                    description += " [%s]" % "; ".join(need_info["tags"])

                title = nodes.Text(description, description)

                # Create a reference
                if need_info["hide"]:
                    para += title
                else:
                    ref = nodes.reference("", "")
                    ref["refdocname"] = need_info["docname"]
                    ref["refuri"] = app.builder.get_relative_uri(
                        fromdocname, need_info["docname"])
                    ref["refuri"] += "#" + need_info["target_node"]["refid"]
                    ref.append(title)
                    para += ref
                line_block.append(para)
            elif current_needfilter["layout"] == "table":
                row = nodes.row()
                row += row_col_maker(app, fromdocname, env.needs_all_needs,
                                     need_info, "id", make_ref=True)
                row += row_col_maker(app, fromdocname, env.needs_all_needs,
                                     need_info, "title")
                row += row_col_maker(app, fromdocname, env.needs_all_needs,
                                     need_info, "type_name")
                row += row_col_maker(app, fromdocname, env.needs_all_needs,
                                     need_info, "status")
                row += row_col_maker(app, fromdocname, env.needs_all_needs,
                                     need_info, "links", ref_lookup=True)
                row += row_col_maker(app, fromdocname, env.needs_all_needs,
                                     need_info, "tags")
                tbody += row
            elif current_needfilter["layout"] == "diagram":
                # Link calculation
                # All links we can get from docutils functions will be relative.
                # But the generated link in the svg will be relative to the svg-file location
                # (e.g. server.com/docs/_images/sqwxo499cnq329439dfjne.svg)
                # and not to current documentation. Therefore we need to add ../ to get out of the image folder.
                try:
                    # NOTE(review): urlparse() returns a ParseResult here;
                    # urllib.parse.quote() may have been intended — confirm.
                    link = (
                        "../"
                        + app.builder.get_target_uri(need_info["docname"])
                        + "?highlight={}".format(urlparse(need_info["title"]))
                        + "#"
                        + need_info["target_node"]["refid"]
                    )  # Gets mostly called during latex generation
                except NoUri:
                    link = ""

                diagram_template = Template(env.config.needs_diagram_template)
                node_text = diagram_template.render(**need_info)

                puml_node["uml"] += '{style} "{node_text}" as {id} [[{link}]] {color}\n'.format(
                    id=need_info["id"],
                    node_text=node_text,
                    link=link,
                    color=need_info["type_color"],
                    style=need_info["type_style"],
                )
                for link in need_info["links"]:
                    puml_connections += "{id} --> {link}\n".format(
                        id=need_info["id"], link=link)

        if current_needfilter["layout"] == "list":
            content.append(line_block)

        if current_needfilter["layout"] == "diagram":
            puml_node["uml"] += puml_connections

            # Create a legend
            if current_needfilter["show_legend"]:
                puml_node["uml"] += create_legend(app.config.needs_types)
            puml_node["uml"] += "@enduml"
            puml_node["incdir"] = os.path.dirname(
                current_needfilter["docname"])
            # Needed for plantuml >= 0.9
            puml_node["filename"] = os.path.split(
                current_needfilter["docname"])[1]
            content.append(puml_node)

        if len(content) == 0:
            nothing_found = "No needs passed the filters"
            para = nodes.line()
            nothing_found_node = nodes.Text(nothing_found, nothing_found)
            para += nothing_found_node
            content.append(para)
        if current_needfilter["show_filters"]:
            para = nodes.paragraph()
            filter_text = "Used filter:"
            filter_text += (
                " status(%s)" % " OR ".join(current_needfilter["status"])
                if len(current_needfilter["status"]) > 0
                else ""
            )
            if len(current_needfilter["status"]) > 0 and len(current_needfilter["tags"]) > 0:
                filter_text += " AND "
            filter_text += (
                " tags(%s)" % " OR ".join(current_needfilter["tags"])
                if len(current_needfilter["tags"]) > 0
                else ""
            )
            if (len(current_needfilter["status"]) > 0 or len(current_needfilter["tags"]) > 0) and len(
                current_needfilter["types"]
            ) > 0:
                filter_text += " AND "
            filter_text += (
                " types(%s)" % " OR ".join(current_needfilter["types"])
                if len(current_needfilter["types"]) > 0
                else ""
            )

            filter_node = nodes.emphasis(filter_text, filter_text)
            para += filter_node
            content.append(para)

        node.replace_self(content)
def process_needtables(app, doctree, fromdocname):
    """
    Replace all Needtable nodes with a table of filtered need nodes.

    Triggered on ``doctree-resolved``.

    :param app: Sphinx application object
    :param doctree: resolved doctree for the current document
    :param fromdocname: docname of the document being resolved
    :return: None (nodes are replaced in place)
    """
    env = app.builder.env

    for node in doctree.traverse(Needtable):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                node[att] = []
            node.replace_self([])
            continue

        # renamed from ``id`` to avoid shadowing the builtin
        table_id = node.attributes["ids"][0]
        current_needtable = env.need_all_needtables[table_id]
        all_needs = env.needs_all_needs

        # Resolve the table style: node setting wins if valid, otherwise the
        # global config, otherwise DATATABLES.
        if current_needtable["style"] == "" or current_needtable["style"].upper() not in ["TABLE", "DATATABLES"]:
            if app.config.needs_table_style == "":
                style = "DATATABLES"
            else:
                style = app.config.needs_table_style.upper()
        else:
            style = current_needtable["style"].upper()

        # Prepare table
        classes = ["NEEDS_{style}".format(style=style)]
        content = nodes.table(classes=classes)
        tgroup = nodes.tgroup()

        # Define Table column width
        # ToDo: Find a way to choose the perfect width automatically.
        for col in current_needtable["columns"]:
            if col == "TITLE":
                tgroup += nodes.colspec(colwidth=15)
            else:
                tgroup += nodes.colspec(colwidth=5)

        # Header row: column keys are upper-case internally; display them
        # title-cased (except the literal "ID") with underscores as spaces.
        node_columns = []
        for col in current_needtable["columns"]:
            header_name = col.title() if col != "ID" else col
            header_name = header_name.replace("_", " ")
            node_columns.append(
                nodes.entry('', nodes.paragraph('', header_name)))

        tgroup += nodes.thead('', nodes.row('', *node_columns))
        tbody = nodes.tbody()
        tgroup += tbody
        content += tgroup

        all_needs = list(all_needs.values())
        if current_needtable["sort_by"] is not None:
            if current_needtable["sort_by"] == "id":
                # lambda parameter renamed so it no longer shadows ``node``
                all_needs = sorted(all_needs, key=lambda need: need["id"])
            elif current_needtable["sort_by"] == "status":
                all_needs = sorted(all_needs, key=status_sorter)

        # Perform filtering of needs
        found_needs = procces_filters(all_needs, current_needtable)

        for need_info in found_needs:
            temp_need = need_info.copy()
            if temp_need['is_need']:
                row = nodes.row(classes=['need'])
                prefix = ''
            else:
                # need *parts* are rendered with their full id and content as title
                row = nodes.row(classes=['need_part'])
                temp_need['id'] = temp_need['id_complete']
                prefix = app.config.needs_part_prefix
                temp_need['title'] = temp_need['content']

            for col in current_needtable["columns"]:
                if col == "ID":
                    row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "id",
                                         make_ref=True, prefix=prefix)
                elif col == "TITLE":
                    row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "title",
                                         prefix=app.config.needs_part_prefix)
                elif col == "INCOMING":
                    row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "links_back",
                                         ref_lookup=True)
                elif col == "OUTGOING":
                    row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "links",
                                         ref_lookup=True)
                else:
                    row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, col.lower())
            tbody += row

            # Need part rows
            if current_needtable["show_parts"] and need_info['is_need']:
                for key, part in need_info["parts"].items():
                    row = nodes.row(classes=['need_part'])
                    # The dict needs to be manipulated, so that row_col_maker() can be used
                    temp_part = part.copy()
                    temp_part['docname'] = need_info['docname']

                    for col in current_needtable["columns"]:
                        if col == "ID":
                            temp_part['id'] = '.'.join([need_info['id'], part['id']])
                            row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_part, "id",
                                                 make_ref=True, prefix=app.config.needs_part_prefix)
                        elif col == "TITLE":
                            row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_part, "content",
                                                 prefix=app.config.needs_part_prefix)
                        elif col == "INCOMING":
                            row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_part, "links_back",
                                                 ref_lookup=True)
                        else:
                            row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_part, col.lower())
                    tbody += row

        if len(found_needs) == 0:
            content.append(no_needs_found_paragraph())

        # add filter information to output
        if current_needtable["show_filters"]:
            content.append(used_filter_paragraph(current_needtable))

        node.replace_self(content)
def process_req_nodes(app, doctree, fromdocname): for node in doctree.traverse(req): if not app.config.req_include_reqs: node.parent.remove(node) # Replace all reqtrace nodes with a list of the collected reqs. # Augment each req with a backlink to the original location. env = app.builder.env for node in doctree.traverse(reqtrace): if not app.config.req_include_reqs: node.replace_self([]) continue content = [] reqtable = nodes.table(classes=['reqtable']) tgroup = nodes.tgroup(cols=3) reqtable += tgroup tgroup += nodes.colspec(colwidth=15, classes=['reqid']) tgroup += nodes.colspec(colwidth=15, classes=['section']) tgroup += nodes.colspec(colwidth=70, classes=['evidence']) thead = nodes.thead() tgroup += thead append_row(thead, ['Req', 'Section', 'Evidence']) tbody = nodes.tbody() tgroup += tbody sorted_req = sorted(env.req_all_reqs, key=lambda req: req['reqid']) for req_info in sorted_req: refpara = nodes.paragraph() refpara += nodes.Text("", "") # Create a reference try: newnode = nodes.reference('', '') #pprint(req_info['reqid']) section = req_info['section'] section_name = '' if section.get('secnumber'): section_name += (('%s' + self.secnumber_suffix) % '.'.join(map(str, node['secnumber']))) section_name += section[section.first_child_matching_class( title)][0].astext() pprint(section_name) innernode = nodes.emphasis(section_name, section_name) newnode['refdocname'] = req_info['docname'] newnode['refuri'] = app.builder.get_relative_uri( fromdocname, req_info['docname']) newnode['refuri'] += '#' + req_info['target']['refid'] newnode.append(innernode) refpara += newnode refpara += nodes.Text('', '') except: continue append_row(tbody, [req_info['reqid'], refpara, req_info['evidence']]) content.append(reqtable) node.replace_self(content)
def run(self):
    """Render the Ceph releases YAML file as a 4-column releases table.

    ``self.arguments[0]`` is the path to the releases YAML file;
    ``self.arguments[1] == 'current'`` selects active releases (EOL column
    shows the estimated target date), anything else selects archived ones.

    :return: list with the built table node, or a single warning node if the
             YAML file cannot be read.
    """
    filename = self.arguments[0]
    current = self.arguments[1] == 'current'
    document = self.state.document
    env = document.settings.env
    rel_filename, filename = env.relfn2path(filename)
    env.note_dependency(filename)
    try:
        with open(filename, 'r') as fp:
            releases = yaml.safe_load(fp)
            releases = releases["releases"]
    except Exception as e:
        return [
            document.reporter.warning(
                "Failed to open Ceph releases file {}: {}".format(
                    filename, e),
                line=self.lineno)
        ]

    table = nodes.table()
    # BUG FIX: the table has four columns (Name, Initial release, Latest,
    # End of life) but the tgroup previously declared cols=3.
    tgroup = nodes.tgroup(cols=4)
    table += tgroup

    # One colspec per column (was a redundant ``enumerate(range(4))``).
    tgroup.extend(
        nodes.colspec(colwidth=30, colname='c' + str(idx))
        for idx in range(4))

    thead = nodes.thead()
    tgroup += thead
    row_node = nodes.row()
    thead += row_node
    row_node.extend(
        nodes.entry(h, nodes.paragraph(text=h)) for h in [
            "Name", "Initial release", "Latest",
            "End of life (estimated)" if current else "End of life"
        ])

    # Newest release series first.
    releases = releases.items()
    releases = sorted(releases, key=lambda t: t[0], reverse=True)

    tbody = nodes.tbody()
    tgroup += tbody

    rows = []
    for code_name, info in releases:
        actual_eol = info.get("actual_eol", None)
        # "current" view hides already-EOLed series; archive view shows only them.
        if current:
            if actual_eol and actual_eol <= datetime.datetime.now().date():
                continue
        else:
            if not actual_eol:
                continue
        trow = nodes.row()

        # Release name cell — parsed as RST so the `Name <target>`_ link resolves.
        entry = nodes.entry()
        para = nodes.paragraph(
            text=f"`{code_name.title()} <{code_name}>`_")
        sphinx.util.nodes.nested_parse_with_titles(self.state, para, entry)
        trow += entry

        # Sort point releases by date then by numeric version components.
        sorted_releases = sorted(
            info["releases"],
            key=lambda t: [t["released"]] + list(
                map(lambda v: int(v), t["version"].split("."))))
        oldest_release = sorted_releases[0]
        newest_release = sorted_releases[-1]

        # Initial release date cell.
        entry = nodes.entry()
        para = nodes.paragraph(
            text="{}".format(oldest_release["released"]))
        entry += para
        trow += entry

        # Latest version cell; ``skip_ref`` suppresses the RST reference.
        entry = nodes.entry()
        if newest_release.get("skip_ref", False):
            para = nodes.paragraph(
                text="{}".format(newest_release["version"]))
        else:
            para = nodes.paragraph(
                text="`{}`_".format(newest_release["version"]))
        sphinx.util.nodes.nested_parse_with_titles(self.state, para, entry)
        trow += entry

        # EOL cell: estimated target date for current, actual date otherwise.
        entry = nodes.entry()
        if current:
            para = nodes.paragraph(text=info.get("target_eol", '--'))
        else:
            para = nodes.paragraph(text=info.get('actual_eol', '--'))
        entry += para
        trow += entry
        rows.append(trow)

    tbody.extend(rows)

    return [table]
def _build_markup(self, notifications): content = [] cols = ['Event type', 'Notification class', 'Payload class', 'Sample'] table = nodes.table() content.append(table) group = nodes.tgroup(cols=len(cols)) table.append(group) head = nodes.thead() group.append(head) for _ in cols: group.append(nodes.colspec(colwidth=1)) body = nodes.tbody() group.append(body) # fill the table header row = nodes.row() body.append(row) for col_name in cols: col = nodes.entry() row.append(col) text = nodes.strong(text=col_name) col.append(text) # fill the table content, one notification per row for name, payload, sample_file in notifications: event_type = sample_file[0:-5].replace('-', '.') row = nodes.row() body.append(row) col = nodes.entry() row.append(col) text = nodes.literal(text=event_type) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=name) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=payload) col.append(text) col = nodes.entry() row.append(col) with open(self.SAMPLE_ROOT + sample_file, 'r') as f: sample_content = f.read() event_type = sample_file[0:-5] html_str = self.TOGGLE_SCRIPT % ((event_type, ) * 3) html_str += ("<input type='button' id='%s-hideshow' " "value='hide/show sample'>" % event_type) html_str += ("<div id='%s-div'><pre>%s</pre></div>" % (event_type, sample_content)) raw = nodes.raw('', html_str, format="html") col.append(raw) return content
def process_item_nodes(app, doctree, fromdocname):
    """
    This function should be triggered upon ``doctree-resolved event``

    Replace all ItemList nodes with a list of the collected items.
    Augment each item with a backlink to the original location.

    Also resolves ItemMatrix, Item2DMatrix, ItemTree, PendingItemXref and
    Item nodes (see the per-section comments below).

    :param app: Sphinx application object
    :param doctree: resolved doctree
    :param fromdocname: docname of the document being resolved
    """
    env = app.builder.env

    # Older Sphinx versions lack a later self-test hook, so validate the
    # traceability collection here and report problems as warnings.
    if sphinx_version < '1.6.0':
        try:
            env.traceability_collection.self_test(fromdocname)
        except TraceabilityException as err:
            report_warning(env, err, fromdocname)
        except MultipleTraceabilityExceptions as errs:
            for err in errs.iter():
                report_warning(env, err, err.get_document())

    # Item matrix:
    # Create table with related items, printing their target references.
    # Only source and target items matching respective regexp shall be included
    for node in doctree.traverse(ItemMatrix):
        showcaptions = not node['nocaptions']
        source_ids = env.traceability_collection.get_items(node['source'])
        target_ids = env.traceability_collection.get_items(node['target'])
        top_node = create_top_node(node['title'])
        table = nodes.table()
        tgroup = nodes.tgroup()
        left_colspec = nodes.colspec(colwidth=5)
        right_colspec = nodes.colspec(colwidth=5)
        tgroup += [left_colspec, right_colspec]
        tgroup += nodes.thead('', nodes.row(
            '',
            nodes.entry('', nodes.paragraph('', node['sourcetitle'])),
            nodes.entry('', nodes.paragraph('', node['targettitle']))))
        tbody = nodes.tbody()
        tgroup += tbody
        table += tgroup

        # Empty 'type' option means: consider every known relationship kind.
        relationships = node['type']
        if not relationships:
            relationships = env.traceability_collection.iter_relations()

        # Coverage statistics: a source item counts as covered when it has at
        # least one external or internal target via the selected relationships.
        count_total = 0
        count_covered = 0

        for source_id in source_ids:
            source_item = env.traceability_collection.get_item(source_id)
            count_total += 1
            covered = False
            row = nodes.row()
            left = nodes.entry()
            left += make_internal_item_ref(app, node, fromdocname,
                                           source_id, showcaptions)
            right = nodes.entry()
            # External targets first (relationship name matches the
            # external-relationship regex) ...
            for relationship in relationships:
                if REGEXP_EXTERNAL_RELATIONSHIP.search(relationship):
                    for target_id in source_item.iter_targets(relationship):
                        right += make_external_item_ref(app, target_id,
                                                        relationship)
                        covered = True
            # ... then internal targets from the filtered target set.
            for target_id in target_ids:
                if env.traceability_collection.are_related(
                        source_id, relationships, target_id):
                    right += make_internal_item_ref(app, node, fromdocname,
                                                    target_id, showcaptions)
                    covered = True
            if covered:
                count_covered += 1
            row += left
            row += right
            tbody += row

        # Guard against an empty source set.
        try:
            percentage = int(100 * count_covered / count_total)
        except ZeroDivisionError:
            percentage = 0
        disp = 'Statistics: {cover} out of {total} covered: {pct}%'.format(
            cover=count_covered,
            total=count_total,
            pct=percentage)
        if node['stats']:
            p_node = nodes.paragraph()
            txt = nodes.Text(disp)
            p_node += txt
            top_node += p_node

        top_node += table
        node.replace_self(top_node)

    # Item 2D matrix:
    # Create table with related items, printing their target references.
    # Only source and target items matching respective regexp shall be included
    for node in doctree.traverse(Item2DMatrix):
        source_ids = env.traceability_collection.get_items(node['source'])
        target_ids = env.traceability_collection.get_items(node['target'])
        top_node = create_top_node(node['title'])
        table = nodes.table()
        tgroup = nodes.tgroup()
        # First column holds the target item labels; one more column per source.
        colspecs = [nodes.colspec(colwidth=5)]
        hrow = nodes.row('', nodes.entry('', nodes.paragraph('', '')))
        for source_id in source_ids:
            colspecs.append(nodes.colspec(colwidth=5))
            src_cell = make_internal_item_ref(app, node, fromdocname,
                                              source_id, False)
            hrow.append(nodes.entry('', src_cell))
        tgroup += colspecs
        tgroup += nodes.thead('', hrow)
        tbody = nodes.tbody()
        # One row per target; each cell shows the 'hit' or 'miss' marker text.
        for target_id in target_ids:
            row = nodes.row()
            tgt_cell = nodes.entry()
            tgt_cell += make_internal_item_ref(app, node, fromdocname,
                                               target_id, False)
            row += tgt_cell
            for source_id in source_ids:
                cell = nodes.entry()
                p_node = nodes.paragraph()
                if env.traceability_collection.are_related(
                        source_id, node['type'], target_id):
                    txt = node['hit']
                else:
                    txt = node['miss']
                p_node += nodes.Text(txt)
                cell += p_node
                row += cell
            tbody += row
        tgroup += tbody
        table += tgroup
        top_node += table
        node.replace_self(top_node)

    # Item list:
    # Create list with target references.
    # Only items matching list regexp shall be included
    for node in doctree.traverse(ItemList):
        item_ids = env.traceability_collection.get_items(node['filter'],
                                                         node['attributes'])
        showcaptions = not node['nocaptions']
        top_node = create_top_node(node['title'])
        ul_node = nodes.bullet_list()
        for i in item_ids:
            bullet_list_item = nodes.list_item()
            p_node = nodes.paragraph()
            p_node.append(make_internal_item_ref(app, node, fromdocname, i,
                                                 showcaptions))
            bullet_list_item.append(p_node)
            ul_node.append(bullet_list_item)
        top_node += ul_node
        node.replace_self(top_node)

    # Item tree:
    # Create list with target references. Only items matching list regexp
    # shall be included
    for node in doctree.traverse(ItemTree):
        top_item_ids = env.traceability_collection.get_items(node['top'])
        showcaptions = not node['nocaptions']
        top_node = create_top_node(node['title'])
        ul_node = nodes.bullet_list()
        # 'bonsai' class: presumably hooks a JS tree widget — verify in assets.
        ul_node.set_class('bonsai')
        for i in top_item_ids:
            # Only render roots; children come via the recursive helper below.
            if is_item_top_level(env, i, node['top'],
                                 node['top_relation_filter']):
                ul_node.append(generate_bullet_list_tree(app, env, node,
                                                         fromdocname, i,
                                                         showcaptions))
        top_node += ul_node
        node.replace_self(top_node)

    # Resolve item cross references (from ``item`` role)
    for node in doctree.traverse(PendingItemXref):
        # Create a dummy reference to be used if target reference fails
        new_node = make_refnode(app.builder,
                                fromdocname,
                                fromdocname,
                                'ITEM_NOT_FOUND',
                                node[0].deepcopy(),
                                node['reftarget'] + '??')
        # If target exists, try to create the reference
        item_info = env.traceability_collection.get_item(node['reftarget'])
        if item_info:
            if item_info.is_placeholder():
                docname, lineno = get_source_line(node)
                report_warning(env,
                               'Traceability: cannot link to %s, item is not '
                               'defined' % item_info.get_id(),
                               docname, lineno)
            else:
                try:
                    new_node = make_refnode(app.builder,
                                            fromdocname,
                                            item_info.docname,
                                            item_info.node['refid'],
                                            node[0].deepcopy(),
                                            node['reftarget'])
                except NoUri:
                    # ignore if no URI can be determined, e.g. for LaTeX output :(
                    pass
        else:
            docname, lineno = get_source_line(node)
            report_warning(env,
                           'Traceability: item %s not found' % node['reftarget'],
                           docname, lineno)
        node.replace_self(new_node)

    # Item: replace item nodes, with admonition, list of relationships
    for node in doctree.traverse(Item):
        currentitem = env.traceability_collection.get_item(node['id'])
        showcaptions = not node['nocaptions']
        header = currentitem.get_id()
        if currentitem.caption:
            header += ' : ' + currentitem.caption
        top_node = create_top_node(header)
        if app.config.traceability_render_relationship_per_item:
            par_node = nodes.paragraph()
            dl_node = nodes.definition_list()
            # Attributes block (one definition-list item for all attributes).
            if currentitem.iter_attributes():
                li_node = nodes.definition_list_item()
                dt_node = nodes.term()
                txt = nodes.Text('Attributes')
                dt_node.append(txt)
                li_node.append(dt_node)
                for attr in currentitem.iter_attributes():
                    dd_node = nodes.definition()
                    p_node = nodes.paragraph()
                    txt = nodes.Text('{attr}: {value}'.format(
                        attr=attr,
                        value=currentitem.get_attribute(attr)))
                    p_node.append(txt)
                    dd_node.append(p_node)
                    li_node.append(dd_node)
                dl_node.append(li_node)
            # One definition-list item per relationship kind with targets.
            for rel in env.traceability_collection.iter_relations():
                tgts = currentitem.iter_targets(rel)
                if tgts:
                    li_node = nodes.definition_list_item()
                    dt_node = nodes.term()
                    # Relationships without a configured display string are
                    # skipped entirely.
                    if rel in app.config.traceability_relationship_to_string:
                        relstr = app.config.traceability_relationship_to_string[rel]
                    else:
                        continue
                    txt = nodes.Text(relstr)
                    dt_node.append(txt)
                    li_node.append(dt_node)
                    for tgt in tgts:
                        dd_node = nodes.definition()
                        p_node = nodes.paragraph()
                        if REGEXP_EXTERNAL_RELATIONSHIP.search(rel):
                            link = make_external_item_ref(app, tgt, rel)
                        else:
                            link = make_internal_item_ref(app, node,
                                                          fromdocname, tgt,
                                                          showcaptions)
                        p_node.append(link)
                        dd_node.append(p_node)
                        li_node.append(dd_node)
                    dl_node.append(li_node)
            par_node.append(dl_node)
            top_node.append(par_node)
        # Note: content should be displayed during read of RST file, as it contains other RST objects
        node.replace_self(top_node)
def _build_grade_table(self, matrix, content):
    """Append a "Backends - Summary" table to *content*.

    One row per backend, grouped by grade order, with Backend / Status /
    Type / In Tree / Notes columns.  Returns *content* for chaining.
    """
    title_node = nodes.subtitle(text="Backends - Summary")
    summary_table = nodes.table()
    summary_table.set_class("table")
    summary_table.set_class("table-condensed")

    # Two fixed columns (Backend, Status... etc.) plus one per backend.
    num_cols = len(list(six.iterkeys(matrix.backends))) + 2

    group_node = nodes.tgroup(cols=num_cols)
    body_node = nodes.tbody()
    head_node = nodes.thead()

    for _ in range(num_cols):
        group_node.append(nodes.colspec(colwidth=1))
    group_node.append(head_node)
    group_node.append(body_node)
    summary_table.append(group_node)
    content.append(title_node)
    content.append(summary_table)

    # Header row, one bold entry per column title.
    header_row = nodes.row()
    for column_title in ("Backend", "Status", "Type", "In Tree", "Notes"):
        cell = nodes.entry()
        cell.append(nodes.strong(text=column_title))
        header_row.append(cell)
    head_node.append(header_row)

    backend_keys = sorted(six.iterkeys(matrix.backends))

    # Emit backends grouped by grade, preserving the grade ordering.
    for grade in matrix.grades:
        for key in backend_keys:
            backend = matrix.backends[key]
            if backend.status != grade.key:
                continue

            data_row = nodes.row()

            # Backend title.
            name_cell = nodes.entry()
            name_cell.append(nodes.paragraph(text=backend.title))
            data_row.append(name_cell)

            # Grade/status label.
            status_cell = nodes.entry()
            status_para = nodes.paragraph(text=grade.title)
            status_para.set_class("label-%s" % grade.css_class)
            status_para.set_class("label")
            status_cell.append(status_para)
            data_row.append(status_cell)

            # Backend type label.
            type_cell = nodes.entry()
            type_para = nodes.paragraph(text=backend.type)
            type_para.set_class("label")
            type_para.set_class("label-info")
            type_cell.append(type_para)
            data_row.append(type_cell)

            # In-tree marker: check mark or cross.
            if bool(backend.in_tree):
                tree_para = nodes.paragraph(text=u"\u2714")
                tree_para.set_class("label")
                tree_para.set_class("label-success")
            else:
                tree_para = nodes.paragraph(text=u"\u2716")
                tree_para.set_class("label")
                tree_para.set_class("label-danger")
            tree_cell = nodes.entry()
            tree_cell.append(tree_para)
            data_row.append(tree_cell)

            # Free-form notes.
            notes_cell = nodes.entry()
            notes_cell.append(nodes.paragraph(text=backend.notes))
            data_row.append(notes_cell)

            body_node.append(data_row)
    return content
def make_pcap_table(self, sequence, sequence_dir):
    """Build a table_plot_node holding a table of captured PCAP rows.

    Scans the timing entries twice: first to discover which bus inputs
    change and what the WRITE header layout is, then to decode each DATA
    output into one table row.  Returns an empty table_plot_node when the
    sequence never wrote a header.
    """
    table_node = table_plot_node()
    # find the inputs that change
    input_changes = []
    data_header = []
    for ts, inputs, outputs in timing_entries(sequence, sequence_dir):
        for name in inputs:
            if "." in name:
                # dotted names are bus fields whose changes we track
                input_changes.append(name)
            elif name == "START_WRITE":
                # a new header definition starts: discard any previous one
                data_header = []
            elif name == "WRITE":
                # header word; value may be hex ("0x..") or any int literal
                if "x" in inputs[name]:
                    hdr_name = "0x%X" % int(inputs[name], 16)
                else:
                    hdr_name = "0x%X" % int(inputs[name], 0)
                data_header.append(hdr_name)
    if not data_header:
        # nothing was written, so there is no table to produce
        return table_node
    table_hdr = ["Row"]
    # This contains instructions about how to process each data entry
    # - None: Just emit it
    # - str name: It is the higher order bits of a given name
    # - [int shift]: For each shifted value, emit the relevant bit entry
    bit_extracts = []
    for name in data_header:
        if name.startswith("BITS"):
            # Add relevant bit entries
            # header "BITS<q>" selects quadrant q of the bit bus
            # (bits q*32 .. q*32+31)
            quadrant = int(name[4])
            shifts = []
            bit_extracts.append(shifts)
            for bus_name in input_changes:
                r = range(quadrant * 32, (quadrant + 1) * 32)
                idx = cparser.bit_bus.get(bus_name, None)
                if idx in r and bus_name not in table_hdr:
                    table_hdr.append(bus_name)
                    shifts.append(idx - quadrant * 32)
        elif name.endswith("_H"):
            # This is the higher order entry
            bit_extracts.append(name[:-2])
        else:
            # Add pos entry
            bit_extracts.append(None)
            table_hdr.append(name)
    # Create a table
    table = nodes.table()
    table_node += table
    tgroup = nodes.tgroup(cols=len(table_hdr))
    table += tgroup
    # column width proportional to the header text length
    for col_width in [len(x) for x in table_hdr]:
        tgroup += nodes.colspec(colwidth=col_width)
    # add the header
    thead = nodes.thead()
    tgroup += thead
    thead += self.make_row(table_hdr)
    # add the body
    tbody = nodes.tbody()
    tgroup += tbody
    # Add each row
    # r: row number, row: accumulating cells, high: pending high words
    # keyed by column name, i: index into bit_extracts for the next DATA word
    r = 0
    row = [r]
    high = {}
    i = 0
    for ts, inputs, outputs in timing_entries(sequence, sequence_dir):
        for names in outputs:
            if names == "DATA":
                if "x" in outputs["DATA"]:
                    data = int(outputs["DATA"], 16)
                else:
                    data = int(outputs["DATA"], 0)
                if data is not None:
                    extract = bit_extracts[i]
                    if type(extract) == list:
                        # bit-field word: emit one 0/1 cell per recorded shift
                        for shift in extract:
                            row.append((data >> shift) & 1)
                    elif type(extract) == str:
                        # high word: remember it until the row is complete
                        high[extract] = data
                    else:
                        # plain positional value
                        row.append(data)
                    i += 1
                    if i >= len(bit_extracts):
                        # row complete: merge pending high words into their
                        # low-word columns, emit the row and reset state
                        for name, val in high.items():
                            idx = [
                                ix for ix, x in enumerate(table_hdr)
                                if x == name
                            ][0]
                            row[idx] += val << 32
                        tbody += self.make_row(row)
                        r += 1
                        row = [r]
                        high = {}
                        i = 0
    return table_node
def _build_backend_detail_table(self, backend, matrix): table = nodes.table() table.set_class("table") table.set_class("table-condensed") tgroup = nodes.tgroup(cols=2) tbody = nodes.tbody() for i in range(2): tgroup.append(nodes.colspec(colwidth=1)) tgroup.append(tbody) table.append(tgroup) graderow = nodes.row() gradetitle = nodes.entry() gradetitle.append(nodes.strong(text="Grade")) gradetext = nodes.entry() class_name = "label-%s" % matrix.grade_classes[backend.status] status_text = nodes.paragraph(text=matrix.grade_names[backend.status]) status_text.set_class(class_name) status_text.set_class("label") gradetext.append(status_text) graderow.append(gradetitle) graderow.append(gradetext) tbody.append(graderow) treerow = nodes.row() treetitle = nodes.entry() treetitle.append(nodes.strong(text="In Tree")) if bool(backend.in_tree): status = u"\u2714" intree = nodes.paragraph(text=status) intree.set_class("label") intree.set_class("label-success") else: status = u"\u2716" intree = nodes.paragraph(text=status) intree.set_class("label") intree.set_class("label-danger") status = u"\u2714" treetext = nodes.entry() treetext.append(intree) treerow.append(treetitle) treerow.append(treetext) tbody.append(treerow) maintrow = nodes.row() mainttitle = nodes.entry() mainttitle.append(nodes.strong(text="Maintainers")) mainttext = nodes.entry() mainttext.append(nodes.paragraph(text=backend.maintainers)) maintrow.append(mainttitle) maintrow.append(mainttext) tbody.append(maintrow) reporow = nodes.row() repotitle = nodes.entry() repotitle.append(nodes.strong(text="Repository")) repotext = nodes.entry() repotext.append(nodes.paragraph(text=backend.repository)) reporow.append(repotitle) reporow.append(repotext) tbody.append(reporow) noterow = nodes.row() notetitle = nodes.entry() notetitle.append(nodes.strong(text="Notes")) notetext = nodes.entry() notetext.append(nodes.paragraph(text=backend.notes)) noterow.append(notetitle) noterow.append(notetext) tbody.append(noterow) return table
def process_coverage(app, fromdocname, doccmd, xmlcmd, cli_type):
    """Build a coverage table comparing documented commands against XML definitions.

    :param app: Sphinx application object
    :param fromdocname: docname the table is rendered into
    :param doccmd: list of command dicts found in the documentation
    :param xmlcmd: list of command dicts found in the XML definitions
    :param cli_type: unused here; kept for interface compatibility with callers
    :return: docutils table node
    """
    coverage_list = {}
    int_docs = 0
    int_xml = 0
    # Index every documented command by its stripped form.
    for cmd in doccmd:
        coverage_item = {
            'doccmd': None,
            'xmlcmd': None,
            'doccmd_item': None,
            'xmlcmd_item': None,
            'indocs': False,
            'inxml': False,
            'xmlfilename': None
        }
        coverage_item['doccmd'] = cmd['cmd']
        coverage_item['doccmd_item'] = cmd
        coverage_item['indocs'] = True
        int_docs += 1
        coverage_list[strip_cmd(cmd['cmd'])] = dict(coverage_item)

    # Merge in the XML-defined commands: either extend an existing entry or
    # create an XML-only one.
    for cmd in xmlcmd:
        strip = strip_cmd(cmd['cmd'])
        if strip not in coverage_list:
            coverage_item = {
                'doccmd': None,
                'xmlcmd': None,
                'doccmd_item': None,
                'xmlcmd_item': None,
                'indocs': False,
                'inxml': False,
                'xmlfilename': None
            }
            coverage_item['xmlcmd'] = cmd['cmd']
            coverage_item['xmlcmd_item'] = cmd
            coverage_item['inxml'] = True
            coverage_item['xmlfilename'] = cmd['filename']
            int_xml += 1
            coverage_list[strip] = dict(coverage_item)
        else:
            coverage_list[strip]['xmlcmd'] = cmd['cmd']
            coverage_list[strip]['xmlcmd_item'] = cmd
            coverage_list[strip]['inxml'] = True
            coverage_list[strip]['xmlfilename'] = cmd['filename']
            int_xml += 1

    # Build the output table.
    # (A first table/tgroup pair that was created and immediately discarded
    # has been removed — dead code.)
    header = (f'{int_docs}/{len(coverage_list)} in Docs',
              f'{int_xml}/{len(coverage_list)} in XML', 'Command')
    colwidths = (1, 1, 8)
    table = nodes.table()
    tgroup = nodes.tgroup(cols=len(header))
    table += tgroup
    for colwidth in colwidths:
        tgroup += nodes.colspec(colwidth=colwidth)
    thead = nodes.thead()
    tgroup += thead
    thead += build_row(app, fromdocname, header)
    tbody = nodes.tbody()
    tgroup += tbody

    for entry in sorted(coverage_list):
        body_text_list = []
        if coverage_list[entry]['indocs']:
            body_text_list.append(coverage_list[entry]['doccmd_item'])
        else:
            body_text_list.append('Not documented yet')

        if coverage_list[entry]['inxml']:
            body_text_list.append("------------------")
            body_text_list.append(
                str(coverage_list[entry]['xmlfilename']) + ":")
            body_text_list.append(coverage_list[entry]['xmlcmd'])
        else:
            body_text_list.append('Nothing found in XML Definitions')

        tbody += build_row(app, fromdocname,
                           (coverage_list[entry]['indocs'],
                            coverage_list[entry]['inxml'],
                            body_text_list))

    return table
def run(self): cls_ep = self.options.get('class') cls = load_entrypoint(cls_ep) if cls_ep else None enum = load_entrypoint(self.arguments[0]) properties = {} if cls is not None: for name, value in vars(cls).items(): if isinstance(value, property): try: item = value._enum_item except AttributeError: pass else: if isinstance(item, enum): properties[item] = name colwidths = [15, 15, 5, 65] if cls else [15, 5, 75] ncols = len(colwidths) table = nodes.table() tgroup = nodes.tgroup(cols=ncols) table += tgroup for width in colwidths: tgroup += nodes.colspec(colwidth=width) thead = nodes.thead() tgroup += thead tbody = nodes.tbody() tgroup += tbody def makerow(*texts): row = nodes.row() for text in texts: if text is None: continue row += nodes.entry('', nodes.paragraph('', str(text))) return row thead += makerow( '{} Attribute'.format(cls.__name__) if cls else None, '{} Name'.format(enum.__name__), 'Flag Value', 'Meaning in FFmpeg', ) seen = set() for name, item in enum._by_name.items(): if name.lower() in seen: continue seen.add(name.lower()) try: attr = properties[item] except KeyError: if cls: continue attr = None value = '0x{:X}'.format(item.value) doc = item.__doc__ or '-' tbody += makerow( attr, name, value, doc, ) return [table]
def process_audit_events(app, doctree, fromdocname): for node in doctree.traverse(audit_event_list): break else: return env = app.builder.env table = nodes.table(cols=3) group = nodes.tgroup( '', nodes.colspec(colwidth=30), nodes.colspec(colwidth=55), nodes.colspec(colwidth=15), cols=3, ) head = nodes.thead() body = nodes.tbody() table += group group += head group += body row = nodes.row() row += nodes.entry('', nodes.paragraph('', nodes.Text('Audit event'))) row += nodes.entry('', nodes.paragraph('', nodes.Text('Arguments'))) row += nodes.entry('', nodes.paragraph('', nodes.Text('References'))) head += row for name in sorted(getattr(env, "all_audit_events", ())): audit_event = env.all_audit_events[name] row = nodes.row() node = nodes.paragraph('', nodes.Text(name)) row += nodes.entry('', node) node = nodes.paragraph() for i, a in enumerate(audit_event['args']): if i: node += nodes.Text(", ") node += nodes.literal(a, nodes.Text(a)) row += nodes.entry('', node) node = nodes.paragraph() backlinks = enumerate(sorted(set(audit_event['source'])), start=1) for i, (doc, label) in backlinks: if isinstance(label, str): ref = nodes.reference("", nodes.Text("[{}]".format(i)), internal=True) try: ref['refuri'] = "{}#{}".format( app.builder.get_relative_uri(fromdocname, doc), label, ) except NoUri: continue node += ref row += nodes.entry('', node) body += row for node in doctree.traverse(audit_event_list): node.replace_self(table)
def envy_resolve(app, doctree, fromdocname): objects = app.env.domaindata['envy']['objects'] # add uplink info for holder in doctree.traverse(uplink_placeholder): obj = objects[holder.name] links = [] for sp, pos, name, variants in obj.uplinks: signode = addnodes.desc_signature('', '') signode['first'] = False signode += make_refnode(app.builder, fromdocname, sp.docname, sp.iname + '-' + sp.name, addnodes.desc_addname(sp.name, sp.name), sp.name) text = ' {}: {}'.format(pos, name) signode += addnodes.desc_name(text, text) if variants is not None: text = ' [{}]'.format(variants) signode += addnodes.desc_annotation(text, text) links.append(signode) holder.replace_self(links) # add subnode list for holder in doctree.traverse(sub_placeholder): obj = objects[holder.name] add_variant = False for pos, name, child, variants in obj.subs: if variants is not None: add_variant = True table = nodes.table() headers = [(1, 'Address'), (1, 'Name'), (10, 'Description')] if add_variant: headers.insert(1, (1, 'Variants')) tgroup = nodes.tgroup(cols=len(headers)) table += tgroup for colwidth, header in headers: tgroup += nodes.colspec(colwidth=colwidth) thead = nodes.thead() tgroup += thead headrow = nodes.row() for colwidth, header in headers: entry = nodes.entry() para = nodes.paragraph() entry += para para += nodes.Text(header, header) headrow += entry thead += headrow tbody = nodes.tbody() tgroup += tbody for pos, name, child, variants in obj.subs: row = nodes.row() row += wrap_text_entry(pos) if add_variant: row += wrap_text_entry('all' if variants is None else variants) row += wrap_text_entry(name) entry = nodes.entry() para = nodes.paragraph() entry += para para += make_refnode(app.builder, fromdocname, child.docname, child.iname + '-' + child.name, nodes.Text(child.brief, child.brief), child.brief) row += entry tbody += row holder.replace_self([table])
def get_autosummary(names, state, no_signatures=False): """ Generate a proper table node for autosummary:: directive. Parameters ---------- names : list of str Names of Python objects to be imported and added to the table. document : document Docutils document object """ document = state.document real_names = {} warnings = [] prefixes = [''] prefixes.insert(0, document.settings.env.ref_context.get('py:module')) table = nodes.table('') group = nodes.tgroup('', cols=2) table.append(group) group.append(nodes.colspec('', colwidth=30)) group.append(nodes.colspec('', colwidth=70)) body = nodes.tbody('') group.append(body) def append_row(*column_texts): row = nodes.row('') for text in column_texts: node = nodes.paragraph('') vl = ViewList() vl.append(text, '<autosummary>') state.nested_parse(vl, 0, node) row.append(nodes.entry('', node)) body.append(row) for name in names: try: obj, real_name = import_by_name(name, prefixes=prefixes) except ImportError: warnings.append( document.reporter.warning('failed to import %s' % name)) append_row(":obj:`%s`" % name, "") continue real_names[name] = real_name doc = get_doc_object(obj) if doc['Summary']: title = " ".join(doc['Summary']) else: title = "" col1 = ":obj:`%s <%s>`" % (name, real_name) if doc['Signature']: sig = re.sub('^[a-zA-Z_0-9.-]*', '', doc['Signature']) if '=' in sig: # abbreviate optional arguments sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1) sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1) sig = re.sub(r'=[^,)]+,', ',', sig) sig = re.sub(r'=[^,)]+\)$', '])', sig) # shorten long strings sig = re.sub(r'(\[.{16,16}[^,)]*?),.*?\]\)', r'\1, ...])', sig) else: sig = re.sub(r'(\(.{16,16}[^,)]*?),.*?\)', r'\1, ...)', sig) col1 += " " + sig col2 = title append_row(col1, col2) return table, warnings, real_names
def _build_summary(matrix, content):
    """Constructs the content for the summary of the support matrix.

    The summary consists of a giant table, with one row for each feature,
    and a column for each backend driver. It provides an 'at a glance'
    summary of the status of each driver.

    Appends a "Summary" subtitle node and the table node to *content*
    (mutated in place); returns nothing.
    """
    summary_title = nodes.subtitle(text="Summary")
    summary = nodes.table(classes=["sp_feature_cells"])
    cols = len(matrix.drivers.keys())
    # Add two columns for the Feature and Status columns.
    cols += 2
    summary_group = nodes.tgroup(cols=cols)
    summary_body = nodes.tbody()
    summary_head = nodes.thead()
    # Equal colwidth=1 for every column; relative widths are left to the
    # renderer.
    for i in range(cols):
        summary_group.append(nodes.colspec(colwidth=1))
    summary_group.append(summary_head)
    summary_group.append(summary_body)
    summary.append(summary_group)
    content.append(summary_title)
    content.append(summary)
    # This sets up all the column headers - two fixed
    # columns for feature name & status
    header = nodes.row()
    blank = nodes.entry(classes=["sp_feature_cells"])
    blank.append(nodes.emphasis(text="Feature"))
    header.append(blank)
    blank = nodes.entry(classes=["sp_feature_cells"])
    blank.append(nodes.emphasis(text="Status"))
    header.append(blank)
    # NOTE: header is appended to the thead here but continues to be mutated
    # below — docutils nodes are shared by reference, so the driver columns
    # added later still land in the rendered header row.
    summary_head.append(header)
    # then one column for each backend driver
    impls = sorted(matrix.drivers, key=lambda x: matrix.drivers[x].title)
    for key in impls:
        driver = matrix.drivers[key]
        implcol = nodes.entry(classes=["sp_feature_cells"])
        header.append(implcol)
        if driver.link:
            # Driver title becomes an external link when one is configured.
            uri = driver.link
            target_ref = nodes.reference("", refuri=uri)
            target_txt = nodes.inline()
            implcol.append(target_txt)
            target_txt.append(target_ref)
            target_ref.append(nodes.strong(text=driver.title))
        else:
            implcol.append(nodes.strong(text=driver.title))
    # We now produce the body of the table, one row for
    # each feature to report on
    for feature in matrix.features:
        item = nodes.row()
        # the hyperlink driver name linking to details
        feature_id = re.sub(KEY_PATTERN, "_", feature.key)
        # first the fixed columns for title/status
        key_col = nodes.entry(classes=["sp_feature_cells"])
        item.append(key_col)
        # Internal link down to the feature's detail section.
        key_ref = nodes.reference(refid=feature_id)
        key_txt = nodes.inline()
        key_col.append(key_txt)
        key_txt.append(key_ref)
        key_ref.append(nodes.strong(text=feature.title))
        status_col = nodes.entry(classes=["sp_feature_cells"])
        item.append(status_col)
        status_col.append(nodes.inline(
            text=feature.status,
            classes=["sp_feature_" + feature.status]))
        # and then one column for each backend driver
        for key in impls:
            impl = feature.implementations[key]
            impl_col = nodes.entry(classes=["sp_feature_cells"])
            item.append(impl_col)
            key_id = re.sub(KEY_PATTERN, "_",
                            "{}_{}".format(feature.key, key))
            # Cell links to the per-driver detail anchor; the symbol comes
            # from STATUS_SYMBOLS (empty string for unknown statuses).
            impl_ref = nodes.reference(refid=key_id)
            impl_txt = nodes.inline()
            impl_col.append(impl_txt)
            impl_txt.append(impl_ref)
            status = STATUS_SYMBOLS.get(impl.status, "")
            impl_ref.append(nodes.literal(
                text=status,
                classes=["sp_impl_summary", "sp_impl_" + impl.status]))
        summary_body.append(item)
def build_fields_table(self, fields, required_fields={}, show_requirement_labels=False): def get_type_name(field_type): # We may be dealing with a forward-declared class. if isinstance(field_type, basestring) and field_type is not str: field_type = self.get_resource_class(field_type) if type(field_type) is list: return [nodes.inline(text='List of ')] + \ get_type_name(field_type[0]) elif type(field_type) is tuple: value_nodes = [] for value in field_type: if value_nodes: value_nodes.append(nodes.inline(text=', ')) value_nodes.append(nodes.literal(text=value)) return [nodes.inline(text='One of ')] + value_nodes elif (inspect.isclass(field_type) and issubclass(field_type, WebAPIResource)): return [get_ref_to_resource(field_type, False)] elif field_type in self.type_mapping: return [nodes.inline(text=self.type_mapping[field_type])] else: print "Unknown type %s" % (field_type, ) assert False table = nodes.table(classes=['resource-fields']) tgroup = nodes.tgroup(cols=3) table += tgroup tgroup += nodes.colspec(colwidth=25, classes=['field']) tgroup += nodes.colspec(colwidth=15, classes=['type']) tgroup += nodes.colspec(colwidth=60, classes=['description']) thead = nodes.thead() tgroup += thead append_row(thead, ['Field', 'Type', 'Description']) tbody = nodes.tbody() tgroup += tbody if isinstance(fields, dict): for field in sorted(fields.iterkeys()): info = fields[field] name_node = nodes.inline() name_node += nodes.strong(text=field) if show_requirement_labels: if field in required_fields: name_node += nodes.inline(text=" (required)") else: name_node += nodes.inline(text=" (optional)") type_node = nodes.inline() type_node += get_type_name(info['type']) append_row(tbody, [ name_node, type_node, parse_text(self, info['description'], where='%s field description' % field) ]) else: for field in sorted(fields): name = field if show_requirement_labels: if field in required_fields: name += " (required)" else: name += " (optional)" append_row(tbody, [name, "", ""]) return table
def build_links_table(self, resource):
    """Build a three-column (Name / Method / Resource) table of the links
    exposed by *resource*.

    Reads ``self.options`` for the 'is-list' flag to decide whether to use
    the resource's list or item child resources. For non-list resources
    with a model, a sample object is fetched through a fake request so that
    related links can be resolved.
    """
    is_list = 'is-list' in self.options

    # Table scaffolding: 25/15/60 column split plus a header row.
    table = nodes.table()
    group = nodes.tgroup(cols=3)
    table += group

    for width in (25, 15, 60):
        group += nodes.colspec(colwidth=width)

    head = nodes.thead()
    group += head
    append_row(head, ['Name', 'Method', 'Resource'])

    body = nodes.tbody()
    group += body

    request = DummyRequest()

    child_resources = (resource.list_child_resources
                       if is_list
                       else resource.item_child_resources)

    # Map plural link names to (resource, is-list) pairs.
    names_to_resource = {
        child.name_plural: (child, True)
        for child in child_resources
    }

    if not is_list and resource.model:
        # Fetch one real object so per-object links resolve.
        child_keys = {}
        create_fake_resource_path(request, resource, child_keys, True)
        obj = resource.get_queryset(request, **child_keys)[0]
    else:
        obj = None

    for key, info in resource.get_related_links(request=request,
                                                obj=obj).iteritems():
        names_to_resource[key] = (info['resource'],
                                  info.get('list-resource', False))

    # NOTE(review): a fresh DummyRequest is passed here instead of reusing
    # `request` (which create_fake_resource_path may have populated) —
    # presumably deliberate; confirm before unifying.
    links = resource.get_links(child_resources,
                               request=DummyRequest(),
                               obj=obj)

    for linkname in sorted(links.iterkeys()):
        info = links[linkname]
        target, target_is_list = names_to_resource.get(
            linkname, (resource, is_list))

        ref_para = nodes.paragraph()
        ref_para += get_ref_to_resource(target, target_is_list)

        append_row(body,
                   [nodes.strong(text=linkname),
                    info['method'],
                    ref_para])

    return table