def test_parse_list_items(get_list):
    """parse_list_items: empty parent yields [], an item without a
    paragraph raises ValueError, and a paragraph's multiple Text
    children are joined with a single space.
    """
    # get_list fixture supplies a parent node and a list element.
    p, el = get_list
    assert parse_list_items(p) == []
    p.append(el)
    item1 = nodes.list_item()
    paragraph1 = nodes.paragraph()
    el.append(item1)
    # A list_item without a paragraph child is rejected.
    with pytest.raises(ValueError):
        parse_list_items(p)
    item1.append(paragraph1)
    text1 = nodes.Text('test')
    paragraph1.append(text1)
    assert parse_list_items(p) == ['test']
    item2 = nodes.list_item()
    paragraph2 = nodes.paragraph()
    text21 = nodes.Text('test2')
    text22 = nodes.Text('test3')
    el.append(item2)
    item2.append(paragraph2)
    paragraph2.append(text21)
    paragraph2.append(text22)
    # Multiple Text children of one paragraph are space-joined.
    assert parse_list_items(p) == ['test', 'test2 test3']
def run(self):
    """Build a bullet list: one entry per content line, each entry
    carrying a nested sub-list of links to the rendered variants of
    that line's path.

    NOTE(review): the original indentation was lost; the sub-list is
    reconstructed inside the outer loop because it uses the per-line
    ``path`` and appends to the per-line ``item`` -- confirm against
    history.
    """
    l = nodes.bullet_list('ul-name')
    for line in self.content:
        #print line
        # Each content line is "<path> <rest-of-line-text>".
        path, rest = line.split(' ', 1)
        line_text = rest
        text_nodes, messages = self.state.inline_text(line_text, self.lineno)
        line = nodes.line(line_text, '', *text_nodes)
        item = nodes.list_item(line_text, line)
        l.append(item)
        # Make the sub-list
        l2 = nodes.bullet_list('ul-name')
        for ext, title in [('.html', 'Normal text'),
                           ('-big.html', 'Presentation mode'),
                           #('-s5.html', 'Slide Show'),
                           ]:
            # Anonymous reST reference to the rendered variant.
            line_text = '`%s <%s%s>`__'%(title.strip(), path, ext)
            #print line_text
            text_nodes, messages = self.state.inline_text(line_text, self.lineno)
            line = nodes.line(line_text, '', *text_nodes)
            #print type(line)
            item2 = nodes.list_item(line_text, line)
            l2.append(item2)
        item.append(l2)
    return [l, nodes.paragraph('hi')]
def _build_details(self, matrix, content):
    """Constructs the docutils content for the details of
    the support matrix.

    This is generated as a bullet list of features.
    Against each feature we provide the description of
    the feature and then the details of the hypervisor
    impls, with any driver specific notes that exist
    """
    detailstitle = nodes.subtitle(text="Details")
    details = nodes.bullet_list()
    content.append(detailstitle)
    content.append(details)
    # One list entry for each feature we're reporting on
    for feature in matrix.features:
        item = nodes.list_item()
        status = feature.status
        if feature.group is not None:
            status += "(" + feature.group + ")"
        # The hypervisor target name linked from summary table
        id = re.sub("[^a-zA-Z0-9_]", "_", feature.key)
        # Highlight the feature title name
        item.append(nodes.strong(text=feature.title, ids=[id]))
        para = nodes.paragraph()
        para.append(nodes.strong(text="Status: " + status + ". "))
        if feature.notes is not None:
            para.append(nodes.inline(text=feature.notes))
        item.append(para)
        # A sub-list giving details of each hypervisor target
        impls = nodes.bullet_list()
        for key in feature.implementations:
            target = matrix.targets[key]
            impl = feature.implementations[key]
            subitem = nodes.list_item()
            # Per-implementation anchor id, e.g. "<feature>_<target>".
            id = re.sub("[^a-zA-Z0-9_]", "_", feature.key + "_" + key)
            subitem += [
                nodes.strong(text=target.title + ": "),
                nodes.literal(text=impl.status,
                              classes=["sp_impl_" + impl.status],
                              ids=[id]),
            ]
            if impl.notes is not None:
                subitem.append(nodes.paragraph(text=impl.notes))
            impls.append(subitem)
        item.append(impls)
        details.append(item)
def visit_language_specific_pages_node_html(self, node):
    """Emit HTML for a language-tabbed section: a Bootstrap-style tab
    bar (bullet list of tabs) followed by one "tab-pane" section per
    language part.
    """
    node["classes"] = ["tabbable"]
    ul = nodes.bullet_list()
    ul["classes"] = ["nav", "nav-tabs"]
    # set_source_info(self, ul)
    # Leading, non-clickable label "tab".
    href = tab("", "Language-specific info:")
    href["classes"] = ["disabled"]
    paragraph = nodes.paragraph("", "")
    li = nodes.list_item("")
    li["classes"] = ["disabled"]
    paragraph.append(href)
    li.append(paragraph)
    ul.append(li)
    first = True
    for part in node.parts:
        # One tab per language; the first one starts active.
        href = tab(part.language, part.language)
        href["refuri"] = "#" + make_id(node, part.language)
        paragraph = nodes.paragraph("")
        li = nodes.list_item("")
        if first:
            li["classes"].append("active")
        paragraph.append(href)
        li.append(paragraph)
        ul.append(li)
        first = False
    node.append(ul)
    pages = section()
    pages["classes"] = ["tab-content"]
    first = True
    for part in node.parts:
        # One pane per language, targeted by its tab's fragment id.
        page = section()
        page["classes"] = ["tab-pane"]
        if first:
            page["classes"].append("active")
        page["ids"] = [make_id(node, part.language)]
        page.append(part.paragraph)
        pages.append(page)
        first = False
    node.append(pages)
    self.body.append(self.starttag(node, "div"))
def visit_language_specific_pages_node_html(self, node):
    """Emit HTML for a tabbed "Ovation API" section: a nav tab bar
    plus one admonition-based "tab-pane" per language part.
    """
    node['classes'] = ['tabbable']
    ul = nodes.bullet_list()
    ul['classes'] = ['nav', 'nav-tabs']
    # set_source_info(self, ul)
    # Leading, non-clickable label "tab".
    href = tab('', 'Ovation API:')
    href['classes'] = ['disabled']
    paragraph = nodes.paragraph('', '')
    li = nodes.list_item('')
    li['classes'] = ['disabled']
    paragraph.append(href)
    li.append(paragraph)
    ul.append(li)
    first = True
    for part in node.parts:
        # One tab per language; the first one starts active.
        href = tab(part.language, part.language)
        href['refuri'] = '#' + make_id(node, part.language)
        paragraph = nodes.paragraph('')
        li = nodes.list_item('')
        if first:
            li['classes'].append('active')
        paragraph.append(href)
        li.append(paragraph)
        ul.append(li)
        first = False
    node.append(ul)
    pages = nodes.admonition()
    pages['classes'] = ['tab-content']
    first = True
    for part in node.parts:
        # One pane per language, targeted by its tab's fragment id.
        page = nodes.admonition()
        page['classes'] = ['tab-pane']
        if first:
            page['classes'].append('active')
        page['ids'] = [make_id(node, part.language)]
        page.append(part.paragraph)
        pages.append(page)
        first = False
    node.append(pages)
    self.body.append(self.starttag(node, 'div'))
def run(self):
    """Parse a literate file into an enumerated list of paired
    annotation/content items, each wrapped in styled sections, inside
    a 'lit-container' div.
    """
    try:
        lines = self.parse_lit(self.parse_file(self.arguments[0]))
    except IOError as exc:
        # Unreadable source file: report as a document warning node.
        document = self.state.document
        return [document.reporter.warning(str(exc), line=self.lineno)]
    node = nodes.container()
    node['classes'] = ['lit-container']
    node.document = self.state.document
    enum = nodes.enumerated_list()
    enum['classes'] = ['lit-docs']
    node.append(enum)
    # make first list item
    list_item = nodes.list_item()
    list_item['classes'] = ['lit-item']
    for is_doc, line in lines:
        if is_doc and line == ['']:
            continue
        section = nodes.section()
        if is_doc:
            # Documentation chunk: parse as nested reST.
            section['classes'] = ['lit-annotation']
            nested_parse_with_titles(self.state, ViewList(line), section)
        else:
            # Code chunk: render as a YAML literal block.
            section['classes'] = ['lit-content']
            code = '\n'.join(line)
            literal = nodes.literal_block(code, code)
            literal['language'] = 'yaml'
            set_source_info(self, literal)
            section.append(literal)
        list_item.append(section)
        # If we have a pair of annotation/content items, append the list
        # item and create a new list item
        # NOTE(review): a trailing unpaired item is silently dropped --
        # confirm that is intended.
        if len(list_item.children) == 2:
            enum.append(list_item)
            list_item = nodes.list_item()
            list_item['classes'] = ['lit-item']
    # Non-semantic div for styling
    bg = nodes.container()
    bg['classes'] = ['lit-background']
    node.append(bg)
    return [node]
def make_field(self, types, domain, items):
    """Build a docutils field for *items*, rendering each entry as
    ``name (type) -- description`` with cross-referenced name and type;
    collapses a single entry to a bare paragraph when allowed.
    """
    def handle_item(fieldarg, content):
        # Render one "name (type) -- content" paragraph.
        par = nodes.paragraph()
        par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
                                   addnodes.literal_strong))
        if fieldarg in types:
            par += nodes.Text(' (')
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = u''.join(n.astext() for n in fieldtype)
                par.extend(self.make_xrefs(self.typerolename, domain,
                                           typename,
                                           addnodes.literal_emphasis))
            else:
                par += fieldtype
            par += nodes.Text(')')
        par += nodes.Text(' -- ')
        par += content
        return par

    fieldname = nodes.field_name('', self.label)
    if len(items) == 1 and self.can_collapse:
        # Single collapsible entry: no wrapping list node.
        fieldarg, content = items[0]
        bodynode = handle_item(fieldarg, content)
    else:
        bodynode = self.list_type()
        for fieldarg, content in items:
            bodynode += nodes.list_item('', handle_item(fieldarg, content))
    fieldbody = nodes.field_body('', bodynode)
    return nodes.field('', fieldname, fieldbody)
def build_contents(self, node, level=0):
    """Recursively build a bullet-list table of contents from the
    trailing run of section children of *node*, honouring the
    ``:depth:`` detail and the configured backlink mode.

    Returns a ``bullet_list`` node, or ``[]`` when there are no
    sections at this level.
    """
    level += 1
    # Collect the trailing run of section children, restoring
    # document order.
    sections = []
    i = len(node) - 1
    while i >= 0 and isinstance(node[i], nodes.section):
        sections.append(node[i])
        i -= 1
    sections.reverse()
    entries = []
    autonum = 0
    # Fix: sys.maxint no longer exists on Python 3; sys.maxsize is
    # available on both 2.6+ and 3.x and serves the same "unlimited
    # depth" purpose here.
    depth = self.startnode.details.get('depth', sys.maxsize)
    for section in sections:
        title = section[0]
        auto = title.get('auto')  # May be set by SectNum.
        entrytext = self.copy_and_filter(title)
        reference = nodes.reference('', '', refid=section['id'],
                                    *entrytext)
        ref_id = self.document.set_id(reference)
        entry = nodes.paragraph('', '', reference)
        item = nodes.list_item('', entry)
        if self.backlinks == 'entry':
            # Link the section title back to its ToC entry.
            title['refid'] = ref_id
        elif self.backlinks == 'top':
            title['refid'] = self.toc_id
        if level < depth:
            subsects = self.build_contents(section, level)
            item += subsects
        entries.append(item)
    if entries:
        contents = nodes.bullet_list('', *entries)
        if auto:
            contents.set_class('auto-toc')
        return contents
    else:
        return []
def run(self):
    """Build a section describing a package: a paragraph per requested
    info item and a bullet list of keywords.

    Returns a one-element list holding the section node.
    """
    infos = self.options.get('infos', ['title', 'desc', 'longdesc'])
    package = self._get_package()
    jpackage_id = "package-%d" % self.env.new_serialno('jpackage')
    jpackage_node = nodes.section(ids=[jpackage_id])
    if 'title' in infos:
        title = '%s' % (package.get_name())
        jpackage_node += nodes.title(text=title)
    if 'desc' in infos:
        jpackage_node.append(
            nodes.paragraph(
                text=six.text_type(package.get_description())
            )
        )
    if 'author' in infos:
        jpackage_node.append(
            nodes.paragraph(
                text=six.text_type(package.get_author())
            )
        )
    if 'nickname' in infos:
        jpackage_node.append(
            nodes.paragraph(
                text=six.text_type(package.get_nickname())
            )
        )
    if 'longdesc' in infos:
        jpackage_node.append(
            nodes.paragraph(
                text=six.text_type(package.get_long_description())
            )
        )
    if 'license' in infos:
        jpackage_node.append(
            nodes.paragraph(
                text=six.text_type(package.get_license())
            )
        )
    if 'keywords' in infos:
        # Fix: list_item nodes must live inside a list container;
        # appending them straight to a section is an invalid doctree
        # that can break writers. Collect them in a bullet_list.
        keyword_list = nodes.bullet_list()
        for data in sorted(package.get_keywords()):
            item = nodes.list_item()
            item += [
                nodes.inline(text=six.text_type(data)),
            ]
            keyword_list.append(item)
        jpackage_node.append(keyword_list)
        jpackage_node.append(
            nodes.paragraph(
                text='')
        )
    return [jpackage_node]
def _render_service(self, path, service, methods):
    """Build a doctree section documenting one service, with a
    subsection per HTTP method including validator docstrings,
    accepted content types, and the renderer name.
    """
    env = self.state.document.settings.env
    service_id = "service-%d" % env.new_serialno('service')
    service_node = nodes.section(ids=[service_id])
    service_node += nodes.title(text='Service at %s' % service.route_name)
    if service.description is not None:
        service_node += rst2node(_dedent(service.description))
    for method, info in methods.items():
        method_id = '%s-%s' % (service_id, method)
        method_node = nodes.section(ids=[method_id])
        method_node += nodes.title(text=method)
        docstring = info['func'].__doc__ or ""
        if 'validator' in info:
            validators = to_list(info['validator'])
            for validator in validators:
                if validator.__doc__ is not None:
                    # NOTE(review): docstring was initialised to "" above,
                    # so this inner None-check is redundant dead code.
                    if docstring is not None:
                        docstring += '\n' + validator.__doc__.strip()
        if 'accept' in info:
            accept = info['accept']
            if callable(accept):
                # Dynamic accept: document the callable's docstring
                # instead of listing content types.
                if accept.__doc__ is not None:
                    docstring += accept.__doc__.strip()
            else:
                accept = to_list(accept)
                accept_node = nodes.strong(text='Accepted content types:')
                node_accept_list = nodes.bullet_list()
                accept_node += node_accept_list
                for item in accept:
                    temp = nodes.list_item()
                    temp += nodes.inline(text=item)
                    node_accept_list += temp
                method_node += accept_node
        node = rst2node(docstring)
        if node is not None:
            method_node += node
        renderer = info['renderer']
        if renderer == 'simplejson':
            # Normalise legacy renderer name for display.
            renderer = 'json'
        response = nodes.paragraph()
        response += nodes.strong(text='Response: %s' % renderer)
        method_node += response
        service_node += method_node
    return service_node
def resolve_required_by_xrefs(app, env, node, contnode):
    """Now that all recipes and packages have been parsed, we are called here
    for each ``pending_xref`` node that sphinx has not been able to resolve.

    We handle specifically the ``requiredby`` reftype created by the
    `RequiredByField` fieldtype allowed in ``conda:package::``
    directives, where we replace the ``pending_ref`` node with a bullet
    list of reference nodes pointing to the package pages that
    "depended" on the package.
    """
    if node['reftype'] == 'requiredby' and node['refdomain'] == 'conda':
        target = node['reftarget']
        docname = node['refdoc']
        # Reverse-dependency index built while parsing the domain.
        backrefs = env.domains['conda'].data['backrefs'].get(target, set())
        listnode = nodes.bullet_list()
        for back_docname, back_target in backrefs:
            par = nodes.paragraph()
            name_node = addnodes.literal_strong(back_target, back_target,
                                                classes=['xref', 'backref'])
            refnode = make_refnode(app.builder, docname,
                                   back_docname, back_target, name_node)
            refnode.set_class('conda-package')
            par += refnode
            listnode += nodes.list_item('', par)
        return listnode
    # Any other unresolved xref falls through (implicitly returns None,
    # letting other resolvers have a go).
def _append_dbapi_bullet(self, dialect_name, dbapi_name, name, idname):
    """Append a bullet linking *name* to its dialect section anchor
    *idname* on the dialect's page.

    Falls back to a same-page fragment link if the builder cannot
    produce a relative URI yet.
    """
    env = self.state.document.settings.env
    dialect_directive = self._dialects[dialect_name]
    try:
        relative_uri = env.app.builder.get_relative_uri(
            dialect_directive.docname, self.docname)
    except Exception:
        # Fix: the original bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt; catch Exception instead.
        relative_uri = ""
    list_node = nodes.list_item(
        '',
        nodes.paragraph(
            '', '',
            nodes.reference(
                '', '',
                nodes.Text(name, name),
                refdocname=self.docname,
                refuri=relative_uri + "#" + idname
            ),
        )
    )
    dialect_directive.bullets.append(list_node)
def build_contents(self, node, level=0):
    """Recursively build a bullet-list table of contents from the
    section children of *node*, down to the configured ``:depth:``,
    adding title backlinks per the ``backlinks`` setting.
    """
    level += 1
    sections = [sect for sect in node if isinstance(sect, nodes.section)]
    entries = []
    autonum = 0
    # Default to "unlimited" depth when :depth: is not given.
    depth = self.startnode.details.get('depth', sys.maxsize)
    for section in sections:
        title = section[0]
        auto = title.get('auto')  # May be set by SectNum.
        entrytext = self.copy_and_filter(title)
        reference = nodes.reference('', '', refid=section['ids'][0],
                                    *entrytext)
        ref_id = self.document.set_id(reference)
        entry = nodes.paragraph('', '', reference)
        item = nodes.list_item('', entry)
        # Only add a backlink when the title has no reference of its own.
        if (
            self.backlinks in ('entry', 'top')
            and title.next_node(nodes.reference) is None):
            if self.backlinks == 'entry':
                title['refid'] = ref_id
            elif self.backlinks == 'top':
                title['refid'] = self.toc_id
        if level < depth:
            subsects = self.build_contents(section, level)
            item += subsects
        entries.append(item)
    if entries:
        contents = nodes.bullet_list('', *entries)
        if auto:
            contents['classes'].append('auto-toc')
        return contents
    else:
        return []
def run(self):
    """Wrap each literal block of the directive's content in a bullet
    list entry titled with its (pretty-printed) language name, inside
    a configurationblock container.
    """
    env = self.state.document.settings.env
    node = nodes.Element()
    node.document = self.state.document
    self.state.nested_parse(self.content, self.content_offset, node)
    entries = []
    for i, child in enumerate(node):
        if isinstance(child, nodes.literal_block):
            # add a title (the language name) before each block
            #targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
            #targetnode = nodes.target('', '', ids=[targetid])
            #targetnode.append(child)
            if 'language' in child:
                language = child['language']
            else:
                language = env.app.config.highlight_language
            # NOTE(review): raises KeyError if *language* is missing from
            # self.formats -- confirm all highlight languages are mapped.
            innernode = nodes.emphasis(self.formats[language],
                                       self.formats[language])
            para = nodes.paragraph()
            para += [innernode, child]
            entry = nodes.list_item('')
            entry.append(para)
            entries.append(entry)
    resultnode = configurationblock()
    resultnode.append(nodes.bullet_list('', *entries))
    return [resultnode]
def run(self):
    """Render recent git commits touching this document (or, with the
    ``:dir:`` flag, its directory) as a bullet list of
    '<summary> by <author> at <date>' entries.
    """
    env = self.state.document.settings.env
    config = env.config
    repodir = env.srcdir + '/' + config["git_repository_root"]
    doc_path = env.srcdir + '/' + env.docname + config["source_suffix"]
    # A present-but-valueless ':dir:' flag option arrives as None.
    # Fix: identity test with ``is None`` instead of ``== None``.
    if self.options.get('dir', False) is None:
        doc_path = '/'.join(doc_path.split('/')[:-1])
    repo = Repo(repodir)
    commits = repo.iter_commits(paths=doc_path)
    l = nodes.bullet_list()
    revisions_to_display = self.options.get('revisions', 10)
    for commit in list(commits)[:revisions_to_display]:
        date_str = datetime.fromtimestamp(commit.authored_date)
        if '\n' in commit.message:
            # First line is the summary; the rest is the detail body.
            message, detailed_message = commit.message.split('\n', 1)
        else:
            message = commit.message
            detailed_message = None
        item = nodes.list_item()
        item += [
            nodes.strong(text=message),
            nodes.inline(text=" by "),
            nodes.emphasis(text=str(commit.author)),
            nodes.inline(text=" at "),
            nodes.emphasis(text=str(date_str))
        ]
        if detailed_message:
            # Fix: nodes.caption is only valid inside figure/table per
            # the docutils document model; use a paragraph, matching
            # the newer _build_markup variant of this code.
            item.append(nodes.paragraph(text=detailed_message.strip()))
        l.append(item)
    return [l]
def run(self):
    """Render the 10 most recent commits of the documentation repo as
    a bullet list of '<summary> by <author> at <date>' entries.
    """
    env = self.state.document.settings.env
    repo = Repo(env.srcdir)
    commits = repo.iter_commits()
    l = nodes.bullet_list()
    for commit in list(commits)[:10]:
        date_str = datetime.fromtimestamp(commit.authored_date)
        if '\n' in commit.message:
            # First line is the summary; the rest is the detail body.
            message, detailed_message = commit.message.split('\n', 1)
        else:
            message = commit.message
            detailed_message = None
        item = nodes.list_item()
        item += [
            nodes.strong(text=message),
            nodes.inline(text=" by "),
            nodes.emphasis(text=str(commit.author)),
            nodes.inline(text=" at "),
            nodes.emphasis(text=str(date_str))
        ]
        if detailed_message:
            # Fix: nodes.caption is only valid inside figure/table per
            # the docutils document model; use a paragraph, matching
            # the newer _build_markup variant of this code.
            item.append(nodes.paragraph(text=detailed_message.strip()))
        l.append(item)
    return [l]
def _output_resource(self, resource, parent, is_list):
    """Recursively append a bullet-list entry linking *resource*'s
    reference page to *parent*, followed by a nested bullet list of
    its child resources.
    """
    item = nodes.list_item()
    parent += item
    paragraph = nodes.paragraph()
    item += paragraph
    # Cross-reference to the resource's generated reference section.
    paragraph += parse_text(
        self,
        ':ref:`%s <%s>`'
        % (get_resource_title(resource, is_list, False),
           'webapi2.0-%s-resource'
           % get_resource_docname(resource, is_list)))
    bullet_list = nodes.bullet_list()
    item += bullet_list
    if is_list:
        # A list resource may expose a per-object item resource plus
        # further list children.
        if resource.uri_object_key:
            self._output_resource(resource, bullet_list, False)
        for child in resource.list_child_resources:
            self._output_resource(child, bullet_list, True)
    else:
        for child in resource.item_child_resources:
            self._output_resource(child, bullet_list, True)
def generate_breadcrumb(self): "Generate ordered and linked 'breadcrumb' path list. " #sep = self.document.settings.breadcrumb_path_separator sep = '/' path = getattr(self.document.settings, 'breadcrumb_path') if not path: path = self.document['source'] breadcrumb = nodes.enumerated_list(classes=['breadcrumb']) # TODO: much more customization, what about domain, etc? s,h,path,para,q,f = urlparse.urlparse(path) dirs = path.split(sep) or [] _p = [] while dirs: dn = dirs.pop(0) _p.append(dn) if dirs: href = sep.join(_p) or sep # XXX: fix the path to be absolute if not href.startswith(sep): href = sep+href dn += sep ref = nodes.reference('', nodes.Text(dn), refuri=href) else: ref = nodes.Text(dn) p = nodes.paragraph('', '', ref) item = nodes.list_item() item.append(p) breadcrumb.append(item) return breadcrumb
def gene_link_list(uri_list, bullet="*"):
    """
    Generate bullet list of uri from the list of uri

    >>> print gene_link_list(['a', 'b']).pformat()
    <bullet_list bullet="*">
        <list_item>
            <paragraph>
                <reference name="a" refuri="a">
                    a
                <target ids="a" names="a" refuri="a">
        <list_item>
            <paragraph>
                <reference name="b" refuri="b">
                    b
                <target ids="b" names="b" refuri="b">
    <BLANKLINE>
    """
    bullet_list = nodes.bullet_list(bullet=bullet)
    # One list_item per URI, each wrapping the generated link
    # (reference + target pair) in a paragraph.
    bullet_list += [
        nodes.list_item(
            '', with_children(nodes.paragraph, gene_link(l)))
        for l in uri_list]
    return bullet_list
def _build_markup(self, commits):
    """Render *commits* as a bullet list of
    '<summary> by <author> at <date>' entries; the detail body becomes
    a literal block (with 'detailed-message-pre') or a paragraph.
    """
    list_node = nodes.bullet_list()
    for commit in commits:
        date_str = datetime.fromtimestamp(commit.authored_date)
        if '\n' in commit.message:
            # First line is the summary; the rest is the detail body.
            message, detailed_message = commit.message.split('\n', 1)
        else:
            message = commit.message
            detailed_message = None
        item = nodes.list_item()
        item += [
            nodes.strong(text=message),
            nodes.inline(text=" by "),
            nodes.emphasis(text=six.text_type(commit.author)),
            nodes.inline(text=" at "),
            nodes.emphasis(text=str(date_str))
        ]
        if detailed_message:
            detailed_message = detailed_message.strip()
            if self.options.get('detailed-message-pre', False):
                item.append(
                    nodes.literal_block(text=detailed_message))
            else:
                item.append(nodes.paragraph(text=detailed_message))
        list_node.append(item)
    return [list_node]
def toctree_directive(dirname, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine):
    """Render a toctree as an admonition holding a bullet list of
    links, preferring each entry's site Docstring title when one is
    recorded.
    """
    node = nodes.admonition()
    node['classes'] += ['admonition-toctree']
    node += nodes.title('', 'Toctree')
    para = nodes.paragraph('')
    node += para
    ul = nodes.bullet_list()
    para += ul
    for line in content:
        line = line.strip()
        # Skip blanks and option-style ':...' lines.
        if not line or line.startswith(':'):
            continue
        try:
            uri, name = resolve_name(line, state.inliner)
            title = name
            try:
                # Prefer the docstring's own title when one is recorded.
                doc = models.Docstring.on_site.get(name=name)
                if doc.title:
                    title = doc.title
            except models.Docstring.DoesNotExist:
                pass
            entry = nodes.reference('', title, refuri=uri)
        except ValueError:
            # Unresolvable name: emit a pending named reference instead.
            entry = nodes.reference('', line, name=line,
                                    refname=':ref:`%s`' % line)
        ul += nodes.list_item('', nodes.paragraph('', '', entry))
    return [node]
def insert_callback(parameters_node, callback_required):
    """Document a ``callback`` parameter in *parameters_node* unless
    one is already listed, inserting it just before whichever of
    ``*args`` / ``**kwargs`` appears first.
    """
    names = get_parameter_names(parameters_node)
    if 'callback' in names:
        return
    # Position of the insertion point: before *args / **kwargs, or at
    # the end when neither is present.
    args_pos = names.index('*args') if '*args' in names else len(names)
    kwargs_pos = names.index('**kwargs') if '**kwargs' in names else len(names)
    requirement = "required" if callback_required else "optional"
    doc = (" (%s): function taking (result, error), to execute when operation"
           " completes" % requirement)
    entry = list_item(
        '',
        paragraph(
            '', '',
            title_reference('', 'callback'),
            Text(doc)))
    parameters_node.insert(min(args_pos, kwargs_pos), entry)
def process_latest_toc(app, doctree, fromdocname):
    """We traverse the doctree looking for publication dates to build the
    date-based ToC here. Since the ordering is ill-defined, from our
    perspective, we parse all of them each time, but cache them in the
    environment"""
    env = app.builder.env
    cache_article_dates(env)
    feed_pub_dates = getattr(env, 'feed_pub_dates', {})

    def is_blacklisted(docname):
        # Substring match against the configured blacklist entries.
        for blacklist_entry in app.config.feed_blacklist:
            if blacklist_entry in docname:
                return True
        return False

    for node in doctree.traverse(latest):
        entries = node['entries']
        includefiles = node['includefiles']
        # Keep only documents with a known publication date.
        decorated_entries = [
            (feed_pub_dates.get(doc), title, doc)
            for title, doc in entries
            if doc in feed_pub_dates]
        decorated_entries.sort(reverse=True)  # newest first
        latest_list = nodes.bullet_list('')
        for date, title, docname in decorated_entries:
            if is_blacklisted(docname):
                continue
            para = nodes.paragraph()
            list_item = nodes.list_item('', para)
            if title is None:
                title = env.titles.get(docname)
                if title:
                    title = title[0]  # .astext()
            if title is None:
                # Fix: a docname missing from env.titles left title as
                # None, and newnode.append(None) below would crash.
                # Fall back to the docname as the link text.
                title = nodes.Text(docname, docname)
            # Create a reference
            newnode = nodes.reference('', '')
            # date
            stringdate = date.strftime('%Y-%m-%d') + ':'
            para += nodes.Text(stringdate, stringdate)
            para += nodes.Text(' ', ' ')
            # title and link
            innernode = title  # nodes.emphasis(title, title)
            newnode['refdocname'] = docname
            newnode['refuri'] = app.builder.get_relative_uri(
                fromdocname, docname)
            newnode.append(innernode)
            para += newnode
            # Insert into the latestlist
            latest_list.append(list_item)
        node.replace_self(latest_list)
def build_contents(self, node, level=0):
    """Recursively build a bullet-list ToC from the trailing run of
    section children of *node*; sections without assigned ids are
    skipped.
    """
    level += 1
    # Collect the trailing run of sections, restoring document order.
    sections = []
    i = len(node) - 1
    while i >= 0 and isinstance(node[i], nodes.section):
        sections.append(node[i])
        i -= 1
    sections.reverse()
    entries = []
    autonum = 0
    depth = 4  # XXX FIXME
    for section in sections:
        title = section[0]
        entrytext = title
        try:
            reference = nodes.reference('', '', refid=section['ids'][0],
                                        *entrytext)
        except IndexError:
            # Section has no ids assigned yet; skip it.
            continue
        ref_id = self.document.set_id(reference)
        entry = nodes.paragraph('', '', reference)
        item = nodes.list_item('', entry)
        if level < depth:
            subsects = self.build_contents(section, level)
            item += subsects
        entries.append(item)
    if entries:
        contents = nodes.bullet_list('', *entries)
        return contents
    else:
        return []
def make_field(self, types, domain, items):
    """Build a simple list-type field: one list item (wrapping the raw
    content in a paragraph) per entry in *items*. *types* and the
    per-item field argument are not used by this field kind.
    """
    body = self.list_type()
    for _fieldarg, content in items:
        body += nodes.list_item('', nodes.paragraph('', '', *content))
    name_node = nodes.field_name('', self.label)
    body_node = nodes.field_body('', body)
    return nodes.field('', name_node, body_node)
def insert_callback(parameters_node):
    """Document an optional ``callback`` parameter in
    *parameters_node* unless one is already listed, inserting it just
    before whichever of ``*args`` / ``**kwargs`` appears first.
    """
    names = get_parameter_names(parameters_node)
    if 'callback' in names:
        return
    # Insertion point: before *args / **kwargs, or at the end when
    # neither is present.
    args_pos = names.index('*args') if '*args' in names else len(names)
    kwargs_pos = names.index('**kwargs') if '**kwargs' in names else len(names)
    doc = (
        " (optional): function taking (result, error), executed when"
        " operation completes")
    entry = list_item(
        '',
        paragraph(
            '', '',
            literal('', 'callback'),
            # literal(text='callback'),
            Text(doc)))
    parameters_node.insert(min(args_pos, kwargs_pos), entry)
def run(self):
    """Create a type list."""
    config = self.state.document.settings.env.config
    # Group processes by category
    processes = get_processes(config.autoprocess_process_dir,
                              config.autoprocess_source_base_url)
    processes.sort(key=itemgetter('type'))
    processes_by_types = {k: list(g) for k, g in
                          groupby(processes, itemgetter('type'))}
    listnode = nodes.bullet_list()
    for typ in sorted(processes_by_types.keys()):
        # One bullet per type: "``type`` - name1, name2, ...".
        par = nodes.paragraph()
        par += nodes.literal(typ, typ)
        par += nodes.Text(' - ')
        processes = sorted(processes_by_types[typ], key=itemgetter('name'))
        last_process = processes[-1]
        for process in processes:
            # Link each process name to its anchor in the definitions doc.
            node = nodes.reference('', process['name'], internal=True)
            node['refuri'] = config.autoprocess_definitions_uri + '#process-' + process['slug']
            node['reftitle'] = process['name']
            par += node
            if process != last_process:
                par += nodes.Text(', ')
        listnode += nodes.list_item('', par)
    return [listnode]
def run(self):
    """Render ValueTableDirective.values either as a bullet list of
    bare values (when no descriptions were given) or as a two-column
    value/description table.
    """
    if ValueTableDirective.values[0].description is None:
        # Values only: a simple bullet list.
        # Fix: renamed the local from ``list``, which shadowed the builtin.
        value_list = nodes.bullet_list()
        for v in ValueTableDirective.values:
            item = nodes.list_item()
            item += nodes.literal(v.value, v.value)
            value_list += item
        return [value_list]
    # Values with descriptions: a 10/90 two-column table.
    table = nodes.table()
    tgroup = nodes.tgroup()
    tbody = nodes.tbody()
    for v in ValueTableDirective.values:
        row = nodes.row()
        entry = nodes.entry()
        entry += nodes.literal(v.value, v.value)
        row += entry
        entry = nodes.entry()
        entry += nodes.paragraph(text=v.description)
        row += entry
        tbody += row
    tgroup += nodes.colspec(colwidth=10)
    tgroup += nodes.colspec(colwidth=90)
    tgroup += tbody
    table += tgroup
    return [table]
def list_item(self, block):
    """Convert one source list-item *block* into a docutils list_item,
    recursing into its child blocks, and append it to the current node.
    """
    node = nodes.list_item()
    node.line = block.start_line  # preserve source line for error reports
    # Children are converted with this list_item as the temporary
    # insertion point.
    with self._temp_current_node(node):
        self.convert_blocks(block.children)
    self.current_node.append(node)
def unmarked_bullet_list_items_treated_as_bugs(self):
    """A bullet-list item with no issue-role marker is classified as a
    bug entry with no issue number.
    """
    fake = list_item('', paragraph('', '', raw('', 'whatever')))
    releases = _releases('1.0.2', self.f, fake)
    entries = releases[1]['entries']
    eq_(len(entries), 1)
    # The original fixture issue must not leak into this release.
    assert self.f not in entries
    assert isinstance(entries[0], issue)
    # Synthesized bug entries carry no explicit issue number.
    eq_(entries[0].number, None)
def descriptions_are_parsed_for_issue_roles(self):
    """An issue role embedded in an entry's description is rendered as
    a second inline link rather than left as a raw Issue object.
    """
    item = list_item(
        '', paragraph('', '', self.b.deepcopy(), _issue('support', '5')))
    para = self._generate('1.0.2', item)[0]
    # Sanity check - in a broken parsing scenarion, the 4th child will be a
    # raw issue object
    assert not isinstance(para[4], Issue)
    # First/primary link
    _expect_type(para[2], reference)
    eq_(para[2].astext(), '#15')
    assert 'Bug' in para[0].astext()
    # Second/inline link
    _expect_type(para[6], reference)
    eq_(para[6].astext(), '#5')
    assert 'Support' in para[4].astext()
def generate_collapsible_classlist(app, fromdocname, classes, container,
                                   caption, module_index):
    """Build a two-level, toctree-styled bullet list: one top-level
    entry per module (the path component at *module_index*) with one
    nested entry per class, marking the current page's entries with
    the 'current' class.
    """
    entries = defaultdict(list)
    # Common dotted prefix shared by all class entries.
    prefix = ".".join(classes[0][0].split(".")[:module_index]) + "."
    for e in classes:
        module = e[0].split(".")[module_index]
        entries[module].append(e)
    #print("t", fromdocname)
    toc = nodes.bullet_list()
    toc += nodes.caption(caption, '', *[nodes.Text(caption)])
    for module, class_list in entries.items():
        #print("t2", "src." + prefix + module)
        ref = nodes.reference('', '')
        ref['refuri'] = app.builder.get_relative_uri(fromdocname,
                                                     prefix + module)
        ref.append(nodes.Text(module.capitalize()))
        module_item = nodes.list_item('',
                                      addnodes.compact_paragraph('', '', ref),
                                      classes=["toctree-l1"])
        if fromdocname.startswith(prefix + module):
            module_item["classes"].append('current')
        toc += module_item
        subtree = nodes.bullet_list()
        module_item += subtree
        for e in class_list:
            # NOTE(review): entry layout inferred from indexing --
            # e[0]=dotted name, e[3]=docname, e[4]=anchor; confirm
            # against the caller that builds *classes*.
            ref = nodes.reference('', '')
            ref['refdocname'] = e[3]
            ref['refuri'] = app.builder.get_relative_uri(fromdocname, e[3])
            ref['refuri'] += '#' + e[4]
            ref.append(nodes.Text(e[0].split(".")[-1]))
            class_item = nodes.list_item('',
                                         addnodes.compact_paragraph('', '', ref),
                                         classes=["toctree-l2"])
            if fromdocname.startswith(e[3]):
                class_item['classes'].append('current')
            subtree += class_item
    container += toc
def bokeh_requires(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Provide the minimum required Python version from setup.py.

    Returns 2 part tuple containing list of nodes to insert into the
    document and a list of system messages. Both are allowed to be empty.
    """
    # Load the setup-support module straight from its source file.
    loader = importlib.machinery.SourceFileLoader(
        "setup", join(TOP_PATH, "_setup_support.py"))
    setup = types.ModuleType(loader.name)
    loader.exec_module(setup)
    # One list item per declared install requirement.
    items = [nodes.list_item("", nodes.Text(dep))
             for dep in setup.INSTALL_REQUIRES]
    bullets = nodes.bullet_list()
    bullets.extend(items)
    return [bullets], []
def make_field(self, types, domain, items):
    """Build a grouped field rendering each entry as
    ``name -- content``; a single entry collapses to a bare paragraph
    when the field allows it.
    """
    fieldname = nodes.field_name('', self.label)
    listnode = self.list_type()
    for fieldarg, content in items:
        par = nodes.paragraph()
        par += self.make_xref(self.rolename, domain, fieldarg,
                              addnodes.literal_strong)
        par += nodes.Text(' -- ')
        par += content
        listnode += nodes.list_item('', par)
    if len(items) == 1 and self.can_collapse:
        # Unwrap the single paragraph (list -> list_item -> paragraph).
        fieldbody = nodes.field_body('', listnode[0][0])
        return nodes.field('', fieldname, fieldbody)
    fieldbody = nodes.field_body('', listnode)
    return nodes.field('', fieldname, fieldbody)
def _doc_chain(self, widgetname, chainidx):
    """Render one blueprint chain as an enumerated list of element
    names, or a '-/-' paragraph when the chain is empty.
    """
    ol = nodes.enumerated_list()
    chain = factory._blueprints[widgetname][chainidx]
    exist = False
    for el in chain:
        exist = True
        li = nodes.list_item()
        if hasattr(el, 'func_name'):
            # function ('func_name' is the Python-2 attribute name)
            li.append(nodes.paragraph(text=el.func_name))
        else:
            # class
            # NOTE(review): for an *instance* this yields its class
            # name; if el can itself be a class object this would give
            # the metaclass name instead -- confirm chain elements are
            # instances.
            li.append(nodes.paragraph(text=el.__class__.__name__))
        ol.append(li)
    if exist:
        return ol
    return nodes.paragraph(text="-/-")
def _build_notes(self, content):
    """Append a "Notes:" subtitle and a bullet list of general notes
    about the support matrix to *content*.
    """
    title = nodes.subtitle(text="Notes:")
    note_list = nodes.bullet_list()
    content.append(title)
    content.append(note_list)
    # Each note becomes one bold bullet entry.
    general_notes = ["This document is a continuous work in progress"]
    for text in general_notes:
        entry = nodes.list_item()
        entry.append(nodes.strong(text=text))
        note_list.append(entry)
def build(self, class_name):
    """Build a container holding the contributor bullet list, using
    BEM-style CSS class names derived from *class_name*.
    """
    list_class = "%s_list" % class_name
    item_class = "%s__item" % list_class
    container = nodes.container(classes=[class_name])
    if self.avatars:
        # Modifier class switches on avatar rendering.
        container["classes"].append("%s--avatars" % class_name)
    contributor_list = nodes.bullet_list(classes=[list_class])
    for person in self.contributors:
        entry = nodes.list_item(classes=[item_class])
        entry += person.build(class_name)
        contributor_list += entry
    container += contributor_list
    return container
def _build_toc_node(docname, anchor='anchor', text='test text', bullet=False):
    """
    Create the node structure that Sphinx expects for TOC Tree entries.

    The ``bullet`` argument wraps it in a ``nodes.bullet_list``,
    which is how you nest TOC Tree entries.
    """
    label = nodes.Text(text, text)
    reference = nodes.reference(
        '', '',
        internal=True,
        refuri=docname,
        anchorname='#' + anchor,
        *[label])
    wrapper = addnodes.compact_paragraph('', '', reference)
    item = nodes.list_item('', wrapper)
    if bullet:
        return nodes.bullet_list('', item)
    return item
def visit_li(self, node):
    """Convert an HTML <li> element into a docutils list_item,
    carrying over its id and class attributes.

    Returns the node that children should be appended to: a fresh
    paragraph for empty/inline-only items (docutils requires
    block-level children inside list_item), otherwise the list_item
    itself.
    """
    thisItem = nodes.list_item()
    ids = node.attrib.get("id", "")
    if ids:
        thisItem['ids'] = [node.attrib.pop("id")]
    # Fix: the HTML attribute is "class"; the original read "classes"
    # (which never exists in attrib), so the class was never copied.
    classes = node.attrib.get("class", "")
    if classes:
        thisItem["classes"] = [node.attrib.pop("class")]
    ch = list(node)
    # extra "paragraph" is needed to avoid breaking docutils assumptions
    if not ch or ch[0].tag in TAGS_INLINE:
        self.append_node(thisItem)
        return nodes.paragraph()
    else:
        return thisItem
def patched_make_field(self, types, domain, items, **kw):
    """Patched TypedField.make_field: render the parameter name as
    plain bold text (no cross-reference) and rewrite common builtin
    type names onto the python domain before cross-referencing.
    """
    # `kw` catches `env=None` needed for newer sphinx while maintaining
    # backwards compatibility when passed along further down!
    # type: (List, unicode, Tuple) -> nodes.field
    def handle_item(fieldarg, content):
        # Render one "name (type) -- content" paragraph.
        par = nodes.paragraph()
        par += addnodes.literal_strong("", fieldarg)  # Patch: this line added
        # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
        #                            addnodes.literal_strong))
        if fieldarg in types:
            par += nodes.Text(" (")
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = u"".join(n.astext() for n in fieldtype)
                # Qualify bare builtin names so intersphinx can resolve
                # them in the python domain.
                typename = typename.replace("int", "python:int")
                typename = typename.replace("long", "python:long")
                typename = typename.replace("float", "python:float")
                typename = typename.replace("type", "python:type")
                par.extend(
                    self.make_xrefs(
                        self.typerolename,
                        domain,
                        typename,
                        addnodes.literal_emphasis,
                        **kw
                    )
                )
            else:
                par += fieldtype
            par += nodes.Text(")")
        par += nodes.Text(" -- ")
        par += content
        return par

    fieldname = nodes.field_name("", self.label)
    if len(items) == 1 and self.can_collapse:
        # Single collapsible entry: no wrapping list node.
        fieldarg, content = items[0]
        bodynode = handle_item(fieldarg, content)
    else:
        bodynode = self.list_type()
        for fieldarg, content in items:
            bodynode += nodes.list_item("", handle_item(fieldarg, content))
    fieldbody = nodes.field_body("", bodynode)
    return nodes.field("", fieldname, fieldbody)
def patched_make_field(
    self,  # type: TypedField
    types,  # type: typing.Dict[str, typing.List[nodes.Node]]
    domain,  # type: str
    items,  # type: typing.Tuple
    env=None,  # type: typing.Any
):
    # type: (...) -> nodes.field
    """Patched ``TypedField.make_field``.

    The field argument is rendered as plain ``literal_strong`` text instead
    of a cross-reference (see the commented-out ``make_xrefs`` call below);
    type names are still cross-referenced via ``self.typerolename``.
    """
    def handle_item(fieldarg, content):
        # type: (str, str) -> nodes.paragraph
        # Build one "name (type) -- description" paragraph for a field entry.
        par = nodes.paragraph()
        par += addnodes.literal_strong("", fieldarg)  # Patch: this line added
        # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
        #                            addnodes.literal_strong, env=env))
        if fieldarg in types:
            par += nodes.Text(" (")
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = u"".join(n.astext() for n in fieldtype)
                par.extend(
                    self.make_xrefs(
                        self.typerolename,
                        domain,
                        typename,
                        addnodes.literal_emphasis,
                        env=env,
                    )
                )
            else:
                par += fieldtype
            par += nodes.Text(")")
        par += nodes.Text(" -- ")
        par += content
        return par

    fieldname = nodes.field_name("", self.label)
    if len(items) == 1 and self.can_collapse:
        # A single entry collapses straight into the field body, no list.
        fieldarg, content = items[0]
        bodynode = handle_item(fieldarg, content)
    else:
        bodynode = self.list_type()
        for fieldarg, content in items:
            bodynode += nodes.list_item("", handle_item(fieldarg, content))
    fieldbody = nodes.field_body("", bodynode)
    return nodes.field("", fieldname, fieldbody)
def build_contents(self, node, level=0):
    """Recursively build a bullet-list table of contents from ``node``.

    Returns a ``bullet_list`` of entries (one per section found directly
    under ``node``), or ``[]`` when there are none.  ``level`` tracks the
    current nesting depth and is compared against ``self.toc_depth``.
    """
    level += 1
    sections = []
    # Replaced this with the for below to make it work for Sphinx
    # trees.
    # sections = [sect for sect in node if isinstance(sect, nodes.section)]
    for sect in node:
        if isinstance(sect, nodes.compound):
            # Sphinx wraps included documents:
            # compound -> start_of_file -> section
            for sect2 in sect:
                if isinstance(sect2, addnodes.start_of_file):
                    for sect3 in sect2:
                        if isinstance(sect3, nodes.section):
                            sections.append(sect3)
        elif isinstance(sect, nodes.section):
            sections.append(sect)
    entries = []
    autonum = 0  # NOTE(review): never incremented or read below
    # FIXME: depth should be taken from :maxdepth: (Issue 320)
    depth = self.toc_depth
    for section in sections:
        title = section[0]
        auto = title.get('auto')  # May be set by SectNum.
        entrytext = self.copy_and_filter(title)
        reference = nodes.reference('', '', refid=section['ids'][0],
                                    *entrytext)
        ref_id = self.document.set_id(reference)
        entry = nodes.paragraph('', '', reference)
        item = nodes.list_item('', entry)
        # Only add a backlink if the title has no reference of its own yet.
        if (self.backlinks in ('entry', 'top')
                and title.next_node(nodes.reference) is None):
            if self.backlinks == 'entry':
                title['refid'] = ref_id
            elif self.backlinks == 'top':
                title['refid'] = self.toc_id
        if level < depth:
            subsects = self.build_contents(section, level)
            item += subsects
        entries.append(item)
    if entries:
        contents = nodes.bullet_list('', *entries)
        # NOTE(review): ``auto`` here is whatever the *last* section's title
        # carried -- presumably all siblings agree; confirm against SectNum.
        if auto:
            contents['classes'].append('auto-toc')
        return contents
    else:
        return []
def build_toc(node, depth=1):
    # type: (nodes.Node, int) -> List[nodes.Node]
    """Recursively build the local-toc node tree for ``node``.

    NOTE(review): this is a closure -- ``numentries``, ``docname``,
    ``doctree``, ``app``, ``traverse_in_section``, ``TocTree`` and
    ``SphinxContentsFilter`` come from the enclosing scope (not visible in
    this chunk).  Returns a ``bullet_list`` or ``[]`` when empty.
    """
    entries = []
    for sectionnode in node:
        # find all toctree nodes in this section and add them
        # to the toc (just copying the toctree node which is then
        # resolved in self.get_and_resolve_doctree)
        if isinstance(sectionnode, addnodes.only):
            # Recurse *through* "only" nodes at the same depth, keeping the
            # conditional wrapper around the child entries.
            onlynode = addnodes.only(expr=sectionnode['expr'])
            blist = build_toc(sectionnode, depth)
            if blist:
                onlynode += blist.children  # type: ignore
                entries.append(onlynode)
            continue
        if not isinstance(sectionnode, nodes.section):
            for toctreenode in traverse_in_section(sectionnode,
                                                   addnodes.toctree):
                item = toctreenode.copy()
                entries.append(item)
                # important: do the inventory stuff
                TocTree(app.env).note(docname, toctreenode)
            continue
        title = sectionnode[0]
        # copy the contents of the section title, but without references
        # and unnecessary stuff
        visitor = SphinxContentsFilter(doctree)
        title.walkabout(visitor)
        nodetext = visitor.get_entry_text()
        if not numentries[0]:
            # for the very first toc entry, don't add an anchor
            # as it is the file's title anyway
            anchorname = ''
        else:
            anchorname = '#' + sectionnode['ids'][0]
        numentries[0] += 1
        # make these nodes:
        # list_item -> compact_paragraph -> reference
        reference = nodes.reference(
            '', '', internal=True, refuri=docname,
            anchorname=anchorname, *nodetext)
        para = addnodes.compact_paragraph('', '', reference)
        item = nodes.list_item('', para)
        sub_item = build_toc(sectionnode, depth + 1)
        item += sub_item
        entries.append(item)
    if entries:
        return nodes.bullet_list('', *entries)
    return []
def run(self):
    """Render the directive as an "Emva1288" section.

    Produces a section (id derived from the ``section`` option) containing
    a rubric and a bullet list with one "<option>: <value>" entry per
    option that was actually supplied.
    """
    section_id = nodes.make_id("emva1288-" + self.options['section'])
    container = nodes.section(ids=[section_id])
    container += nodes.rubric(text='Emva1288')
    bullet_list = nodes.bullet_list()
    # Iterate the spec (not the supplied options) so output order is the
    # declaration order of option_spec.
    for option_name in self.option_spec:
        if option_name in self.options:
            entry = nodes.list_item()
            entry += nodes.strong(text=option_name + ':')
            entry += nodes.inline(text=' ' + self.options[option_name])
            bullet_list += entry
    container += bullet_list
    return [container]
def _build_toc_node(docname, anchor="anchor", text="test text", bullet=False):
    """
    Create the node structure that Sphinx expects for TOC Tree entries.

    The ``bullet`` argument wraps it in a ``nodes.bullet_list``,
    which is how you nest TOC Tree entries.
    """
    ref = nodes.reference(
        "", "", nodes.Text(text, text),
        internal=True, refuri=docname, anchorname="#" + anchor)
    entry = nodes.list_item("", addnodes.compact_paragraph("", "", ref))
    if bullet:
        return nodes.bullet_list("", entry)
    return entry
def run(self):
    """Build an enumerated list of links to the generated image-test pages.

    Reads the image repository JSON named by the ``image_test_json`` config
    value and emits one raw-HTML link per test, sorted by test name.
    """
    with open(self.config["image_test_json"], "r") as handle:
        imagerepo = json.load(handle)
    listing = nodes.enumerated_list()
    base = self.config["html_baseurl"]
    for test in sorted(imagerepo):
        anchor = nodes.raw(
            "",
            f'<a href="{base}/generated/image_test/{test}.html" />{test}</a>',
            format="html",
        )
        entry = nodes.list_item("")
        entry += anchor
        listing += entry
    return [listing]
def make_mixins(self):
    """Build a "Mixes" field listing this item's mixins as pending xrefs.

    Returns ``[]`` when the documented item has no mixins; otherwise a
    ``nodes.field`` whose body is a bullet list of ``js:mixin``
    cross-references sorted by mixin name.
    """
    item = self.item
    if not item.mixins:
        return []
    field = nodes.field('', nodes.field_name("Mixes", "Mixes"))
    with addto(field, nodes.field_body()) as body, \
            addto(body, nodes.bullet_list()) as listing:
        for mixin in sorted(item.mixins, key=lambda m: m.name):
            xref = addnodes.pending_xref(
                mixin.name,
                nodes.paragraph(mixin.name, mixin.name),
                refdomain='js',
                reftype='mixin',
                reftarget=mixin.name,
            )
            # Scope the reference to the mixin's source module.
            xref['js:module'] = mixin['sourcemodule'].name
            listing += nodes.list_item('', xref)
    return field
def renderDocEndpoint(data, node, endpoints):
    """Replace ``node`` with rendered content for ``endpoints``.

    Without a link, each endpoint is rendered as a full documentation
    entry; with a link, each endpoint becomes a list item holding a
    doc-link paragraph for its path.
    """
    if not node.link:
        node.replace_self([doctree.render_entry(data, ep) for ep in endpoints])
        return
    # todo: to be "correct", this `repl` should be a nodes.bullet_list...
    #       but the problem with that is that then two consecutive lists
    #       will be technically not part of the same list.
    # ==> NOTE: by makeing DocEndpoint accept multiple spec's, this
    #     would be made a non-issue...
    repl = [
        nodes.list_item(
            '', doctree.rpara(doc_link('', doctree.rtext(ep.dpath))))
        for ep in endpoints
    ]
    node.replace_self(repl)
def patched_make_field(
    self,
    types,  # type: Dict[unicode, List[nodes.Node]]
    domain,  # type: unicode
    items,  # type: Tuple
    env=None,  # type: BuildEnvironment
):
    # type: (...) -> nodes.field
    """Patched ``TypedField.make_field``.

    Renders the field argument as plain ``literal_strong`` text so ivars do
    not get incorrect cross-references (see comments in ``handle_item``).
    """
    def handle_item(fieldarg, content):
        # type: (unicode, unicode) -> nodes.paragraph
        # Build one "name (type) -- description" paragraph for a field entry.
        par = nodes.paragraph()
        # Adding the next line, and taking out the one after should prevent
        # ivars from getting incorrect cross-references.
        par += addnodes.literal_strong('', fieldarg)
        # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
        #                            addnodes.literal_strong, env=env))
        if fieldarg in types:
            par += nodes.Text(' (')
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = u''.join(n.astext() for n in fieldtype)
                par.extend(
                    self.make_xrefs(self.typerolename, domain, typename,
                                    addnodes.literal_emphasis, env=env))
            else:
                par += fieldtype
            par += nodes.Text(')')
        par += nodes.Text(' -- ')
        par += content
        return par

    fieldname = nodes.field_name('', self.label)
    if len(items) == 1 and self.can_collapse:
        # A single entry collapses straight into the field body, no list.
        fieldarg, content = items[0]
        bodynode = handle_item(fieldarg, content)
    else:
        bodynode = self.list_type()
        for fieldarg, content in items:
            bodynode += nodes.list_item('', handle_item(fieldarg, content))
    fieldbody = nodes.field_body('', bodynode)
    return nodes.field('', fieldname, fieldbody)
def generate_list_items(app, tagname, fromdocname, sort=True):
    '''Generate list items with link to every document tagged with tagname.

    List items consist of an inner paragraph. The paragraph then contains
    a reference to the tagged document.

    Args:
        app (Sphinx): The sphinx application object.
        tagname (str): The tag to generate list items for.
        fromdocname (str): The source document.
        sort (bool): Sort documents by lowercased title when True.

    Yields:
        docutils.nodes.list_item
    '''
    env = app.builder.env
    if not hasattr(env, 'tags'):
        env.tags = dict()
    # NOTE(review): env.tags[tagname] raises KeyError for an unknown tag --
    # presumably callers only pass tags recorded earlier; confirm.
    if sort:
        docs = sorted(env.tags[tagname], key=lambda doc: doc['title'].lower())
    else:
        docs = env.tags[tagname]
    for doc_info in docs:
        item = nodes.list_item()
        innerpara = nodes.paragraph()
        description = '%s' % doc_info['title']
        item += innerpara
        # Create a reference
        refnode = nodes.reference('', '', internal=True)
        try:
            refnode['refuri'] = app.builder.get_relative_uri(
                fromdocname, doc_info['docname'])
        except NoUri:
            # BUG FIX: the original message used ``node.line`` but no name
            # ``node`` exists in this scope, so the handler itself raised
            # NameError.  Report the two documents involved instead.
            logging.getLogger(__name__).warning(
                '%s -> %s: URI cannot be determined'
                % (fromdocname, doc_info['docname']))
        refnode.append(nodes.Text(description, description))
        innerpara += refnode
        yield item
def build_http_method_section(self, resource, http_method):
    """Assemble the doc nodes for one HTTP method of a resource.

    Returns a list of nodes: the method's description text, followed by
    an optional "Request Parameters" section (when the handler declares
    required/optional fields) and an optional "Errors" section (when the
    handler declares response errors, sorted by error code).
    """
    doc = self.get_doc_for_http_method(resource, http_method)
    handler = self.get_http_method_func(resource, http_method)

    # Description text
    result = [parse_text(self, doc)]

    # Request Parameters section
    required_fields = getattr(handler, 'required_fields', [])
    optional_fields = getattr(handler, 'optional_fields', [])
    if required_fields or optional_fields:
        all_fields = dict(required_fields)
        all_fields.update(optional_fields)
        params_section = nodes.section(ids=['%s_params' % http_method])
        params_section += nodes.title(text='Request Parameters')
        params_section += self.build_fields_table(
            all_fields,
            required_fields=required_fields,
            show_requirement_labels=True)
        result.append(params_section)

    # Errors section
    errors = getattr(handler, 'response_errors', [])
    if errors:
        errors_section = nodes.section(ids=['%s_errors' % http_method])
        errors_section += nodes.title(text='Errors')
        errors_list = nodes.bullet_list()
        errors_section += errors_list
        for error in sorted(errors, key=lambda e: e.code):
            para = nodes.paragraph()
            para += get_ref_to_error(error)
            errors_list += nodes.list_item('', para)
        result.append(errors_section)

    return result
def make_properties_list(self, field):
    """Fill the ``field`` into a properties list and return it.

    :param dict field: the content of the property list to make
    :return: field_list instance filled with given field
    :rtype: nodes.field_list
    """
    result = nodes.field_list()
    # changing the order of elements in this list affects
    # the order in which they are displayed
    ordered_names = ('label', 'type', 'description', 'required', 'disabled',
                     'hidden', 'default', 'placeholder', 'validate_regex',
                     'choices', 'collapse', 'group')
    # Value should be formatted in code-style (=literal) mode
    literal_names = ('type', 'default', 'placeholder', 'validate_regex')
    for name in ordered_names:
        if name not in field:
            continue
        value = field[name]
        if name in literal_names:
            literal = nodes.literal(str(value), str(value))
            result += self.make_field(name, literal)
        elif name == 'choices':
            # Special formatting: each choice renders as a
            # "<label>: <literal value>" bullet entry.
            choice_list = nodes.bullet_list()
            for choice in value:
                para = nodes.paragraph()
                para += nodes.Text(choice['label'] + ': ')
                para += nodes.literal(choice['value'], choice['value'])
                entry = nodes.list_item()
                entry += para
                choice_list += entry
            result += self.make_field(name, choice_list)
        else:
            result += self.make_field(name, str(value))
    return result
def process_latest_toc(app, doctree, fromdocname):
    """We traverse the doctree looking for publication dates to build the
    date-based ToC here. Since the ordering is ill-defined, from our
    perspective, we parse all of them each time, but cache them in the
    environment"""
    env = app.builder.env
    cache_article_dates(env)
    feed_pub_dates = getattr(env, 'feed_pub_dates', {})
    for node in doctree.traverse(latest):
        entries = node['entries']
        includefiles = node['includefiles']
        # Keep only entries with a known publication date; sorting the
        # (date, title, doc) tuples in reverse puts the newest first.
        dated = [(feed_pub_dates.get(doc), title, doc)
                 for title, doc in entries
                 if doc in feed_pub_dates]
        dated.sort(reverse=True)
        toc = nodes.bullet_list('')
        for pub_date, title, docname in dated:
            para = nodes.paragraph()
            entry = nodes.list_item('', para)
            if title is None:
                # Fall back to the document's own title node.
                title = env.titles.get(docname)
                if title:
                    title = title[0]  # .astext()
            # Create a reference
            ref = nodes.reference('', '')
            ref['refdocname'] = docname
            ref['refuri'] = app.builder.get_relative_uri(fromdocname, docname)
            ref.append(title)  # nodes.emphasis(title, title)
            para += ref
            para += nodes.Text(' ', ' ')
            datestr = pub_date.strftime('%Y/%m/%d')
            para += nodes.Text(datestr, datestr)
            # Insert into the latest list
            toc.append(entry)
        node.replace_self(toc)
def _build_notes(self, content):
    """Constructs a list of notes content for the support matrix.

    This is generated as a bullet list appended to ``content`` after a
    "Notes" subtitle.
    """
    content.append(nodes.subtitle(text="Notes"))
    notes_list = nodes.bullet_list()
    content.append(notes_list)
    NOTES = ["Virtuozzo was formerly named Parallels in this document"]
    for note_text in NOTES:
        entry = nodes.list_item()
        entry.append(nodes.strong(text=note_text))
        notes_list.append(entry)
def make_field(self, types: Dict[str, List[Node]], domain: str,
               items: Tuple, env: BuildEnvironment = None,
               inliner: Inliner = None,
               location: Node = None) -> nodes.field:
    """Copy+Paste of TypedField.make_field() from Sphinx version 4.3.2.

    The first and second nodes.Text() instance are changed in this
    implementation to be ' : ' and '' respectively (instead of ' (' and
    ')').

    TODO: Ask sphinx devs if there is a better way to support this that is
    less copy+pasty. (thomasvandoren, 2015-03-17)
    """
    def handle_item(fieldarg: str, content: str) -> nodes.paragraph:
        # Build one "name : type -- description" paragraph for a field entry.
        par = nodes.paragraph()
        par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
                                   addnodes.literal_strong, env=env))
        if fieldarg in types:
            # Changed from upstream's ' (' -- see docstring.
            par += nodes.Text(' : ')
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = fieldtype[0].astext()
                par.extend(self.make_xrefs(self.typerolename, domain,
                                           typename,
                                           addnodes.literal_emphasis,
                                           env=env, inliner=inliner,
                                           location=location))
            else:
                par += fieldtype
            # Changed from upstream's ')' -- see docstring.
            par += nodes.Text('')
        par += nodes.Text(' -- ')
        par += content
        return par

    fieldname = nodes.field_name('', self.label)
    if len(items) == 1 and self.can_collapse:
        # A single entry collapses straight into the field body, no list.
        fieldarg, content = items[0]
        bodynode: Node = handle_item(fieldarg, content)
    else:
        bodynode = self.list_type()
        for fieldarg, content in items:
            bodynode += nodes.list_item('', handle_item(fieldarg, content))
    fieldbody = nodes.field_body('', bodynode)
    return nodes.field('', fieldname, fieldbody)
def _append_info_list_item(self, info_list, title, text=None, link=None,
                           items=None):
    """Append one "<title>: ..." entry to ``info_list``.

    The entry body is, in priority order: the given ``items`` nodes, a
    hyperlink built from ``link``/``text``, or ``text`` as a literal.
    """
    entry = nodes.list_item()
    entry.append(nodes.strong(text="%s: " % title))
    if items:
        for child in items:
            entry.append(child)
    elif link:
        entry.append(self._get_uri_ref(link, text))
    elif text:
        entry.append(nodes.literal(text=text))
    info_list.append(entry)
def add_chain_for_property(chain):
    """Append a list item for each chain element that manages ``prop``.

    NOTE(review): closure -- ``prop``, ``used`` and ``ul`` come from the
    enclosing scope, which is outside this chunk.
    """
    for el in chain:
        if prop not in getattr(el, '__yafowil_managed_props__', []):
            # if getattr(el, '__yafowil_managed_props__', True):
            #     print ('YAFOWIL callable %s has no ' % el,
            #            'managed props decorator!')
            continue
        li = nodes.list_item()
        # BUG FIX: ``func_name`` is the Python 2 spelling; Python 3
        # functions only expose ``__name__``, so under Python 3 every
        # function fell through to the class branch and was listed as
        # "function".  Try func_name first (py2-compatible), then
        # __name__, then fall back to the class name for instances.
        name = getattr(el, 'func_name', None) or getattr(el, '__name__', None)
        if name is None:
            name = el.__class__.__name__
        if name in used:
            continue
        used.append(name)
        li.append(nodes.paragraph(text=name))
        ul.append(li)
def build_details_table(self, error_obj):
    """Build a two-column details table describing ``error_obj``.

    Rows: API error code, HTTP status code, error text, optional HTTP
    headers (a single header as a literal, several as a bullet list), and
    the directive's own content as a description.
    """
    table = nodes.table()
    tgroup = nodes.tgroup(cols=2)
    table += tgroup
    tgroup += nodes.colspec(colwidth=20)
    tgroup += nodes.colspec(colwidth=80)
    tbody = nodes.tbody()
    tgroup += tbody
    # API Error Code
    append_detail_row(tbody, 'API Error Code',
                      nodes.literal(text=error_obj.code))
    # HTTP Status Code
    ref = parse_text(self, ':http:`%s`' % error_obj.http_status)
    append_detail_row(tbody, 'HTTP Status Code', ref)
    # Error Text
    append_detail_row(tbody, 'Error Text',
                      nodes.literal(text=error_obj.msg))
    if error_obj.headers:
        # HTTP Headers
        if len(error_obj.headers) == 1:
            # BUG FIX: ``dict.keys()`` is a non-indexable view on Python 3;
            # ``next(iter(...))`` yields the sole key on both 2 and 3.
            content = nodes.literal(text=next(iter(error_obj.headers)))
        else:
            content = nodes.bullet_list()
            # BUG FIX: ``iterkeys()`` was removed in Python 3; iterating
            # the mapping directly yields keys on both versions.
            for header in error_obj.headers:
                item = nodes.list_item()
                content += item
                literal = nodes.literal(text=header)
                item += literal
        append_detail_row(tbody, 'HTTP Headers', content)
    # Description
    append_detail_row(tbody, 'Description',
                      parse_text(self, '\n'.join(self.content)))
    return table
def run_add_specializers(self, result):
    """Append a "Specializers" field to ``result``'s field list.

    Looks up the method specializers registered for this symbol in the
    current CL package; when there are none, ``result`` is returned
    unchanged.
    """
    package = self.env.temp_data.get('cl:package')
    name = self.cl_symbol_name()
    specializers = METHODS[package].get(name, {}).keys()
    if not specializers:
        return result
    listing = nodes.bullet_list()
    for specializer in specializers:
        xref = specializer_xref(package + ":" + name, specializer,
                                self.state, package)
        listing.append(nodes.list_item('', xref))
    fields = self.get_field_list(result)
    fields.append(
        nodes.field('',
                    nodes.field_name('', "Specializers"),
                    nodes.field_body('', listing)))
    return result
def genCaseCrossRefNodes(self, caseDetailObj, crossFieldName, crossTitle):
    """Build cross-reference nodes for the cases related to this one.

    Returns ``[]`` when ``caseDetailObj[crossFieldName]`` is empty;
    otherwise a title paragraph followed by a bullet list of in-page
    references (one per related case).
    """
    result = []
    related = caseDetailObj[crossFieldName]
    if len(related) > 0:
        result.append(nodes.paragraph(crossTitle, crossTitle))
        listing = nodes.bullet_list()
        for case in related:
            link = nodes.reference('', '')
            link.append(nodes.Text(case['name'], case['name']))
            link['refuri'] = '#' + REF_ID % case['id']
            para = nodes.paragraph('', '')
            para += link
            entry = nodes.list_item('')
            entry.append(para)
            listing.append(entry)
        result.append(listing)
    return result