def find_doxygen_link(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Role handler resolving ``:name:`title <part>``` against a Doxygen tag file.

    Returns a ``(nodes, messages)`` pair; on any lookup/parse failure a plain
    inline node is emitted instead of a hyperlink and a warning is reported.
    """
    # FIX: mutable default arguments ({} / []) replaced with None sentinels.
    if options is None:
        options = {}
    if content is None:
        content = []
    # from :name:`title <part>`
    has_explicit_title, title, part = split_explicit_title(text)
    part = utils.unescape(part)
    if not tag_file:
        # FIX: the original appended this message to a local ``warning_messages``
        # list that was never read, so the warning was silently dropped.
        inliner.reporter.warning(
            'Could not find match for `%s` because tag file not found' % part,
            line=lineno)
        return [nodes.inline(title, title)], []
    try:
        url = app.env.doxylink_cache[cache_name]['mapping'][part]
    except LookupError as error:
        inliner.reporter.warning(
            'Could not find match for `%s` in `%s` tag file. Error reported was %s'
            % (part, tag_filename, error), line=lineno)
        return [nodes.inline(title, title)], []
    except ParseException as error:
        inliner.reporter.warning(
            'Error while parsing `%s`. Is not a well-formed C++ function call or symbol.'
            'If this is not the case, it is a doxylink bug so please report it.'
            'Error reported was: %s' % (part, error), line=lineno)
        return [nodes.inline(title, title)], []

    # If it's an absolute path then the link will work regardless of the document directory
    # Also check if it is a URL (i.e. it has a 'scheme' like 'http' or 'file')
    if os.path.isabs(rootdir) or urllib.parse.urlparse(rootdir).scheme:
        full_url = join(rootdir, url.file)
    # But otherwise we need to add the relative path of the current document
    # to the root source directory to the link
    else:
        relative_path_to_docsrc = os.path.relpath(
            app.env.srcdir, os.path.dirname(inliner.document.attributes['source']))
        # We always use '/' here rather than os.sep since this is a web link --
        # avoids problems like documentation/.\../library/doc/ (mixed slashes).
        full_url = join(relative_path_to_docsrc, '/', rootdir, url.file)

    if url.kind == 'function' and app.config.add_function_parentheses \
            and normalise(title)[1] == '' and not has_explicit_title:
        title = join(title, '()')

    pnode = nodes.reference(title, title, internal=False, refuri=full_url)
    pnode.set_class('reference-' + name)
    return [pnode], []
def handle(self, args, options, content):
    """Render an interactive interpreter session as a literal block.

    Each content line is echoed with the language's prompt (syntax
    highlighted) and paired with the corresponding interpreter output line.

    NOTE(review): ``map``/``filter`` below are used with Python 2 semantics
    (returning list/str); under Python 3 ``len(lines)`` and the inline text
    would break -- this code appears to predate Python 3.
    """
    cx = self.context
    file_like_args = args
    lines = map(str, content)
    output_lines = cx.run_interactive(file_like_args, lines, self.language)
    if len(output_lines) < len(lines):
        # Pad so every input line has a (possibly empty) output line.
        output_lines = output_lines + ([''] * (len(lines) - len(output_lines)))
    sess_nodes = []
    for k in range(len(lines)):
        input_node = nodes.inline(classes = ['interactive-input'])
        input_node += nodes.inline('', self.language.interactive_prompt())
        for n in self.language.highlight(lines[k].rstrip().lstrip()):
            input_node += n
        output_line = output_lines[k]
        # Strip non-ASCII characters from the interpreter output.
        output_line = filter( lambda c: ord(c) < 128, output_line )
        output_node = nodes.inline('', output_line, classes = ['interactive-output'])
        sess_nodes += [input_node, nodes.inline('\n','\n'), output_node]
        if k < len(lines)-1:
            # Blank line between consecutive input/output pairs.
            sess_nodes += [nodes.inline('\n\n','\n\n')]
    all_node = nodes.literal_block(classes=['interactive-session'])
    for n in sess_nodes:
        all_node += n
    return [all_node]
def create_cross_table(self, app, docname, node, matrix, options):
    """Build a cross-reference matrix table: one row per primary traceable,
    one column per secondary, with a checkmark where the pair is related.

    Returns a ``traceable_matrix_crosstable`` container wrapping the table.
    """
    table = nodes.table()
    table["classes"].append("traceables-crosstable")
    # FIX: ``cols`` must match the number of colspecs emitted below; the
    # original passed len(secondaries) but emitted len(secondaries)+1 colspecs
    # (one extra for the leading primary column).
    tgroup = nodes.tgroup(cols=len(matrix.secondaries) + 1, colwidths="auto")
    table += tgroup

    # Add column specifications: primary column plus one per secondary.
    tgroup += nodes.colspec(colwidth=1)
    for column in matrix.secondaries:
        tgroup += nodes.colspec(colwidth=1)

    # Add heading row.
    thead = nodes.thead()
    tgroup += thead
    row = nodes.row()
    thead += row
    entry = nodes.entry()  # empty corner cell above the primary column
    row += entry
    for secondary in matrix.secondaries:
        entry = nodes.entry()
        row += entry
        container = nodes.container()
        entry += container
        inline = nodes.inline()
        container += inline
        paragraph = nodes.paragraph()
        inline += paragraph
        paragraph += secondary.make_reference_node(app.builder, docname)

    # Add table body.
    tbody = nodes.tbody()
    tgroup += tbody
    for primary in matrix.primaries:
        row = nodes.row()
        tbody += row
        entry = nodes.entry()
        row += entry
        paragraph = nodes.paragraph()
        entry += paragraph
        paragraph += primary.make_reference_node(app.builder, docname)
        for is_related in matrix.get_boolean_row(primary):
            entry = nodes.entry()
            row += entry
            # FIX: dropped a no-op ``else: continue`` -- an unrelated pair
            # simply leaves the cell empty.
            if is_related:
                checkmark = traceable_checkmark()
                entry += checkmark
                checkmark += nodes.inline(u"\u2714", u"\u2714")

    container = traceable_matrix_crosstable()
    container += table
    container["traceables-matrix"] = matrix
    # backward = matrix.backward_relationship.capitalize()
    # forward = matrix.forward_relationship.capitalize()
    # container["relationships"] = (forward, backward)
    # container["boolean_matrix"] = 0#boolean_matrix
    # container["secondaries"] = matrix.secondaries
    return container
def _get_column_node(m):
    """Build a ``desc_parameter`` node from regex match *m*.

    Falls back to the raw matched text when no structured 'name' group
    matched; otherwise assembles key/type/optional/reference pieces.
    """
    if not m.group('name'):
        # Nothing structured matched: emit the raw text verbatim.
        return addnodes.desc_parameter(m.group(0), m.group(0))

    node = addnodes.desc_parameter()
    if m.group('key'):
        node += nodes.Text("#", "#")
    key_node = nodes.strong(m.group('name'), m.group('name'))
    key_node['classes'].append('arg-key')
    node += key_node
    if m.group('type'):
        node += nodes.Text(" : ", " : ")
        type_node = nodes.inline(m.group('type'), m.group('type'))
        type_node['classes'].append('arg-value')  # FIXME: should be arg type probably
        node += type_node
    if m.group('optional'):
        node += nodes.Text("? ", "?")  # FIXME: find a better type
    if m.group('reference'):
        ref_node = nodes.inline(m.group('reference'), m.group('reference'))
        ref_node['classes'].append('arg-value')  # FIXME: should be arg type probably
        node += ref_node
    return node
def _build_markup(self):
    """Assemble the branch/commit field list plus dirty/untracked warnings."""
    fields = nodes.field_list()
    container = nodes.paragraph()
    container.append(fields)

    def _append_field(label, body_child):
        # One field row: a name node followed by a body wrapping *body_child*.
        body = nodes.field_body()
        body.append(body_child)
        field = nodes.field()
        field += [nodes.field_name(text=label), body]
        fields.append(field)

    if 'branch' in self.options:
        _append_field("Branch", nodes.emphasis(text=self.branch_name))
    if 'commit' in self.options:
        if 'no_github_link' in self.options:
            commit_node = self._commit_text_node()
        else:
            commit_node = self._github_link()
        _append_field("Commit", commit_node)
    if 'uncommitted' in self.options and self.repo.is_dirty():
        container.append(nodes.warning('', nodes.inline(
            text="There were uncommitted changes when this was compiled."
        )))
    if 'untracked' in self.options and self.repo.untracked_files:
        container.append(nodes.warning('', nodes.inline(
            text="There were untracked files when this was compiled."
        )))
    return [container]
def run(self):
    """Directive handler for a version marker (e.g. ``versionadded``).

    Builds a paragraph tagged with the directive type and version, prefixes
    its first paragraph with the "New in version X:" style label, and
    registers the version labels with the document language.
    """
    node = nodes.paragraph()
    node['classes'] = ['versionadded']
    node.document = self.state.document
    set_source_info(self, node)
    node['type'] = self.name
    node['version'] = self.arguments[0]
    # Label template such as "New in version %s", keyed by directive name.
    text = versionlabels[self.name] % self.arguments[0]
    if len(self.arguments) == 2:
        # Inline explanation supplied directly on the directive line.
        inodes, messages = self.state.inline_text(self.arguments[1],
                                                  self.lineno + 1)
        para = nodes.paragraph(self.arguments[1], '', *inodes)
        set_source_info(self, para)
        node.append(para)
    else:
        messages = []
    if self.content:
        self.state.nested_parse(self.content, self.content_offset, node)
    if len(node):
        if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
            # Re-wrap the first paragraph's children in a translatable inline
            # so the label plus text is translated as one unit.
            content = nodes.inline(node[0].rawsource, translatable=True)
            content.source = node[0].source
            content.line = node[0].line
            content += node[0].children
            node[0].replace_self(nodes.paragraph('', '', content))
        node[0].insert(0, nodes.inline('', '%s: ' % text,
                                       classes=['versionmodified']))
    else:
        # No body at all: emit just the label as its own paragraph.
        para = nodes.paragraph('', '',
                               nodes.inline('', '%s.' % text,
                                            classes=['versionmodified']))
        node.append(para)
    language = languages.get_language(self.state.document.settings.language_code,
                                     self.state.document.reporter)
    language.labels.update(versionlabels)
    return [node] + messages
def issues_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """
    Use: :issue|bug|feature|support:`ticket number`

    When invoked as :issue:, turns into just a "#NN" hyperlink to Github.

    When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".
    """
    # FIX: mutable default arguments ({} / []) replaced with None sentinels.
    if options is None:
        options = {}
    if content is None:
        content = []
    # Old-style 'just the issue link' behavior
    issue_no = utils.unescape(text)
    ref = "https://github.com/fabric/fabric/issues/" + issue_no
    link = nodes.reference(rawtext, '#' + issue_no, refuri=ref, **options)
    ret = [link]
    # Additional 'new-style changelog' stuff
    if name in issue_types:
        which = '[<span class="changelog-%s">%s</span>]' % (
            name, name.capitalize()
        )
        ret = [
            nodes.raw(text=which, format='html'),
            nodes.inline(text=" "),
            link,
            nodes.inline(text=":")
        ]
    return ret, []
def run(self):
    """Handle the ``deprecated-removed`` directive (deprecated in version A,
    removal planned for version B); registers the change with the build env.
    """
    node = addnodes.versionmodified()
    node.document = self.state.document
    node["type"] = "deprecated-removed"
    version = (self.arguments[0], self.arguments[1])
    node["version"] = version
    text = self._label % version
    if len(self.arguments) == 3:
        # Inline explanation supplied on the directive line itself.
        inodes, messages = self.state.inline_text(self.arguments[2],
                                                  self.lineno + 1)
        para = nodes.paragraph(self.arguments[2], "", *inodes)
        node.append(para)
    else:
        messages = []
    if self.content:
        self.state.nested_parse(self.content, self.content_offset, node)
    # FIX: guard the node[0] access -- with no argument text and no content the
    # node is empty and the original raised IndexError here.
    if len(node) and isinstance(node[0], nodes.paragraph) and node[0].rawsource:
        # Re-wrap the first paragraph so label + text translate as one unit.
        content = nodes.inline(node[0].rawsource, translatable=True)
        content.source = node[0].source
        content.line = node[0].line
        content += node[0].children
        node[0].replace_self(nodes.paragraph("", "", content))
        if not SPHINX11:
            node[0].insert(0, nodes.inline("", "%s: " % text,
                                           classes=["versionmodified"]))
    elif not SPHINX11:
        # No leading paragraph: emit the label on its own.
        para = nodes.paragraph("", "",
                               nodes.inline("", "%s." % text,
                                            classes=["versionmodified"]))
        if len(node):
            node.insert(0, para)
        else:
            node.append(para)
    env = self.state.document.settings.env
    env.note_versionchange("deprecated", version[0], node, self.lineno)
    return [node] + messages
def process_node(node):
    """Wrap *node* in an inline carrying the 'pre' class.

    Text nodes are re-wrapped by their text; any other node becomes the
    child of the new inline.
    """
    if isinstance(node, nodes.Text):
        wrapped = nodes.inline('', node.astext())
    else:
        wrapped = nodes.inline('', '', node)
    wrapped['classes'].append('pre')
    return wrapped
def run(self):
    """Build a longtable of packages (name, version, reason for inclusion).

    Each content line has the form ``path/to/pkg.cabal: reason text``; the
    reason is parsed as reStructuredText.

    Raises RuntimeError when a line has no ``: reason`` part.
    """
    # FIX: import hoisted out of the per-line loop where the original placed it.
    from docutils.statemachine import ViewList

    self.assert_has_content()
    packages = []
    for line in self.content:
        (pkg_path, _, reason) = line.partition(':')
        if len(reason) == 0:
            raise RuntimeError("Missing reason for inclusion of package %s"
                               % pkg_path)

        # Parse reason as reStructuredText.
        reason_vl = ViewList(initlist=[reason.strip()])
        reason_node = nodes.paragraph()
        self.state.nested_parse(reason_vl, 0, reason_node)
        packages.append((pkg_path, reason_node))

    # Create column headers for table
    header = [
        nodes.inline(text=h)
        for h in ["Package", "Version", "Reason for inclusion"]
    ]
    package_list = [header]
    for (pkg_path, reason) in sorted(packages):
        (pkg_name, pkg_version) = read_cabal_file(pkg_path)
        cells = [
            nodes.paragraph(text=pkg_name),
            nodes.inline(text=pkg_version),
            reason
        ]
        package_list.append(cells)

    table = build_table_from_list(package_list, [20, 20, 40])
    table['classes'].append('longtable')
    return [table]
def run(self):
    """Render the directive's options as a definition list.

    The 'kerk', 'predikant' and 'tags' options become comma-separated
    reference links; all other options are shown as plain text. The
    internal 'added' option is skipped.
    """
    link_templates = {'kerk': SERMONCHURHLINK,
                      'predikant': SERMONREFERENTLINK,
                      'tags': SERMONTAGLINK}
    result = nodes.definition_list()
    for option in sorted(self.options.keys()):
        if option == 'added':
            continue
        result += nodes.term(text=option.capitalize())
        definition = nodes.definition()
        paragraph = nodes.paragraph()
        if option in link_templates:
            template = link_templates[option]
            values = [v.strip() for v in self.options[option].split(',')]
            last_index = len(values) - 1
            for index, value in enumerate(values):
                paragraph += nodes.reference(refuri=template % value,
                                             text=value)
                if index != last_index:
                    paragraph += nodes.inline(text=', ')
        else:
            paragraph += nodes.inline(text=self.options[option])
        definition += paragraph
        result += definition
    return [result]
def _build_markup(self, commits):
    """Build a bullet list summarizing *commits*: summary line, author, date,
    and (optionally) the detailed message as a paragraph or literal block."""
    bullet_list = nodes.bullet_list()
    for commit in commits:
        when = datetime.fromtimestamp(commit.authored_date)
        # First line is the summary; anything after the first newline is
        # detail text (empty string when there is none).
        summary, _, detail = commit.message.partition('\n')
        item = nodes.list_item()
        item += [
            nodes.strong(text=summary),
            nodes.inline(text=" by "),
            nodes.emphasis(text=six.text_type(commit.author)),
            nodes.inline(text=" at "),
            nodes.emphasis(text=str(when))
        ]
        if detail:
            detail = detail.strip()
            if self.options.get('detailed-message-pre', False):
                item.append(nodes.literal_block(text=detail))
            else:
                item.append(nodes.paragraph(text=detail))
        bullet_list.append(item)
    return [bullet_list]
def run(self):
    """List recent git commits touching this document (or, with the ``dir``
    flag, its containing directory)."""
    env = self.state.document.settings.env
    config = env.config
    repo_dir = env.srcdir + '/' + config["git_repository_root"]
    target_path = env.srcdir + '/' + env.docname + config["source_suffix"]
    # ``dir`` is a flag option: when present its stored value is None, so
    # the lookup yields None instead of the False default.
    if self.options.get('dir', False) is None:
        target_path = '/'.join(target_path.split('/')[:-1])
    repo = Repo(repo_dir)
    history = nodes.bullet_list()
    limit = self.options.get('revisions', 10)
    for commit in list(repo.iter_commits(paths=target_path))[:limit]:
        when = datetime.fromtimestamp(commit.authored_date)
        # Summary is the first message line; the remainder is detail text.
        summary, _, detail = commit.message.partition('\n')
        item = nodes.list_item()
        item += [
            nodes.strong(text=summary),
            nodes.inline(text=" by "),
            nodes.emphasis(text=str(commit.author)),
            nodes.inline(text=" at "),
            nodes.emphasis(text=str(when))
        ]
        if detail:
            item.append(nodes.caption(text=detail.strip()))
        history.append(item)
    return [history]
def run(self):
    """List the ten most recent commits of the repository at the source root."""
    env = self.state.document.settings.env
    repo = Repo(env.srcdir)
    listing = nodes.bullet_list()
    for commit in list(repo.iter_commits())[:10]:
        when = datetime.fromtimestamp(commit.authored_date)
        # Summary is the first message line; the remainder is detail text.
        summary, _, detail = commit.message.partition('\n')
        entry = nodes.list_item()
        entry += [
            nodes.strong(text=summary),
            nodes.inline(text=" by "),
            nodes.emphasis(text=str(commit.author)),
            nodes.inline(text=" at "),
            nodes.emphasis(text=str(when))
        ]
        if detail:
            entry.append(nodes.caption(text=detail.strip()))
        listing.append(entry)
    return [listing]
def menusel_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
    # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]]  # NOQA
    """Role for :menuselection:/:guilabel:-style text.

    Splits on accelerator markers (``&x``) into styled inlines; for
    ``menuselection`` also replaces ``-->`` with a triangular bullet.
    """
    # FIX: mutable default arguments ({} / []) replaced with None sentinels.
    if options is None:
        options = {}
    if content is None:
        content = []
    env = inliner.document.settings.env
    if not typ:
        assert env.temp_data['default_role']
        typ = env.temp_data['default_role'].lower()
    else:
        typ = typ.lower()

    text = utils.unescape(text)
    if typ == 'menuselection':
        text = text.replace('-->', u'\N{TRIANGULAR BULLET}')
    spans = _amp_re.split(text)

    node = nodes.inline(rawtext=rawtext)
    for i, span in enumerate(spans):
        span = span.replace('&&', '&')
        if i == 0:
            # Text before the first accelerator marker.
            if len(span) > 0:
                textnode = nodes.Text(span)
                node += textnode
            continue
        # Every later span starts with its accelerator character.
        accel_node = nodes.inline()
        letter_node = nodes.Text(span[0])
        accel_node += letter_node
        accel_node['classes'].append('accelerator')
        node += accel_node
        textnode = nodes.Text(span[1:])
        node += textnode
    node['classes'].append(typ)
    return [node], []
def run(self):
    """Handle the ``deprecated-removed`` directive: deprecated in one
    version, scheduled for removal in another; registers the change with
    the build environment."""
    node = addnodes.versionmodified()
    node.document = self.state.document
    node['type'] = 'deprecated-removed'
    version = (self.arguments[0], self.arguments[1])
    node['version'] = version
    # Translate the label template, then substitute both version numbers.
    label = translators['sphinx'].gettext(self._label)
    text = label.format(deprecated=self.arguments[0], removed=self.arguments[1])
    if len(self.arguments) == 3:
        # Inline explanation supplied on the directive line itself.
        inodes, messages = self.state.inline_text(self.arguments[2],
                                                  self.lineno+1)
        para = nodes.paragraph(self.arguments[2], '', *inodes, translatable=False)
        node.append(para)
    else:
        messages = []
    if self.content:
        self.state.nested_parse(self.content, self.content_offset, node)
    if len(node):
        if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
            # Re-wrap the first paragraph so label + text translate together.
            content = nodes.inline(node[0].rawsource, translatable=True)
            content.source = node[0].source
            content.line = node[0].line
            content += node[0].children
            node[0].replace_self(nodes.paragraph('', '', content,
                                                 translatable=False))
        node[0].insert(0, nodes.inline('', '%s: ' % text,
                                       classes=['versionmodified']))
    else:
        # No body at all: emit just the label as its own paragraph.
        para = nodes.paragraph('', '',
                               nodes.inline('', '%s.' % text,
                                            classes=['versionmodified']),
                               translatable=False)
        node.append(para)
    env = self.state.document.settings.env
    env.note_versionchange('deprecated', version[0], node, self.lineno)
    return [node] + messages
def _create_notes_paragraph(self, notes):
    """ Constructs a paragraph which represents the implementation notes

    The paragraph consists of text and clickable URL nodes if links
    were given in the notes.
    """
    para = nodes.paragraph()
    # links could start with http:// or https://
    link_idxs = [m.start() for m in re.finditer('https?://', notes)]
    start_idx = 0
    for link_idx in link_idxs:
        # assume the notes start with text (could be empty)
        para.append(nodes.inline(text=notes[start_idx:link_idx]))
        # create a URL node until the next blank or the end of the notes
        link_end_idx = notes.find(" ", link_idx)
        if link_end_idx == -1:
            # In case the notes end with a link without a blank
            link_end_idx = len(notes)
        # FIX: slice must stop AT the blank; the original used
        # ``link_end_idx + 1`` and embedded the trailing space in the URI.
        uri = notes[link_idx:link_end_idx]
        para.append(nodes.reference("", uri, refuri=uri))
        start_idx = link_end_idx + 1

    # get all text after the last link (could be empty) or all of the
    # text if no link was given
    para.append(nodes.inline(text=notes[start_idx:]))
    return para
def get_type_name(field_type):
    """Return a list of docutils nodes describing *field_type* for display.

    Handles forward-declared classes (given as strings), lists ("List of X"),
    tuples of allowed literal values ("One of a, b"), WebAPIResource
    subclasses (cross-reference), and primitives via ``self.type_mapping``.

    Python 2 code (``basestring``, print statement).
    """
    # We may be dealing with a forward-declared class.
    # NOTE(review): ``field_type is not str`` is always True for string
    # *instances* (it only excludes the ``str`` type object itself), so the
    # guard effectively fires for any string -- confirm intent.
    if isinstance(field_type, basestring) and field_type is not str:
        field_type = self.get_resource_class(field_type)

    if type(field_type) is list:
        # Recurse on the element type.
        return [nodes.inline(text='List of ')] + \
            get_type_name(field_type[0])
    elif type(field_type) is tuple:
        # Tuple of allowed literal values.
        value_nodes = []
        for value in field_type:
            if value_nodes:
                value_nodes.append(nodes.inline(text=', '))
            value_nodes.append(nodes.literal(text=value))
        return [nodes.inline(text='One of ')] + value_nodes
    elif (inspect.isclass(field_type) and
          issubclass(field_type, WebAPIResource)):
        return [get_ref_to_resource(field_type, False)]
    elif field_type in self.type_mapping:
        return [nodes.inline(text=self.type_mapping[field_type])]
    else:
        # Unknown type: surface it loudly during the docs build.
        print "Unknown type %s" % (field_type,)
        assert False
def handle_signature(self, sig, signode):
    """Parse a Forth-style word signature and populate *signode*.

    The signature has the shape: word name, optional parenthesised stack
    comment, flags text, optional quoted "say" string. Returns the name.

    Raises ValueError when the signature does not match the expected form.
    """
    match = re.match(r'(\S+)\s+'
                     r'(\(.*\)|)\s*'
                     r'([^"]*?)\s*'
                     r'(".*"|)\s*'
                     r'$', sig + ' ')
    if not match:
        raise ValueError
    name, stack, flags, say = match.groups()
    self.stack = stack
    # Process flags text
    flags, messages = self.state.inline_text(flags, self.lineno)
    signode += extras(0)
    signode += addnodes.desc_name(name, name)
    signode += extras(1)
    for s in stack.split():
        # Stack-comment punctuation stays plain; operand names are emphasized.
        if s in ['(', '--', ')'] or s.endswith(':'):
            signode += nodes.inline(s, s)
        else:
            signode += nodes.emphasis(s, s)
        # Spacer between stack tokens.
        # NOTE(review): original formatting was lost; this spacer is assumed
        # to be inside the loop -- confirm against upstream.
        signode += nodes.inline(' ', ' ')
    signode += extras(2)
    signode += flags
    signode += nodes.inline(' ', ' ')
    signode += nodes.emphasis(say, say)
    signode += extras(3)
    return name
def make_nodes():
    """Yield inline nodes: leading whitespace, one class-styled node per
    lexer token, then trailing whitespace."""
    yield nodes.inline(left_space, left_space)
    for token_type, token_text in tokens:
        css_class = _get_ttype_class(token_type)
        yield nodes.inline(token_text, token_text, classes=[css_class])
    yield nodes.inline(right_space, right_space)
def handle_signature(self, sig, signode):
    """Split *sig* into a name plus prefixed argument tokens, append the
    corresponding nodes to *signode*, and return the name.

    Token prefixes: ``t:``/``a:`` -> literal emphasis, ``op:`` -> literal,
    ``x:`` -> emphasis (dashes become spaces), ``<sp>`` -> a space.
    """
    parts = sig.split()
    name = parts[0]
    # parts[1:] is already [] when there are no argument tokens.
    for arg in parts[1:]:
        if arg.startswith("t:") or arg.startswith("a:"):
            signode += addnodes.literal_emphasis("", arg[2:])
        elif arg.startswith("op:"):
            signode += nodes.literal("", arg[3:])
        elif arg.startswith("x:"):
            signode += nodes.emphasis("", arg[2:].replace("-", " "))
        elif arg == "<sp>":
            signode += nodes.inline("", " ")
        else:
            signode += nodes.inline("", arg)
    return name
def run(self):
    """Version-change directive handler (versionadded/versionchanged/...).

    Wraps the directive body in a ``versionmodified`` node, prefixes it with
    the "Changed in version X" style label, and registers the change with
    the build environment.
    """
    node = addnodes.versionmodified()
    node.document = self.state.document
    set_source_info(self, node)
    node['type'] = self.name
    node['version'] = self.arguments[0]
    # Label template such as "Changed in version %s", keyed by directive name.
    text = versionlabels[self.name] % self.arguments[0]
    if len(self.arguments) == 2:
        # Inline explanation supplied directly on the directive line.
        inodes, messages = self.state.inline_text(self.arguments[1],
                                                  self.lineno+1)
        para = nodes.paragraph(self.arguments[1], '', *inodes)
        set_source_info(self, para)
        node.append(para)
    else:
        messages = []
    if self.content:
        self.state.nested_parse(self.content, self.content_offset, node)
    if len(node):
        if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
            # Re-wrap the first paragraph so label + text translate together.
            content = nodes.inline(node[0].rawsource, translatable=True)
            content.source = node[0].source
            content.line = node[0].line
            content += node[0].children
            node[0].replace_self(nodes.paragraph('', '', content))
        node[0].insert(0, nodes.inline('', '%s: ' % text,
                                       classes=['versionmodified']))
    else:
        # No body at all: emit just the label as its own paragraph.
        para = nodes.paragraph('', '',
                               nodes.inline('', '%s.' % text,
                                            classes=['versionmodified']))
        node.append(para)
    env = self.state.document.settings.env
    # XXX should record node.source as well
    env.note_versionchange(node['type'], node['version'], node, node.line)
    return [node] + messages
def run(self):
    """Render the next patch from the queue as per-file sections: filename,
    hunk sections, the raw diff, and the resulting file content.

    Emits an error node when file insertion is disabled, a warning node when
    the queue is exhausted, and nothing when the ``hidden`` option is set.
    """
    document = self.state.document
    env = document.settings.env
    series = env.temp_data[SERIES_KEY]
    app = env.app
    if not document.settings.file_insertion_enabled:
        msg = "File insertion disabled"
        app.warn(msg)
        error = nodes.error('', nodes.inline(text=msg))
        error.lineno = self.lineno
        return [error]

    patch = next(series, None)
    if patch is None:
        msg = "No patch left in queue %s" % series.path
        app.warn(msg)
        warning = nodes.warning('', nodes.inline(text=msg))
        warning.lineno = self.lineno
        return [warning]

    if 'hidden' in self.options:
        return []

    doc_dir = os.path.dirname(env.doc2path(env.docname))
    patch_root = nodes.container(classes=['pq-patch'])
    for fname, path, hunks in patch:
        patch_root.append(nodes.emphasis(text=fname))
        relative_path = os.path.relpath(path, doc_dir)

        try:
            # FIX: close the file deterministically -- the original leaked the
            # handle opened by ``open(path, 'rb').read()``.
            with open(path, 'rb') as patched_file:
                sample = patched_file.read()
            lang = pygments.lexers.guess_lexer_for_filename(
                fname, sample).aliases[0]
        except pygments.util.ClassNotFound:
            lang = 'guess'

        patchlines = []
        section = nodes.container(classes=['pq-section'])
        for hunk in hunks:
            patchlines.extend(line.rstrip('\n') for line in hunk.hunk)
            section.extend(self.run_hunk(hunk, relative_path, lang=lang))
        patch_root.append(section)

        patch_root.append(
            nodes.container(
                '', *self.run_diff(patchlines), classes=['pq-diff']))
        patch_root.append(
            nodes.container(
                '', *self.run_content(relative_path, lang=lang),
                classes=['pq-file']))
        undepend(env, relative_path)

    return [patch_root]
def issue_nodelist(name, identifier=None):
    """Return nodes for a "[Type]" colored badge, an optional identifier,
    and a trailing colon (colon placement depends on identifier presence)."""
    badge_html = '[<span style="color: #%s;">%s</span>]' % (
        ISSUE_TYPES[name], name.capitalize()
    )
    result = [nodes.raw(text=badge_html, format='html')]
    if identifier:
        result += [nodes.inline(text=" "), identifier, nodes.inline(text=":")]
    else:
        result += [nodes.inline(text=":"), nodes.inline(text=" ")]
    return result
def issue_nodelist(name, link=None):
    """Build "[Type]" badge nodes followed by an optional hyperlink node and
    a colon (colon placement depends on whether a link was given)."""
    badge = '[<span style="color: #%s;">%s</span>]' % (
        ISSUE_TYPES[name], name.capitalize()
    )
    pieces = [nodes.raw(text=badge, format='html')]
    if link:
        pieces.extend([nodes.inline(text=" "), link, nodes.inline(text=":")])
    else:
        pieces.extend([nodes.inline(text=":"), nodes.inline(text=" ")])
    return pieces
def apply (self, **kwargs):
    """Dropcap transform: replace the first character(s) of the paragraph
    following the pending node with a styled dropcap (image or inline char)
    plus an optional 'dropspan' run, then remove the pending node.
    """
    # First paragraph following the pending start node.
    iter_ = self.startnode.traverse (nodes.paragraph, siblings = 1)
    if len (iter_):
        para = iter_[0]
        iter_ = para.traverse (nodes.Text)
        details = self.startnode.details
        if len (iter_):
            textnode = iter_[0]
            charnode = spannode = restnode = None
            char = details['char']
            # The paragraph must actually begin with the dropcap character(s).
            if not textnode.startswith (char):
                error ("Dropcap: next paragraph doesn't start with: '%s'." % char)
                return
            span = details.get ('span', '')
            if not textnode.startswith (span):
                error ("Dropcap: next paragraph doesn't start with: '%s'." % span)
                return
            if span and not span.startswith (char):
                error ("Dropcap: span doesn't start with: '%s'." % char)
                return
            if span == char:
                # A span equal to the char adds nothing; treat as no span.
                span = ''
            if span:
                # split into char/span/rest
                restnode = nodes.Text (textnode.astext ()[len (span):])
                spannode = nodes.inline ()
                spannode.append (nodes.Text (textnode.astext ()[len (char):len (span)]))
                spannode['classes'].append ('dropspan')
            else:
                # split into char/rest
                restnode = nodes.Text (textnode.astext ()[len (char):])
                spannode = nodes.inline ('', '')
                spannode['classes'].append ('dropspan')
            if (not self.document.settings.no_images) and ('image' in details):
                # Image dropcap; the char remains as alt text.
                charnode = nodes.image ()
                charnode['uri'] = details['image']
                charnode['alt'] = char
                # debug ("Inserting image %s as dropcap." % uri)
            else:
                # Plain character dropcap.
                charnode = nodes.inline ()
                charnode.append (nodes.Text (char))
                # debug ("Inserting char %s as dropcap." % char)
            charnode['classes'].append ('dropcap')
            charnode.attributes.update (details)
            para.replace (textnode, [charnode, spannode, restnode])
    # Remove the pending node itself.
    # NOTE(review): original indentation was lost; this removal is assumed to
    # run unconditionally (matching the early returns above) -- confirm.
    self.startnode.parent.remove (self.startnode)
def doctree_read(app, doctree):
    """Sphinx ``doctree-read`` handler: attach "[source]" viewcode links.

    Adds an HTML-only cross-reference to the highlighted module page for
    every Python description node, and a paragraph-level link after each
    module index entry.
    """
    # Add viewcode nodes for code elements referenced in the document.
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        env._viewcode_modules = {}

    # handle desc (description) nodes (module contents)
    for objnode in doctree.traverse(addnodes.desc):
        if objnode.get('domain') != 'py':
            continue
        names = set()
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue
            modname = signode.get('module')
            if not modname:
                continue
            fullname = signode.get('fullname')
            if not _update_tags(env, modname, fullname):
                continue
            if fullname in names:
                # only one link per name, please
                continue
            names.add(fullname)
            pagename = '_modules/' + modname.replace('.', '/')
            # build up an xref and add it to the desc node
            onlynode = addnodes.only(expr='html')
            onlynode += addnodes.pending_xref(
                '', reftype='viewcode', refdomain='std', refexplicit=False,
                reftarget=pagename, refid=fullname, refdoc=env.docname)
            onlynode[0] += nodes.inline('', _('[source]'),
                                        classes=['viewcode-link'])
            signode += onlynode

    # handle index nodes (modules themselves)
    for objnode in doctree.traverse(addnodes.index):
        # extract module name by de-munging the "target" field
        index_target = objnode['entries'][0][2]
        if not index_target.startswith('module-'):
            continue
        modname = index_target.replace('module-', '', 1)
        _update_tags(env, modname)
        pagename = '_modules/' + modname.replace('.', '/')
        # build up an xref and add it in a new paragraph after the index node
        xref = addnodes.pending_xref(
            '', reftype='viewcode', refdomain='std', refexplicit=False,
            reftarget=pagename, refid='', refdoc=env.docname)
        xref += nodes.inline('', _('[source]'), classes=['viewcode-link'])
        idx = objnode.parent.index(objnode) + 1
        objnode.parent.insert(idx, nodes.paragraph('', '', xref))
def param_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """
    Handles the list of values accepted by the call. This is used to render
    both query parameters for GET calls and the valid body contents for POST
    calls.

    The text argument must contain three parts, separated by a comma:
    - parameter name
    - type
    - description

    If parameter name begins with ?, the description will be edited to
    indicate the parameter is optional.

    Example:
      id,str,uniquely identifies the repository
    """
    # FIX: mutable default arguments ({} / []) replaced with None sentinels.
    if options is None:
        options = {}
    if content is None:
        content = []

    # Split apart the parameter name, type, and description
    param_parts = text.split(',', 2)
    param_name = param_parts[0].strip()
    param_type = param_parts[1].strip().lower()
    param_description = param_parts[2].strip()

    role_nodes = []

    # Handle name
    optional = param_name.startswith('?')
    if optional:
        param_name = param_name[1:]
    role_nodes.append(nodes.strong(text=param_name))

    # Handle type
    if param_type != '':
        # Safety net in case the python types are specified
        type_translations = {
            'str': 'string',
            'int': 'number',
            'dict': 'object',
            'dictionary': 'object',
            'list': 'array',
            'bool': 'boolean',
        }
        param_type = type_translations.get(param_type, param_type)
        role_nodes.append(nodes.inline(text=' (%s) - ' % param_type))

    # Handle description
    if param_description != '':
        if optional:
            role_nodes.append(nodes.emphasis(text='(optional) '))
        param_description = _format_description(param_description)
        role_nodes.append(nodes.inline(text=param_description))

    return role_nodes, []
def create_title_node(self, traceable):
    """Return a one-element list holding an inline titled "<tag> -- <title>"
    (or just the literal tag when the traceable has no title)."""
    if traceable.has_title():
        inner = nodes.inline()
        inner += nodes.literal(text=traceable.tag)
        inner += nodes.inline(text=" -- ")
        inner += nodes.inline(text=traceable.title)
    else:
        inner = nodes.literal(text=traceable.tag)
    wrapper = nodes.inline()
    wrapper += inner
    return [wrapper]
def format(self, app, docname, node, traceables, options):
    """Build a table listing *traceables* with columns: tag, title, plus any
    extra attribute names given in ``options["attributes"]``."""
    additional_attributes = options.get("attributes") or []
    columns = ["tag", "title"] + additional_attributes

    table = nodes.table()
    table["classes"].append("traceables-listtable")
    tgroup = nodes.tgroup(cols=len(columns), colwidths="auto")
    table += tgroup

    # Add column specifications.
    for attribute_name in columns:
        tgroup += nodes.colspec(colwidth=1)

    # Add heading row.
    thead = nodes.thead()
    tgroup += thead
    row = nodes.row()
    thead += row
    for attribute_name in columns:
        entry = nodes.entry()
        row += entry
        container = nodes.container()
        entry += container
        text = attribute_name.capitalize()
        inline = nodes.inline(text, text)
        container += inline

    # Add table body.
    tbody = nodes.tbody()
    tgroup += tbody
    for traceable in traceables:
        row = nodes.row()
        tbody += row
        for attribute_name in columns:
            entry = nodes.entry()
            row += entry
            if attribute_name == "tag":
                # The tag cell is a clickable reference to the traceable.
                inline = nodes.inline()
                inline += traceable.make_reference_node(
                    app.builder, docname)
            elif attribute_name == "title":
                # NOTE(review): ``has_title`` is read as an attribute here but
                # called as a method elsewhere in this file -- confirm which
                # form is correct.
                text = traceable.title if traceable.has_title else ""
                inline = nodes.inline(text, text)
            else:
                # Arbitrary attribute lookup; missing values render empty.
                text = traceable.attributes.get(attribute_name, "")
                inline = nodes.inline(text, text)
            entry += inline
    return table
class ResourceDirective(Directive):
    """Directive rendering the documentation page for a WebAPIResource:
    title, details table, fields, links, per-HTTP-method sections, and
    fetched request/response examples.

    Python 2 code (old-style ``except X, e``; ``unicode``/``file`` builtins).
    """
    has_content = True
    required_arguments = 0
    option_spec = {
        'classname': directives.unchanged_required,
        'is-list': directives.flag,
        'hide-links': directives.flag,
        'hide-examples': directives.flag,
    }

    item_http_methods = set(['GET', 'DELETE', 'PUT'])
    list_http_methods = set(['GET', 'POST'])

    # Generic mimetypes suppressed from the examples section in favor of
    # resource-specific vendor mimetypes.
    FILTERED_MIMETYPES = [
        'application/json',
        'application/xml',
    ]

    # Maps Python types to the human-readable names shown in the docs.
    type_mapping = {
        int: 'Integer',
        str: 'String',
        unicode: 'String',
        bool: 'Boolean',
        dict: 'Dictionary',
        file: 'Uploaded File',
    }

    def run(self):
        """Build and return the target node plus the resource's main section."""
        try:
            resource_class = self.get_resource_class(self.options['classname'])
        except ResourceNotFound, e:
            return e.error_node

        # Add the class's file and this extension to the dependencies.
        self.state.document.settings.env.note_dependency(__file__)
        self.state.document.settings.env.note_dependency(
            sys.modules[resource_class.__module__].__file__)

        resource = get_resource_from_class(resource_class)
        is_list = 'is-list' in self.options

        docname = 'webapi2.0-%s-resource' % \
            get_resource_docname(resource, is_list)
        resource_title = get_resource_title(resource, is_list)

        targetnode = nodes.target('', '', ids=[docname], names=[docname])
        self.state.document.note_explicit_target(targetnode)
        main_section = nodes.section(ids=[docname])

        # Main section
        main_section += nodes.title(text=resource_title)
        main_section += parse_text(
            self, inspect.getdoc(resource),
            where='%s class docstring' % self.options['classname'])

        if getattr(resource, 'required_features', False):
            required_features = nodes.important()
            required_features += nodes.inline(
                text='Using this resource requires extra features to be '
                     'enabled on the server. See "Required Features" below.')
            main_section += required_features

        # Details section
        details_section = nodes.section(ids=['details'])
        main_section += details_section
        details_section += nodes.title(text='Details')
        details_section += self.build_details_table(resource)

        # Fields section
        if (resource.fields and
            (not is_list or resource.singleton)):
            fields_section = nodes.section(ids=['fields'])
            main_section += fields_section
            fields_section += nodes.title(text='Fields')
            fields_section += self.build_fields_table(resource.fields)

        # Links section
        if 'hide-links' not in self.options:
            fields_section = nodes.section(ids=['links'])
            main_section += fields_section
            fields_section += nodes.title(text='Links')
            fields_section += self.build_links_table(resource)

        # HTTP method descriptions
        for http_method in self.get_http_methods(resource, is_list):
            method_section = nodes.section(ids=[http_method])
            main_section += method_section
            method_section += nodes.title(text='HTTP %s' % http_method)
            method_section += self.build_http_method_section(resource,
                                                            http_method)

        if 'hide-examples' not in self.options:
            examples_section = nodes.section(ids=['examples'])
            examples_section += nodes.title(text='Examples')
            has_examples = False

            if is_list:
                mimetype_key = 'list'
            else:
                mimetype_key = 'item'

            for mimetype in resource.allowed_mimetypes:
                try:
                    mimetype = mimetype[mimetype_key]
                except KeyError:
                    continue

                if mimetype in self.FILTERED_MIMETYPES:
                    # Resources have more specific mimetypes. We want to
                    # filter out the general ones (like application/json)
                    # so we don't show redundant examples.
                    continue

                if mimetype.endswith('xml'):
                    # JSON is preferred. While we support XML, let's not
                    # continue to advertise it.
                    continue

                url, headers, data = \
                    self.fetch_resource_data(resource, mimetype)
                example_node = build_example(headers, data, mimetype)

                if example_node:
                    example_section = \
                        nodes.section(ids=['example_' + mimetype],
                                      classes=['examples', 'requests-example'])
                    examples_section += example_section

                    example_section += nodes.title(text=mimetype)

                    accept_mimetype = mimetype

                    if (mimetype.startswith('application/') and
                        mimetype.endswith('+json')):
                        # Instead of telling the user to ask for a specific
                        # mimetype on the request, show them that asking for
                        # application/json works fine.
                        accept_mimetype = 'application/json'

                    curl_text = (
                        '$ curl http://reviews.example.com%s -H "Accept: %s"'
                        % (url, accept_mimetype)
                    )

                    example_section += nodes.literal_block(
                        curl_text, curl_text, classes=['cmdline'])

                    example_section += nodes.literal_block(
                        headers, headers, classes=['http-headers'])
                    example_section += example_node
                    has_examples = True

            if has_examples:
                main_section += examples_section

        return [targetnode, main_section]
def _deleted_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Docutils role rendering deleted text as a styled inline node.

    Long raw source gets the block-style ``deleted`` class; short source
    gets ``deleted-inline``.
    """
    if len(rawtext) > 60:
        css_class = "deleted"
    else:
        css_class = "deleted-inline"
    node = nodes.inline(rawtext, text, classes=[css_class], **options)
    return [node], []
def nodes_for_arrow(self):
    """Return a single-element list holding an inline node styled as a closed arrow."""
    return [nodes.inline(classes=["arrow", "arrow-closed"])]
def transform(self, node):
    # type: (nodes.Node) -> None
    """Transform a single field list *node*.

    Walks every ``field`` child, classifies it via ``self.typemap``, collects
    per-argument type information into ``types``, groups grouped fields, and
    finally replaces *node* with a freshly built field list.
    """
    typemap = self.typemap

    entries = []
    groupindices = {}  # type: Dict[unicode, int]
    types = {}  # type: Dict[unicode, Dict]

    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        # typemap values are (field-descriptor, is-type-field) pairs
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))

        # sort out unknown fields
        if typedesc is None or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
            if fieldarg:
                new_fieldname += ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue

        typename = typedesc.name

        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children

        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out
            content = [
                n for n in content
                if isinstance(n, nodes.Inline) or isinstance(n, nodes.Text)
            ]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue

        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                # no embedded type in the argument; leave fieldarg as-is
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname

        # wrap the body content so it can be picked up for translation
        translatable_content = nodes.inline(fieldbody.rawsource,
                                            translatable=True)
        translatable_content.document = fieldbody.parent.document
        translatable_content.source = fieldbody.parent.source
        translatable_content.line = fieldbody.parent.line
        translatable_content += content

        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            group[1].append(entry)
        else:
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            entries.append([typedesc, entry])

    # step 2: all entries are collected, construct the new field list
    new_list = nodes.field_list()
    for entry in entries:
        if isinstance(entry, nodes.field):
            # pass-through old field
            new_list += entry
        else:
            fieldtype, content = entry
            fieldtypes = types.get(fieldtype.name, {})
            new_list += fieldtype.make_field(fieldtypes, self.domain, content)

    node.replace_self(new_list)
def visitAltblock(self, ctx: TacticNotationsParser.AltblockContext):
    """Wrap the rendered children of an alternative block in a styled inline node."""
    children = self.visitChildren(ctx)
    wrapper = nodes.inline('', '', *children, classes=['alternative-block'])
    return [wrapper]
def add_type_node(self, container, type):
    """Append an inline node carrying *type* text, styled as a parameter type,
    to *container*."""
    type_node = nodes.inline(text=type)
    type_node["classes"] = ["parameter__type"]
    container += [type_node]
def transform(self, node: nodes.field_list) -> None:
    """Transform a single field list *node*.

    Typed variant of the docfields transform: classifies each field via
    ``self.typemap``, cross-references bare type names in otherwise-unknown
    fields, collects type info into ``types``, groups grouped entries, and
    replaces *node* with a rebuilt field list.
    """
    typemap = self.typemap

    entries: List[Union[nodes.field, Tuple[Field, Any]]] = []
    groupindices: Dict[str, int] = {}
    types: Dict[str, Dict] = {}

    # step 1: traverse all fields and collect field types and content
    for field in cast(List[nodes.field], node):
        assert len(field) == 2
        field_name = cast(nodes.field_name, field[0])
        field_body = cast(nodes.field_body, field[1])
        try:
            # split into field type and argument
            fieldtype_name, fieldarg = field_name.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype_name, fieldarg = field_name.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype_name, (None, None))

        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(field_body):
            paragraph = cast(nodes.paragraph, field_body[0])
            content = paragraph.children
        else:
            content = field_body.children

        # sort out unknown fields
        if typedesc is None or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype_name[0:1].upper(
            ) + fieldtype_name[1:]
            if fieldarg:
                new_fieldname += ' ' + fieldarg
            field_name[0] = nodes.Text(new_fieldname)
            entries.append(field)

            # but if this has a type then we can at least link it
            if (typedesc and is_typefield and content and
                    len(content) == 1 and isinstance(content[0], nodes.Text)):
                typed_field = cast(TypedField, typedesc)
                target = content[0].astext()
                xrefs = typed_field.make_xrefs(
                    typed_field.typerolename,
                    self.directive.domain,
                    target,
                    contnode=content[0],
                    env=self.directive.state.document.settings.env)
                if _is_single_paragraph(field_body):
                    paragraph = cast(nodes.paragraph, field_body[0])
                    paragraph.clear()
                    paragraph.extend(xrefs)
                else:
                    field_body.clear()
                    field_body += nodes.paragraph('', '', *xrefs)

            continue

        typename = typedesc.name

        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out
            content = [
                n for n in content
                if isinstance(n, nodes.Inline) or isinstance(n, nodes.Text)
            ]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue

        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                # no embedded type; keep fieldarg unchanged
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname

        # wrap the body content so it can be picked up for translation
        translatable_content = nodes.inline(field_body.rawsource,
                                            translatable=True)
        translatable_content.document = field_body.parent.document
        translatable_content.source = field_body.parent.source
        translatable_content.line = field_body.parent.line
        translatable_content += content

        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = cast(Tuple[Field, List],
                             entries[groupindices[typename]])
            else:
                groupindices[typename] = len(entries)
                group = (typedesc, [])
                entries.append(group)
            new_entry = typedesc.make_entry(fieldarg, [translatable_content])
            group[1].append(new_entry)
        else:
            new_entry = typedesc.make_entry(fieldarg, [translatable_content])
            entries.append((typedesc, new_entry))

    # step 2: all entries are collected, construct the new field list
    new_list = nodes.field_list()
    for entry in entries:
        if isinstance(entry, nodes.field):
            # pass-through old field
            new_list += entry
        else:
            fieldtype, items = entry
            fieldtypes = types.get(fieldtype.name, {})
            env = self.directive.state.document.settings.env
            new_list += fieldtype.make_field(fieldtypes,
                                             self.directive.domain,
                                             items, env=env)

    node.replace_self(new_list)
literal_block.line = 1 self.add_name(literal_block) if 'number-lines' in self.options: try: startline = int(self.options['number-lines'] or 1) except ValueError: raise self.error(':number-lines: with non-integer ' 'start value') endline = startline + len(include_lines) if text.endswith('\n'): text = text[:-1] tokens = NumberLines([([], text)], startline, endline) for classes, value in tokens: if classes: literal_block += nodes.inline(value, value, classes=classes) else: literal_block += nodes.Text(value, value) else: literal_block += nodes.Text(text, text) return [literal_block] if 'code' in self.options: self.options['source'] = path codeblock = CodeBlock( self.name, [self.options.pop('code')], # arguments self.options, include_lines, # content self.lineno, self.content_offset,
def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]:
    """Render the role text as an inline node whose CSS class is the role name."""
    container = nodes.inline(rawtext=self.rawtext, classes=[self.name])
    container += nodes.Text(self.text)
    return [container], []
def _build_summary(self, matrix, content):
    """Constructs the docutils content for the summary of
    the support matrix.

    The summary consists of a giant table, with one row
    for each feature, and a column for each hypervisor
    driver. It provides an 'at a glance' summary of the
    status of each driver.
    """

    summarytitle = nodes.subtitle(text="Summary")
    summary = nodes.table()
    # Two fixed columns (feature name, status) plus one per driver.
    cols = len(matrix.targets) + 2
    summarygroup = nodes.tgroup(cols=cols)
    summarybody = nodes.tbody()
    summaryhead = nodes.thead()

    for _ in range(cols):
        summarygroup.append(nodes.colspec(colwidth=1))
    summarygroup.append(summaryhead)
    summarygroup.append(summarybody)
    summary.append(summarygroup)
    content.append(summarytitle)
    content.append(summary)

    # This sets up all the column headers - two fixed
    # columns for feature name & status
    header = nodes.row()
    blank = nodes.entry()
    blank.append(nodes.emphasis(text="Feature"))
    header.append(blank)
    blank = nodes.entry()
    blank.append(nodes.emphasis(text="Status"))
    header.append(blank)
    summaryhead.append(header)

    # Fix: dict views have no .sort() on Python 3; sorted() works on both
    # and gives every row the same deterministic column order. Computed
    # once here instead of once per feature row.
    impls = sorted(matrix.targets.keys())

    # then one column for each hypervisor driver
    for key in impls:
        target = matrix.targets[key]
        implcol = nodes.entry()
        header.append(implcol)
        implcol.append(nodes.strong(text=target.title))

    # We now produce the body of the table, one row for
    # each feature to report on
    for feature in matrix.features:
        item = nodes.row()

        # the hyperlink target name linking to details
        feature_id = re.sub("[^a-zA-Z0-9_]", "_", feature.key)

        # first the two fixed columns for title/status
        keycol = nodes.entry()
        item.append(keycol)
        keyref = nodes.reference(refid=feature_id)
        keytxt = nodes.inline()
        keycol.append(keytxt)
        keytxt.append(keyref)
        keyref.append(nodes.strong(text=feature.title))

        statuscol = nodes.entry()
        item.append(statuscol)
        statuscol.append(
            nodes.inline(text=feature.status,
                         classes=["sp_feature_" + feature.status]))

        # and then one column for each hypervisor driver,
        # in the same order as the header columns above
        for key in impls:
            impl = feature.implementations[key]
            implcol = nodes.entry()
            item.append(implcol)

            impl_id = re.sub("[^a-zA-Z0-9_]", "_",
                             feature.key + "_" + key)
            implref = nodes.reference(refid=impl_id)
            impltxt = nodes.inline()
            implcol.append(impltxt)
            impltxt.append(implref)

            # NOTE: "partial" deliberately renders the same check mark as
            # "complete"; the CSS class (sp_impl_<status>) distinguishes them.
            status = ""
            if impl.status == "complete":
                status = u"\u2714"
            elif impl.status == "missing":
                status = u"\u2716"
            elif impl.status == "partial":
                status = u"\u2714"

            implref.append(
                nodes.literal(
                    text=status,
                    classes=["sp_impl_summary", "sp_impl_" + impl.status]))

        summarybody.append(item)
def visitCurlies(self, ctx: TacticNotationsParser.CurliesContext):
    """Render a curly-brace group: literal braces wrapped around the visited children."""
    wrapper = nodes.inline('', '', classes=["curlies"])
    wrapper += nodes.Text("{")
    for child in self.visitChildren(ctx):
        wrapper += child
    wrapper += nodes.Text("}")
    return [wrapper]
def _hacked_transform(typemap, node):
    """
    Taken from docfields.py from sphinx.

    This does all the steps around gathering data, but doesn't actually
    do the node transformations.

    Returns ``(entries, types)``: the classified field entries and the
    per-typename argument-type mapping, for the caller to render.
    """
    entries = []
    groupindices = {}
    types = {}

    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))

        # sort out unknown fields
        if typedesc is None or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
            if fieldarg:
                new_fieldname += ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue

        typename = typedesc.name

        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children

        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out
            content = [n for n in content
                       if isinstance(n, nodes.Inline) or
                       isinstance(n, nodes.Text)]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue

        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                # no embedded type in the argument; leave fieldarg as-is
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname

        # wrap the body content so it can be picked up for translation
        # (unlike sphinx's original, .document is not set here)
        translatable_content = nodes.inline(fieldbody.rawsource,
                                            translatable=True)
        translatable_content.source = fieldbody.parent.source
        translatable_content.line = fieldbody.parent.line
        translatable_content += content

        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            group[1].append(entry)
        else:
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            entries.append([typedesc, entry])

    return (entries, types)
def doctree_read(app: Sphinx, doctree: Node) -> None:
    """Add a [source] link to every Python object description in *doctree*.

    Populates ``env._viewcode_modules`` (module name -> (code, tags, used,
    refname), or False when analysis failed) so the viewcode page builder
    can render highlighted module pages later.
    """
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        env._viewcode_modules = {}  # type: ignore
    # singlehtml and (optionally) epub builders never get viewcode pages
    if app.builder.name == "singlehtml":
        return
    if app.builder.name.startswith("epub") and \
            not env.config.viewcode_enable_epub:
        return

    def has_tag(modname: str, fullname: str, docname: str,
                refname: str) -> bool:
        # Returns True when *fullname* is a known tag of *modname*,
        # recording the requesting document in the "used" map as a side
        # effect. False is cached for modules that failed analysis.
        entry = env._viewcode_modules.get(modname, None)  # type: ignore
        if entry is False:
            return False

        # extensions may supply the source via the event; otherwise
        # analyze the module ourselves
        code_tags = app.emit_firstresult('viewcode-find-source', modname)
        if code_tags is None:
            try:
                analyzer = ModuleAnalyzer.for_module(modname)
                analyzer.find_tags()
            except Exception:
                env._viewcode_modules[modname] = False  # type: ignore
                return False

            code = analyzer.code
            tags = analyzer.tags
        else:
            code, tags = code_tags

        # refresh the cache entry if the module source changed
        if entry is None or entry[0] != code:
            entry = code, tags, {}, refname
            env._viewcode_modules[modname] = entry  # type: ignore
        _, tags, used, _ = entry
        if fullname in tags:
            used[fullname] = docname
            return True

        return False

    for objnode in doctree.traverse(addnodes.desc):
        if objnode.get('domain') != 'py':
            continue
        names = set()  # type: Set[str]
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue
            modname = signode.get('module')
            fullname = signode.get('fullname')
            refname = modname
            if env.config.viewcode_follow_imported_members:
                # resolve re-exported objects to their defining module
                new_modname = app.emit_firstresult(
                    'viewcode-follow-imported', modname, fullname,
                )
                if not new_modname:
                    new_modname = _get_full_modname(app, modname, fullname)
                modname = new_modname
            if not modname:
                continue
            fullname = signode.get('fullname')
            if not has_tag(modname, fullname, env.docname, refname):
                continue
            if fullname in names:
                # only one link per name, please
                continue
            names.add(fullname)
            pagename = '_modules/' + modname.replace('.', '/')
            inline = nodes.inline('', _('[source]'),
                                  classes=['viewcode-link'])
            # emit only in HTML output; resolved to a real link later
            onlynode = addnodes.only(expr='html')
            onlynode += addnodes.pending_xref('', inline,
                                              reftype='viewcode',
                                              refdomain='std',
                                              refexplicit=False,
                                              reftarget=pagename,
                                              refid=fullname,
                                              refdoc=env.docname)
            signode += onlynode
def cite(self, cmd, refuri, global_keys=None):
    """
    Return a docutils Node consisting of properly formatted citations children
    nodes.

    *cmd* is the natbib-style citation command name (e.g. ``p``, ``t``,
    ``alt``, ``author``, ``title``, ``yearpar``; a trailing ``s`` means
    "all authors"); *refuri* is the URI of the bibliography page the
    citation links into.
    """
    if global_keys is not None:
        self.global_keys = global_keys
    bo, bc = self.config['brackets']
    sep = u'%s ' % self.config['separator']
    style = self.config['style']
    # commands ending in 's' list all authors instead of "et al."
    all_auths = (cmd.endswith('s'))
    # NOTE(review): 'alt' is computed but never read in this method —
    # possibly left over from an earlier revision; confirm before removing.
    alt = (cmd.startswith('alt') or \
           (cmd.startswith('alp')) or \
           (style == 'citeyear'))

    # parenthetical citations open with a bracket (unless superscript style)
    if (cmd.startswith('p') or cmd == 'yearpar') and style != 'super':
        node = nodes.inline(bo, bo, classes=['citation'])
    else:
        node = nodes.inline('', '', classes=['citation'])

    # optional pre-note, e.g. "see "
    if self.pre:
        pre = u"%s " % self.pre.decode('latex')
        node += nodes.inline(pre, pre, classes=['pre'])

    for i, ref in enumerate(self.refs):
        authors = ref.persons.get('author', [])
        author_text = self.get_author(authors, all_auths).decode('latex')
        lrefuri = refuri + '#citation-' + nodes.make_id(ref.key)

        # separator between successive citations
        if i > 0 and i < len(self.refs):
            if style == "authoryear":
                node += nodes.inline(sep, sep)
            else:
                if style == "super":
                    node += nodes.superscript(', ', ', ')
                else:
                    node += nodes.inline(', ', ', ')

        # \citetitle-style commands substitute the title for the author text
        if cmd == 'title':
            title = ref.fields.get('title')
            if title is None:
                title = ref.fields.get('key', '')
            author_text = title

        # textual and authoryear-parenthetical citations link the author text
        if (style == "authoryear" and
                (cmd.startswith('p') or cmd.startswith('alp'))) or \
                (cmd.startswith('t') or cmd.startswith('alt') or
                 cmd.startswith('author')):
            node += nodes.reference(author_text, author_text,
                                    internal=True, refuri=lrefuri)

            if cmd.startswith('p') or cmd.startswith('alp'):
                node += nodes.inline(', ', ', ')
            else:
                node += nodes.inline(' ', ' ')

        # Add in either the year or the citation number
        if cmd == 'title':
            pass
        elif cmd.startswith('author'):
            pass
        else:
            # numeric styles use the reference's sequence number,
            # authoryear uses the publication year
            if style != 'authoryear':
                num = self.get_ref_num(ref.key)
            else:
                num = ref.fields.get('year')

            refnode = nodes.reference(str(num), str(num),
                                      internal=True, refuri=lrefuri)

            # textual citations bracket just the year/number
            if cmd.startswith('t') and style != 'super':
                node += nodes.inline(bo, bo)
            if style == 'super':
                node += nodes.superscript('', '', refnode)
            else:
                node += refnode
            if cmd.startswith('t') and style != 'super':
                node += nodes.inline(bc, bc)

    # optional post-note, e.g. ", p. 42"
    if self.post:
        post = u", %s" % self.post.decode('latex')
        node += nodes.inline(post, post, classes=['post'])
    # close the bracket opened for parenthetical citations
    if (cmd.startswith('p') or cmd == 'yearpar') and style != 'super':
        node += nodes.inline(bc, bc, classes=['citation'])

    return node
def highlight_using_coqdoc(sentence):
    """Lex *sentence* with coqdoc and yield one inline node per token."""
    unescaped = utils.unescape(sentence, 1)
    for token_classes, token_text in coqdoc.lex(unescaped):
        yield nodes.inline(token_text, token_text, classes=token_classes)
def get_reference_node(self, ref):
    """Build an inline docutils node rendering bibliography entry *ref*.

    Assembles authors, title, thesis/publication info, volume, pages and
    year into one inline container. Python 2 code: relies on the
    ``latex`` codec via ``str.decode('latex')``.
    """
    node = nodes.inline('', '', classes=[ref.type, 'reference'])

    namestyler = pybtex.style.names.plain.NameStyle()
    plaintext = pybtex.backends.plaintext.Backend()

    # Authors
    authors = ref.persons.get('author', [])
    for i, author in enumerate(authors):
        authortext = namestyler.format(
            author, abbr=True).format().render(plaintext)
        # strip LaTeX grouping braces before decoding
        authortext = authortext.replace('{', '')
        authortext = authortext.replace('}', '')
        authortext = authortext.decode('latex')

        text = authortext
        text = text.strip()
        auth_node = latex_to_nodes(text)
        auth_node['classes'].append('author')
        node += auth_node

        if i + 1 < len(authors):
            node += nodes.inline(', ', ', ')
        else:
            # terminate the author list with a period, unless the last
            # (abbreviated) name already ends in one
            ending = '%s ' % ('' if text.endswith('.') else '.')
            node += nodes.inline(ending, ending)

    # Title
    title = ref.fields.get('title')
    if title is None:
        title = ref.fields.get('key')
    if title:
        title = title.decode('latex')
        title = title.replace('{', '')
        title = title.replace('}', '')
        node += nodes.inline(title, title, classes=['title'])
        node += nodes.inline('. ', '. ')

    # @phdthesis
    if ref.type == 'phdthesis':
        school = ref.fields.get('school')
        school = school.decode('latex')
        text = 'PhD Thesis, %s, ' % school
        node += nodes.inline(text, text)

    # Publication
    pub = ref.fields.get('journal')
    if not pub:
        pub = ref.fields.get('booktitle')
    if pub:
        pub = pub.decode('latex')
        pub = pub.replace('{', '')
        pub = pub.replace('}', '')
        node += nodes.emphasis(pub, pub, classes=['publication'])
        node += nodes.inline(', ', ', ')

    vol = ref.fields.get('volume')
    pages = ref.fields.get('pages')
    year = ref.fields.get('year')

    if pub is None:
        # fall back to a \url{...} howpublished field as the link
        howpub = ref.fields.get('howpublished')
        if howpub is not None and howpub.startswith('\url{'):
            url = howpub[5:-1]
            refnode = nodes.reference('', '', internal=False, refuri=url)
            refnode += nodes.Text(url, url)
            node += refnode
            if vol or pages or year:
                node += nodes.inline(', ', ', ')

    if vol:
        vol = vol.decode('latex')
        node += nodes.inline(vol, vol, classes=['volume'])
        node += nodes.inline(':', ':')

    if pages:
        pages = pages.decode('latex')
        node += nodes.inline(pages, pages, classes=['pages'])
        node += nodes.inline(', ', ', ')

    if year:
        year = year.decode('latex')
        node += nodes.inline(year, year, classes=['year'])
        node += nodes.inline('.', '.')

    return node
def doctree_read(app, doctree):
    # type: (Sphinx, nodes.Node) -> None
    """Add a [source] link to every Python object description in *doctree*.

    Older (Python 2 era) viewcode hook: caches analyzed modules in
    ``env._viewcode_modules`` as (code, tags, used, refname), or False
    when analysis failed.
    """
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        env._viewcode_modules = {}  # type: ignore
    # singlehtml and (optionally) epub builders never get viewcode pages
    if app.builder.name == "singlehtml":
        return
    if app.builder.name.startswith(
            "epub") and not env.config.viewcode_enable_epub:
        return

    def has_tag(modname, fullname, docname, refname):
        # Returns True when *fullname* is a known tag of *modname*; the
        # bare `return` paths fall through as None, which the caller
        # treats as falsy.
        entry = env._viewcode_modules.get(modname, None)  # type: ignore
        try:
            analyzer = ModuleAnalyzer.for_module(modname)
        except Exception:
            env._viewcode_modules[modname] = False  # type: ignore
            return

        # normalize the module source to unicode for comparison
        if not isinstance(analyzer.code, text_type):
            code = analyzer.code.decode(analyzer.encoding)
        else:
            code = analyzer.code

        if entry is False:
            return
        elif entry is None or entry[0] != code:
            # source changed (or first sight): re-run tag discovery
            analyzer.find_tags()
            entry = code, analyzer.tags, {}, refname
            env._viewcode_modules[modname] = entry  # type: ignore
        _, tags, used, _ = entry
        if fullname in tags:
            used[fullname] = docname
            return True

    for objnode in doctree.traverse(addnodes.desc):
        if objnode.get('domain') != 'py':
            continue
        names = set()  # type: Set[unicode]
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue
            modname = signode.get('module')
            fullname = signode.get('fullname')
            refname = modname
            if env.config.viewcode_import:
                # resolve the object to its defining module
                modname = _get_full_modname(app, modname, fullname)
            if not modname:
                continue
            fullname = signode.get('fullname')
            if not has_tag(modname, fullname, env.docname, refname):
                continue
            if fullname in names:
                # only one link per name, please
                continue
            names.add(fullname)
            pagename = '_modules/' + modname.replace('.', '/')
            # emit only in HTML output; resolved to a real link later
            onlynode = addnodes.only(expr='html')
            onlynode += addnodes.pending_xref(
                '', reftype='viewcode', refdomain='std', refexplicit=False,
                reftarget=pagename, refid=fullname, refdoc=env.docname)
            onlynode[0] += nodes.inline('', _('[source]'),
                                        classes=['viewcode-link'])
            signode += onlynode
def build_details_table(self, resource):
    """Build the two-column details table for a web API *resource*.

    Rows: Name, URI, Description, HTTP Methods, Parent Resource,
    Child Resources (as a toctree), and Anonymous Access.
    """
    is_list = 'is-list' in self.options

    table = nodes.table()

    tgroup = nodes.tgroup(cols=2)
    table += tgroup

    tgroup += nodes.colspec(colwidth=30)
    tgroup += nodes.colspec(colwidth=70)

    tbody = nodes.tbody()
    tgroup += tbody

    # Name
    if is_list:
        resource_name = resource.name_plural
    else:
        resource_name = resource.name

    append_detail_row(tbody, "Name", nodes.literal(text=resource_name))

    # URI
    uri_template = get_resource_uri_template(resource, not is_list)
    append_detail_row(tbody, "URI", nodes.literal(text=uri_template))

    # URI Parameters
    #append_detail_row(tbody, "URI Parameters", '')

    # Description
    append_detail_row(tbody, "Description",
                      parse_text(self, inspect.getdoc(resource)))

    # HTTP Methods
    allowed_http_methods = self.get_http_methods(resource, is_list)
    bullet_list = nodes.bullet_list()

    for http_method in allowed_http_methods:
        item = nodes.list_item()
        bullet_list += item

        paragraph = nodes.paragraph()
        item += paragraph

        # link to the per-method section generated elsewhere on the page
        ref = nodes.reference(text=http_method, refid=http_method)
        paragraph += ref

        doc_summary = self.get_doc_for_http_method(resource, http_method)
        i = doc_summary.find('.')

        # keep only the first sentence of the method's docstring
        if i != -1:
            doc_summary = doc_summary[:i + 1]

        paragraph += nodes.inline(text=" - ")
        paragraph += parse_text(self, doc_summary, nodes.inline)

    append_detail_row(tbody, "HTTP Methods", bullet_list)

    # Parent Resource
    if is_list or resource.uri_object_key is None:
        parent_resource = resource._parent_resource
        is_parent_list = False
    else:
        # an item resource's "parent" is its own list resource
        parent_resource = resource
        is_parent_list = True

    if parent_resource:
        paragraph = nodes.paragraph()
        paragraph += get_ref_to_resource(parent_resource, is_parent_list)
    else:
        paragraph = 'None.'

    append_detail_row(tbody, "Parent Resource", paragraph)

    # Child Resources
    if is_list:
        child_resources = list(resource.list_child_resources)

        if resource.name != resource.name_plural:
            # the list resource's item resource counts as a child
            if resource.uri_object_key:
                child_resources.append(resource)

            are_children_lists = False
        else:
            are_children_lists = True
    else:
        child_resources = resource.item_child_resources
        are_children_lists = True

    if child_resources:
        tocnode = addnodes.toctree()
        tocnode['glob'] = None
        tocnode['maxdepth'] = 1
        tocnode['hidden'] = False

        docnames = sorted([
            docname_join(self.state.document.settings.env.docname,
                         get_resource_docname(child_resource,
                                              are_children_lists))
            for child_resource in child_resources
        ])

        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docname) for docname in docnames]
    else:
        tocnode = nodes.paragraph(text="None")

    append_detail_row(tbody, "Child Resources", tocnode)

    # Anonymous Access
    if is_list and not resource.singleton:
        getter = resource.get_list
    else:
        getter = resource.get

    if getattr(getter, 'login_required', False):
        anonymous_access = 'No'
    elif getattr(getter, 'checks_login_required', False):
        anonymous_access = 'Yes, if anonymous site access is enabled'
    else:
        anonymous_access = 'Yes'

    append_detail_row(tbody, "Anonymous Access", anonymous_access)

    return table
def _build_details(self, matrix, content):
    """Constructs the docutils content for the details of
    the support matrix.

    This is generated as a bullet list of features.
    Against each feature we provide the description of
    the feature and then the details of the hypervisor
    impls, with any driver specific notes that exist
    """

    detailstitle = nodes.subtitle(text="Details")
    details = nodes.bullet_list()

    content.append(detailstitle)
    content.append(details)

    # One list entry for each feature we're reporting on
    for feature in matrix.features:
        feature_item = nodes.list_item()

        status_text = feature.status
        if feature.group is not None:
            status_text = status_text + "(" + feature.group + ")"

        # The hypervisor target name linked from summary table
        anchor = re.sub("[^a-zA-Z0-9_]", "_", feature.key)

        # Highlight the feature title name
        feature_item.append(nodes.strong(text=feature.title, ids=[anchor]))

        status_para = nodes.paragraph()
        status_para.append(nodes.strong(text="Status: " + status_text + ". "))
        if feature.notes is not None:
            status_para.append(nodes.inline(text=feature.notes))
        feature_item.append(status_para)

        if feature.cli:
            feature_item.append(self._create_cli_paragraph(feature))

        drivers_para = nodes.paragraph()
        drivers_para.append(nodes.strong(text="drivers:"))

        # A sub-list giving details of each hypervisor target
        driver_list = nodes.bullet_list()
        for key in feature.implementations:
            target = matrix.targets[key]
            impl = feature.implementations[key]
            driver_item = nodes.list_item()
            impl_anchor = re.sub("[^a-zA-Z0-9_]", "_",
                                 feature.key + "_" + key)
            driver_item += [
                nodes.strong(text=target.title + ": "),
                nodes.literal(text=impl.status,
                              classes=["sp_impl_" + impl.status],
                              ids=[impl_anchor]),
            ]
            if impl.notes is not None:
                driver_item.append(self._create_notes_paragraph(impl.notes))
            driver_list.append(driver_item)
        drivers_para.append(driver_list)

        feature_item.append(drivers_para)
        details.append(feature_item)
def build_fields_table(self, fields, required_fields={},
                       show_requirement_labels=False):
    """Build a three-column (Field / Type / Description) table for *fields*.

    *fields* may be a dict (field name -> info dict with 'type' and
    'description') or a plain iterable of field names. Python 2 code
    (`print` statement, `basestring`, `iterkeys`).

    NOTE(review): the mutable default ``required_fields={}`` is only read,
    never mutated, so it is safe — but verify before adding writes.
    """
    def get_type_name(field_type):
        # Map a field's declared type to a list of inline/reference nodes.
        # We may be dealing with a forward-declared class.
        if isinstance(field_type, basestring) and field_type is not str:
            field_type = self.get_resource_class(field_type)

        if type(field_type) is list:
            # recurse into the element type of the list
            return [nodes.inline(text='List of ')] + \
                   get_type_name(field_type[0])
        elif type(field_type) is tuple:
            # a tuple enumerates the allowed literal values
            value_nodes = []

            for value in field_type:
                if value_nodes:
                    value_nodes.append(nodes.inline(text=', '))

                value_nodes.append(nodes.literal(text=value))

            return [nodes.inline(text='One of ')] + value_nodes
        elif (inspect.isclass(field_type) and
              issubclass(field_type, WebAPIResource)):
            return [get_ref_to_resource(field_type, False)]
        elif field_type in self.type_mapping:
            return [nodes.inline(text=self.type_mapping[field_type])]
        else:
            # unmapped type is a programming error in the docs setup
            print "Unknown type %s" % (field_type,)
            assert False

    table = nodes.table()

    tgroup = nodes.tgroup(cols=3)
    table += tgroup

    tgroup += nodes.colspec(colwidth=25)
    tgroup += nodes.colspec(colwidth=15)
    tgroup += nodes.colspec(colwidth=60)

    thead = nodes.thead()
    tgroup += thead
    append_row(thead, ['Field', 'Type', 'Description'])

    tbody = nodes.tbody()
    tgroup += tbody

    if isinstance(fields, dict):
        for field in sorted(fields.iterkeys()):
            info = fields[field]

            name_node = nodes.inline()
            name_node += nodes.strong(text=field)

            if show_requirement_labels:
                if field in required_fields:
                    name_node += nodes.inline(text=" (required)")
                else:
                    name_node += nodes.inline(text=" (optional)")

            type_node = nodes.inline()
            type_node += get_type_name(info['type'])

            append_row(tbody,
                       [name_node, type_node,
                        parse_text(self, info['description'])])
    else:
        # bare iterable of names: no type/description columns to fill
        for field in sorted(fields):
            name = field

            if show_requirement_labels:
                if field in required_fields:
                    name += " (required)"
                else:
                    name += " (optional)"

            append_row(tbody, [name, "", ""])

    return table
def code_block_directive(name, arguments, options, content, lineno,
                         content_offset, block_text, state, state_machine):
    """Parse and classify content of a code_block.

    Supports including the code from a file (``:include:``) with
    start-at/start-after/end-at/end-before slicing, optional line
    numbering, and pygments-based token classification.
    """
    if 'include' in options:
        try:
            if 'encoding' in options:
                encoding = options['encoding']
            else:
                encoding = 'utf-8'
            content = codecs.open(
                options['include'], 'r', encoding).read().rstrip()
        except (IOError, UnicodeError):
            # no file or problem finding it or reading it
            log.error('Error reading file: "%s" L %s' %
                      (options['include'], lineno))
            content = u''
        line_offset = 0
        if content:
            # here we define the start-at and end-at options
            # so that limit is included in extraction
            # this is different than the start-after directive of docutils
            # (docutils/parsers/rst/directives/misc.py L73+)
            # which excludes the beginning
            # the reason is we want to be able to define a start-at like
            # def mymethod(self)
            # and have such a definition included
            after_text = options.get('start-at', None)
            if after_text:
                # skip content in include_text before *and NOT incl.* a matching text
                after_index = content.find(after_text)
                if after_index < 0:
                    raise state_machine.reporter.severe(
                        'Problem with "start-at" option of "%s" '
                        'code-block directive:\nText not found.' %
                        options['start-at'])
                # patch mmueller start
                # Move the after_index to the beginning of the line with the
                # match.
                for char in content[after_index:0:-1]:
                    # codecs always opens binary. This works with '\n', '\r' and
                    # '\r\n'. We are going backwards, so '\n' is found first
                    # in '\r\n'.
                    # Going with .splitlines() seems more appropriate
                    # but needs a few more changes.
                    if char == u'\n' or char == u'\r':
                        break
                    after_index -= 1
                # patch mmueller end

                content = content[after_index:]
                line_offset = len(content[:after_index].splitlines())

            after_text = options.get('start-after', None)
            if after_text:
                # skip content in include_text before *and incl.* a matching text
                after_index = content.find(after_text)
                if after_index < 0:
                    raise state_machine.reporter.severe(
                        'Problem with "start-after" option of "%s" '
                        'code-block directive:\nText not found.' %
                        options['start-after'])
                line_offset = len(
                    content[:after_index + len(after_text)].splitlines())
                content = content[after_index + len(after_text):]

            # same changes here for the same reason
            before_text = options.get('end-at', None)
            if before_text:
                # skip content in include_text after *and incl.* a matching text
                before_index = content.find(before_text)
                if before_index < 0:
                    raise state_machine.reporter.severe(
                        'Problem with "end-at" option of "%s" '
                        'code-block directive:\nText not found.' %
                        options['end-at'])
                content = content[:before_index + len(before_text)]

            before_text = options.get('end-before', None)
            if before_text:
                # skip content in include_text after *and NOT incl.* a matching text
                before_index = content.find(before_text)
                if before_index < 0:
                    raise state_machine.reporter.severe(
                        'Problem with "end-before" option of "%s" '
                        'code-block directive:\nText not found.' %
                        options['end-before'])
                content = content[:before_index]

    else:
        # inline content: join the directive's content lines back together
        line_offset = options.get('linenos_offset')
        content = u'\n'.join(content)

    if 'tabsize' in options:
        tabw = options['tabsize']
    else:
        tabw = int(options.get('tab-width', 8))

    content = content.replace('\t', ' ' * tabw)

    withln = "linenos" in options
    if not "linenos_offset" in options:
        line_offset = 0

    language = arguments[0]
    # create a literal block element and set class argument
    code_block = nodes.literal_block(classes=["code", language])

    if withln:
        lineno = 1 + line_offset
        total_lines = content.count('\n') + 1 + line_offset
        lnwidth = len(str(total_lines))
        # right-aligned line-number format, preceded by a newline
        fstr = "\n%%%dd " % lnwidth
        code_block += nodes.inline(fstr[1:] % lineno, fstr[1:] % lineno,
                                   classes=['linenumber'])

    # parse content with pygments and add to code_block element
    for cls, value in DocutilsInterface(content, language, options):
        if withln and "\n" in value:
            # Split on the "\n"s
            values = value.split("\n")
            # The first piece, pass as-is
            code_block += nodes.Text(values[0], values[0])
            # On the second and later pieces, insert \n and linenos
            linenos = list(range(lineno, lineno + len(values)))
            for chunk, ln in zip(values, linenos)[1:]:
                if ln <= total_lines:
                    code_block += nodes.inline(fstr % ln, fstr % ln,
                                               classes=['linenumber'])
                    # NOTE(review): chunk text is only emitted when
                    # ln <= total_lines — confirm trailing chunks beyond
                    # total_lines are intentionally dropped.
                    code_block += nodes.Text(chunk, chunk)
            lineno += len(values) - 1
        elif cls in unstyled_tokens:
            # insert as Text to decrease the verbosity of the output.
            code_block += nodes.Text(value, value)
        else:
            code_block += nodes.inline(value, value,
                                       classes=["pygments-" + cls])

    return [code_block]
def dox(name, rawtext, text, lineno, inliner: Inliner, options={}, content=[]):
    """Role that links `text` to a Doxygen-documented symbol or tagfile.

    Resolution order: a tagfile basename (link to the whole docs), then the
    symbol mapping tried with each known prefix.  On failure the text is
    rendered inline (if a title was given) or as monospace, with a warning.

    Returns the standard docutils role tuple ``([node], [messages])``.
    """
    title, target, fragment = parse_link(text)

    # Work on a copy -- otherwise adding classes to the options behaves
    # globally, because role `options` dicts are shared between calls.
    _options = dict(options)
    set_classes(_options)
    # BUGFIX: dict() above is a *shallow* copy, so `+=` on the original
    # classes list would still mutate the caller's list in place.  Rebuild
    # it as a fresh list (also avoids the assert on a missing member later).
    _options['classes'] = list(_options.get('classes', []))

    # Try linking to the whole docs first
    for basename, url, css_classes in tagfile_basenames:
        if basename == target:
            if not title:
                # TODO: extract title from index page in the tagfile
                logger.warning(
                    "Link to main page `{}` requires a title".format(target))
                title = target
            _options['classes'] += css_classes
            node = nodes.reference(
                rawtext, title, refuri=url + fragment, **_options)
            return [node], []

    # Then try the symbol mapping with every configured prefix in order.
    for prefix in symbol_prefixes:
        if prefix + target in symbol_mapping:
            link_title, url, css_classes = symbol_mapping[prefix + target]
            if title:
                use_title = title
            elif link_title:
                use_title = link_title
            else:
                if link_title is not None:
                    # Entry exists but carries an empty title string.
                    logger.warning(
                        "Doxygen anchor `{}` has no title, using its ID as link title"
                        .format(target))
                use_title = target
            _options['classes'] += css_classes
            node = nodes.reference(
                rawtext, use_title, refuri=url + fragment, **_options)
            return [node], []

    # TODO: print file and line
    #msg = inliner.reporter.warning(
        #'Doxygen symbol %s not found' % target, line=lineno)
    #prb = inliner.problematic(rawtext, rawtext, msg)
    if title:
        logger.warning(
            "Doxygen symbol `{}` not found, rendering just link title".format(
                target))
        node = nodes.inline(rawtext, title, **_options)
    else:
        logger.warning(
            "Doxygen symbol `{}` not found, rendering as monospace".format(
                target))
        node = nodes.literal(rawtext, target, **_options)
    return [node], []
def run(self):
    """Process an ``include``-style directive by reading and rendering a file.

    Supports docutils "standard" includes (``<name>``), Sphinx-relative
    paths, the ``literal`` and ``code`` rendering modes, and otherwise a
    nested render of the included text into the current document.

    Returns a list of produced nodes (empty list for the nested-render path).
    Raises DirectiveError when insertion is disabled, the file cannot be
    read, or a start-after/end-before marker is missing.
    """
    # Imported lazily so module import does not require these directive classes.
    from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
    if not self.document.settings.file_insertion_enabled:
        raise DirectiveError(2, 'Directive "{}" disabled.'.format(self.name))
    source_dir = Path(self.document["source"]).absolute().parent
    # The directive argument may be wrapped across lines; rejoin it.
    include_arg = "".join(
        [s.strip() for s in self.arguments[0].splitlines()])
    if include_arg.startswith("<") and include_arg.endswith(">"):
        # docutils "standard" includes, e.g. <isonum.txt>
        path = Path(self.klass.standard_include_path).joinpath(
            include_arg[1:-1])
    else:
        # if using sphinx interpret absolute paths "correctly",
        # i.e. relative to source directory
        try:
            sphinx_env = self.document.settings.env
            _, include_arg = sphinx_env.relfn2path(self.arguments[0])
            sphinx_env.note_included(include_arg)
        except AttributeError:
            # Not running under Sphinx: no env attribute, use the path as-is.
            pass
        path = Path(include_arg)
    path = source_dir.joinpath(path)
    # read file
    encoding = self.options.get("encoding",
                                self.document.settings.input_encoding)
    error_handler = self.document.settings.input_encoding_error_handler
    # tab_width = self.options.get("tab-width", self.document.settings.tab_width)
    try:
        file_content = path.read_text(encoding=encoding,
                                      errors=error_handler)
    except Exception as error:
        raise DirectiveError(
            4,
            'Directive "{}": error reading file: {}\n{}.'.format(
                self.name, path, error),
        )
    # get required section of text
    startline = self.options.get("start-line", None)
    endline = self.options.get("end-line", None)
    file_content = "\n".join(file_content.splitlines()[startline:endline])
    # startline is kept as an offset so nested-render line numbers stay correct.
    startline = startline or 0
    for split_on_type in ["start-after", "end-before"]:
        split_on = self.options.get(split_on_type, None)
        if not split_on:
            continue
        split_index = file_content.find(split_on)
        if split_index < 0:
            raise DirectiveError(
                4,
                'Directive "{}"; option "{}": text not found "{}".'.format(
                    self.name, split_on_type, split_on),
            )
        if split_on_type == "start-after":
            startline += split_index + len(split_on)
            file_content = file_content[split_index + len(split_on):]
        else:
            file_content = file_content[:split_index]
    if "literal" in self.options:
        # Render the file verbatim as a literal block, optionally numbered.
        literal_block = nodes.literal_block(file_content,
                                            source=str(path),
                                            classes=self.options.get(
                                                "class", []))
        literal_block.line = 1  # TODO don't think this should be 1?
        self.add_name(literal_block)
        if "number-lines" in self.options:
            try:
                startline = int(self.options["number-lines"] or 1)
            except ValueError:
                raise DirectiveError(
                    3, ":number-lines: with non-integer "
                    "start value")
            endline = startline + len(file_content.splitlines())
            if file_content.endswith("\n"):
                # Drop the trailing newline so NumberLines does not emit
                # an extra empty numbered line.
                file_content = file_content[:-1]
            tokens = NumberLines([([], file_content)], startline, endline)
            for classes, value in tokens:
                if classes:
                    literal_block += nodes.inline(value, value,
                                                  classes=classes)
                else:
                    literal_block += nodes.Text(value)
        else:
            literal_block += nodes.Text(file_content)
        return [literal_block]
    if "code" in self.options:
        self.options["source"] = str(path)
        # Mock state/state-machine provide the docutils API CodeBlock expects.
        state_machine = MockStateMachine(self.renderer, self.lineno)
        state = MockState(self.renderer, state_machine, self.lineno)
        codeblock = CodeBlock(
            name=self.name,
            arguments=[self.options.pop("code")],
            options=self.options,
            content=file_content.splitlines(),
            lineno=self.lineno,
            content_offset=0,
            block_text=file_content,
            state=state,
            state_machine=state_machine,
        )
        return codeblock.run()
    # Here we perform a nested render, but temporarily setup the document/reporter
    # with the correct document path and lineno for the included file.
    source = self.renderer.document["source"]
    rsource = self.renderer.reporter.source
    line_func = getattr(self.renderer.reporter, "get_source_and_line", None)
    try:
        self.renderer.document["source"] = str(path)
        self.renderer.reporter.source = str(path)
        self.renderer.reporter.get_source_and_line = lambda l: (str(path), l)
        if "relative-images" in self.options:
            self.renderer.config["relative-images"] = os.path.relpath(
                path.parent, source_dir)
        if "relative-docs" in self.options:
            self.renderer.config["relative-docs"] = (
                self.options["relative-docs"],
                source_dir,
                path.parent,
            )
        self.renderer.nested_render_text(file_content, startline + 1)
    finally:
        # Always restore renderer state, even if the nested render raises.
        self.renderer.document["source"] = source
        self.renderer.reporter.source = rsource
        self.renderer.config.pop("relative-images", None)
        self.renderer.config.pop("relative-docs", None)
        if line_func is not None:
            self.renderer.reporter.get_source_and_line = line_func
        else:
            del self.renderer.reporter.get_source_and_line
    return []
def apply(self):
    """Replace ``mynodes.variable`` placeholders with PG/Dublin Core metadata.

    Walks the document tree and substitutes each known variable name
    (pg.upcase-title, pg.produced-by, pg.credits, pg.bibrec-url,
    pg.copyrighted-header/footer, pg.machine-header) with nodes built from
    the document's meta block and substitution definitions.
    """
    doc = self.document
    meta = doc.meta_block
    defs = doc.substitution_defs

    def getone(name, default=None):
        """ Get first value. """
        if name in meta:
            return meta[name][0]
        return default

    def getmany(name, default=[]):
        """ Get list of all values. """
        # NOTE: default list is never mutated here, so sharing is safe.
        return meta.get(name, default)

    def sub(var, nodes):
        # Replace the placeholder node with the given replacement nodes.
        var.replace_self(nodes)

    title = getone('DC.Title', 'No Title')
    short_title = getone('PG.Title', title)
    # Only the first line of a multi-line title is used as the short title.
    short_title = short_title.split('\n', 1)[0]
    language = getmany('DC.Language', ['en'])
    language = [
        DublinCore.language_map.get(x, 'Unknown').title()
        for x in language
    ]
    language = DublinCore.strunk(language)
    copyrighted = getone('PG.Rights', '').lower() == 'copyrighted'
    for variable in doc.traverse(mynodes.variable):
        name = variable['name']
        if name == 'pg.upcase-title':
            sub(variable, [nodes.inline('', short_title.upper())])
        elif name == 'pg.produced-by':
            producers = getmany('PG.Producer')
            if producers:
                sub(variable, [
                    nodes.inline(
                        '',
                        'Produced by %s.' % DublinCore.strunk(producers))
                ])
            else:
                sub(variable, [])
        elif name == 'pg.credits':
            sub(variable, [nodes.inline('', getone('PG.Credits', ''))])
        elif name == 'pg.bibrec-url':
            url = '%sebooks/%s' % (PG_URL, getone('PG.Id', '999999'))
            sub(variable, [
                nodes.reference('', '', nodes.inline('', url), refuri=url)
            ])
        elif name in ('pg.copyrighted-header', 'pg.copyrighted-footer'):
            # Copy the substitution definition so the original stays intact.
            if copyrighted:
                subdef_copy = defs[name].deepcopy()
                sub(variable, subdef_copy.children)
            else:
                sub(variable, [])
        elif name == 'pg.machine-header':
            # Build the plain-text machine-readable header block.
            # The wrapper's indents are mutated between fills on purpose.
            tw = textwrap.TextWrapper(width=72,
                                      initial_indent='Title: ',
                                      subsequent_indent=' ' * 7)
            if '\n' in title:
                maintitle, subtitle = title.split('\n', 1)
                s = tw.fill(maintitle)
                s += '\n'
                tw.initial_indent = tw.subsequent_indent
                s += tw.fill(subtitle)
            else:
                s = tw.fill(title)
            s += '\n\n'
            tw.initial_indent = 'Author: '
            tw.subsequent_indent = ' ' * 8
            s += tw.fill(
                DublinCore.strunk(getmany('DC.Creator', ['Unknown'])))
            s += '\n\n'
            date = getone('PG.Released', '')
            try:
                # Re-format ISO dates as e.g. "January 01, 2020".
                date = datetime.datetime.strptime(date, '%Y-%m-%d')
                date = datetime.datetime.strftime(date, '%B %d, %Y')
            except ValueError:
                date = 'unknown date'
            s += 'Release Date: %s [EBook #%s]\n' % (
                date, getone('PG.Id', '999999'))
            for item in getmany('PG.Reposted', []):
                # Each reposted entry is "<date> [optional comment]".
                try:
                    date, comment = item.split(None, 1)
                except ValueError:
                    date = item
                    comment = None
                try:
                    date = datetime.datetime.strptime(date, '%Y-%m-%d')
                    date = datetime.datetime.strftime(date, '%B %d, %Y')
                except ValueError:
                    date = 'unknown date'
                s += 'Reposted: %s' % date
                if comment:
                    s += ' [%s]' % comment
                s += '\n'
            s += '\nLanguage: %s\n\n' % language
            s += 'Character set encoding: %s' % doc.settings.encoding.upper(
            )
            sub(variable, [nodes.inline('', nodes.Text(s))])
def _quoted_role(role, rawtext, text, lineno, inliner, options=None, content=None):
    """Docutils role wrapping *text* in the quote pair registered for *role*.

    Falls back to the ``"qd"`` pair when the role name has no entry in
    ``_quot_map``.  Returns the usual role tuple ``([node], [messages])``.
    """
    # None sentinels instead of mutable default arguments; backward
    # compatible with the docutils role-function calling convention.
    options = {} if options is None else options
    ql, qr = _quot_map.get(role) or _quot_map["qd"]
    text = "%s%s%s" % (_T(ql), text, _T(qr))
    node = nodes.inline(rawtext, text, **options)
    return [node], []
def add_visible_links(self, tree, show_urls='inline'):
    """Add visible link targets for external links.

    Depending on ``show_urls``: 'no' leaves the tree untouched, 'inline'
    appends the URL after each external reference, and 'footnote' collects
    the URLs into auto-numbered footnotes at the footnote spot.
    """
    def make_footnote_ref(doc, label):
        """Create a footnote_reference node with children"""
        footnote_ref = nodes.footnote_reference('[#]_')
        footnote_ref.append(nodes.Text(label))
        doc.note_autofootnote_ref(footnote_ref)
        return footnote_ref

    def make_footnote(doc, label, uri):
        """Create a footnote node with children"""
        footnote = nodes.footnote(uri)
        para = nodes.paragraph()
        para.append(nodes.Text(uri))
        footnote.append(para)
        footnote.insert(0, nodes.label('', label))
        doc.note_autofootnote(footnote)
        return footnote

    def footnote_spot(tree):
        """Find or create a spot to place footnotes.

        The function returns the tuple (parent, index)."""
        # The code uses the following heuristic:
        # a) place them after the last existing footnote
        # b) place them after an (empty) Footnotes rubric
        # c) create an empty Footnotes rubric at the end of the document
        fns = tree.traverse(nodes.footnote)
        if fns:
            fn = fns[-1]
            return fn.parent, fn.parent.index(fn) + 1
        for node in tree.traverse(nodes.rubric):
            if len(node.children) == 1 and \
                    node.children[0].astext() == FOOTNOTES_RUBRIC_NAME:
                return node.parent, node.parent.index(node) + 1
        doc = tree.traverse(nodes.document)[0]
        rub = nodes.rubric()
        rub.append(nodes.Text(FOOTNOTES_RUBRIC_NAME))
        doc.append(rub)
        return doc, doc.index(rub) + 1

    if show_urls == 'no':
        return
    if show_urls == 'footnote':
        doc = tree.traverse(nodes.document)[0]
        fn_spot, fn_idx = footnote_spot(tree)
        nr = 1
    for node in tree.traverse(nodes.reference):
        uri = node.get('refuri', '')
        # Only annotate external links whose URL is not already visible text.
        if (uri.startswith('http:') or uri.startswith('https:')
                or uri.startswith('ftp:')) and uri not in node.astext():
            idx = node.parent.index(node) + 1
            if show_urls == 'inline':
                uri = self.link_target_template % {'uri': uri}
                link = nodes.inline(uri, uri)
                link['classes'].append(self.css_link_target_class)
                node.parent.insert(idx, link)
            elif show_urls == 'footnote':
                label = FOOTNOTE_LABEL_TEMPLATE % nr
                nr += 1
                footnote_ref = make_footnote_ref(doc, label)
                node.parent.insert(idx, footnote_ref)
                footnote = make_footnote(doc, label, uri)
                fn_spot.insert(fn_idx, footnote)
                # Wire reference and footnote together via ids/backrefs.
                footnote_ref['refid'] = footnote['ids'][0]
                footnote.add_backref(footnote_ref['ids'][0])
                fn_idx += 1
def _char_entity(role, rawtext, text, lineno, inliner, options=None, content=None):
    """Docutils role mapping *text* through the character-entity table.

    Unknown entity names pass through unchanged.  Returns the usual role
    tuple ``([node], [messages])``.
    """
    # None sentinels instead of mutable default arguments; backward
    # compatible with the docutils role-function calling convention.
    options = {} if options is None else options
    char = _char_entity_map.get(text, text)
    node = nodes.inline(rawtext, char, **options)
    return [node], []
def _render_annotation(self, signode):
    """Prepend ``self.annotation`` (if set) to *signode* as a styled inline.

    No-op when the annotation is empty or missing.
    """
    annotation = self.annotation
    if not annotation:
        return
    styled = nodes.inline(annotation, annotation, classes=['sigannot'])
    signode += addnodes.desc_annotation(annotation, '', styled)
    signode += nodes.Text(' ')
def run(self):
    """Include a file as part of the content of this reST file.

    Resolves the path (supporting ``<standard>`` includes), reads the file
    with the configured encoding, slices it by start-line/end-line and
    start-after/end-before options, then either emits a literal block,
    delegates to the ``code`` directive, or feeds the lines back into the
    state machine for normal parsing.
    """
    if not self.state.document.settings.file_insertion_enabled:
        raise self.warning('"%s" directive disabled.' % self.name)
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    path = directives.path(self.arguments[0])
    if path.startswith('<') and path.endswith('>'):
        # docutils "standard" include, e.g. <isonum.txt>
        path = os.path.join(self.standard_include_path, path[1:-1])
    path = os.path.normpath(os.path.join(source_dir, path))
    path = utils.relative_path(None, path)
    path = nodes.reprunicode(path)
    encoding = self.options.get(
        'encoding', self.state.document.settings.input_encoding)
    tab_width = self.options.get(
        'tab-width', self.state.document.settings.tab_width)
    try:
        # Record the dependency so rebuilds notice changes to the file.
        self.state.document.settings.record_dependencies.add(path)
        include_file = io.FileInput(
            source_path=path, encoding=encoding,
            error_handler=(self.state.document.settings.\
                           input_encoding_error_handler),
            handle_io_errors=None)
    except UnicodeEncodeError as error:
        raise self.severe('Problems with "%s" directive path:\n'
                          'Cannot encode input file path "%s" '
                          '(wrong locale?).' %
                          (self.name, SafeString(path)))
    except IOError as error:
        raise self.severe('Problems with "%s" directive path:\n%s.' %
                          (self.name, ErrorString(error)))
    startline = self.options.get('start-line', None)
    endline = self.options.get('end-line', None)
    try:
        if startline or (endline is not None):
            lines = include_file.readlines()
            rawtext = ''.join(lines[startline:endline])
        else:
            rawtext = include_file.read()
    except UnicodeError as error:
        raise self.severe('Problem with "%s" directive:\n%s' %
                          (self.name, ErrorString(error)))
    # start-after/end-before: no restrictions on newlines in match-text,
    # and no restrictions on matching inside lines vs. line boundaries
    after_text = self.options.get('start-after', None)
    if after_text:
        # skip content in rawtext before *and incl.* a matching text
        after_index = rawtext.find(after_text)
        if after_index < 0:
            raise self.severe('Problem with "start-after" option of "%s" '
                              'directive:\nText not found.' % self.name)
        rawtext = rawtext[after_index + len(after_text):]
    before_text = self.options.get('end-before', None)
    if before_text:
        # skip content in rawtext after *and incl.* a matching text
        before_index = rawtext.find(before_text)
        if before_index < 0:
            raise self.severe('Problem with "end-before" option of "%s" '
                              'directive:\nText not found.' % self.name)
        rawtext = rawtext[:before_index]
    include_lines = statemachine.string2lines(rawtext, tab_width,
                                              convert_whitespace=True)
    if 'literal' in self.options:
        # Convert tabs to spaces, if `tab_width` is positive.
        if tab_width >= 0:
            text = rawtext.expandtabs(tab_width)
        else:
            text = rawtext
        literal_block = nodes.literal_block(rawtext, source=path,
                                            classes=self.options.get('class', []))
        literal_block.line = 1
        self.add_name(literal_block)
        if 'number-lines' in self.options:
            try:
                startline = int(self.options['number-lines'] or 1)
            except ValueError:
                raise self.error(':number-lines: with non-integer '
                                 'start value')
            endline = startline + len(include_lines)
            if text.endswith('\n'):
                # Drop trailing newline so NumberLines does not emit an
                # extra empty numbered line.
                text = text[:-1]
            tokens = NumberLines([([], text)], startline, endline)
            for classes, value in tokens:
                if classes:
                    literal_block += nodes.inline(value, value,
                                                  classes=classes)
                else:
                    literal_block += nodes.Text(value, value)
        else:
            literal_block += nodes.Text(text, text)
        return [literal_block]
    if 'code' in self.options:
        self.options['source'] = path
        # Delegate rendering to the standard code directive.
        codeblock = CodeBlock(self.name,
                              [self.options.pop('code')],  # arguments
                              self.options,
                              include_lines,  # content
                              self.lineno,
                              self.content_offset,
                              self.block_text,
                              self.state,
                              self.state_machine)
        return codeblock.run()
    # Default: hand the lines back to the parser for normal processing.
    self.state_machine.insert_input(include_lines, path)
    return []
def visitAltsep(self, ctx:TacticNotationsParser.AltsepContext):
    """Render an alternatives separator as a zero-width-space inline node."""
    separator = nodes.inline('|', '\u200b', classes=['alternative-separator'])
    return [separator]