class ROSFieldGroup(object):
    u"""Accumulates parsed fields/constants plus their surrounding comments.

    ``description`` collects comment lines seen before the first field;
    once a field exists, comment lines attach to the previous field's
    ``post_comments`` (which double as the next field's pre-comments).
    """

    def __init__(self, package_name):
        # package against which the fields' types are resolved
        self.package_name = package_name
        self.description = StringList()
        self.fields = []

    def append(self, line, source, offset):
        """Feed one raw line: a field definition, a comment, or a blank."""
        if line == '' or line[0] == '#':
            # comment or blank line
            if line:
                line = line[1:]  # drop the leading '#'
            if self.fields:
                self.fields[-1].post_comments.append(line, source=source, offset=offset)
            else:
                self.description.append(line, source=source, offset=offset)
        else:
            # field line: comments gathered so far become its pre-comments
            if self.fields:
                pre_comments = self.fields[-1].post_comments
            else:
                pre_comments = self.description
            new_field = ROSField(line, source=source, offset=offset,
                                 pre_comments=pre_comments,
                                 package_name=self.package_name)
            # if successfully parsed
            if new_field.name:
                self.fields.append(new_field)
                # NOTE(review): ``self.pre_comments`` is written here but
                # never read anywhere in this class -- looks like a leftover
                # from the module-level parse() helper (which resets a local
                # of the same name). Confirm before removing.
                self.pre_comments = StringList()
            else:
                # todo: report unparsable lines via the docutils reporter
                print("?? <%s>" % line)
def run(self):
    """Render a Jinja template (from the ``file`` option or the directive
    body) with the named registered context, then nested-parse the result.
    """
    node = nodes.Element()
    node.document = self.state.document
    jinja_context_name = self.arguments[0]
    template_filename = self.options.get("file")
    # context registered in conf.py under jinja_contexts; mutated in place
    cxt = self.app.config.jinja_contexts[jinja_context_name]
    cxt["options"] = {
        "header_char": self.options.get("header_char")
    }
    if template_filename:
        # resolve the template path from the URI form of the option
        reference_uri = directives.uri(template_filename)
        # NOTE(review): ``urllib.url2pathname`` is the Python 2 location
        # (Python 3 moved it to ``urllib.request``) -- confirm which
        # runtime this extension targets.
        template_path = urllib.url2pathname(reference_uri)
        encoded_path = template_path.encode(sys.getfilesystemencoding())
        imagerealpath = os.path.abspath(encoded_path)
        with open(imagerealpath) as f:
            tpl = Template(f.read())
    else:
        # no file given: the directive body itself is the template
        tpl = Template("\n".join(self.content))
    new_content = tpl.render(**cxt)
    # transform the text content into a string_list that the nested_parse
    # can use:
    new_content = StringList(new_content.split("\n"))
    self.state.nested_parse(new_content, self.content_offset,
                            node, match_titles=1)
    return node.children
def parse(self, file_content, package_name):
    u"""Split *file_content* (a StringList) into groups of ROSFields.

    Groups are separated by lines consisting only of dashes (``---``).
    Returns a list of field lists, one per group.
    """
    all_fields = []
    fields = []
    pre_comments = StringList()
    for item in file_content.xitems():  # (source, offset, value)
        line = item[2].strip()
        # a non-empty line made up solely of '-' characters ends a group
        if line and not [c for c in line if not c == '-']:
            all_fields.append(fields)
            fields = []
        elif line == '' or line[0] == '#':
            # comment or blank line
            if line:
                line = line[1:]  # drop the leading '#'
            if fields:
                # attach as a post-comment of the previous field
                fields[-1].post_comments.append(line, source=item[0], offset=item[1])
            # NOTE(review): this append is transcribed as unconditional --
            # the comment then feeds both the previous field's post_comments
            # and the next field's pre_comments. Confirm against upstream.
            pre_comments.append(line, source=item[0], offset=item[1])
        else:
            new_field = ROSField(line, source=item[0], offset=item[1],
                                 pre_comments=pre_comments,
                                 package_name=package_name)
            # if successfully parsed
            if new_field.name:
                fields.append(new_field)
                pre_comments = StringList()
            else:
                # todo: surface a proper warning instead of printing
                print("?? <%s>" % line)
    all_fields.append(fields)
    return all_fields
def make_docfields(self, field_groups, field_comment_option):
    """Render every (group-type, group) pair and collect the resulting
    docfield lines into a single StringList."""
    result = StringList()
    for group_type, group in zip(self.field_group_types, field_groups):
        rendered = group_type.make_docfields(group, field_comment_option)
        result.extend(rendered)
    return result
def join_blocks(blocks):
    u"""Concatenate several StringLists into one, blank-line separated."""
    joined = StringList()
    for chunk in blocks:
        joined.extend(chunk)
        # blank-line separator after every chunk
        joined.extend(StringList([u'']))
    # drop the trailing separator (only when the final line is blank)
    if joined and not joined[-1]:
        del joined[-1]
    return joined
def read(self):  # type: ignore
    # type: () -> StringList
    """Read the source, split it into lines, and attach prolog/epilog."""
    raw = super().read()
    content = StringList()
    for number, text in enumerate(string2lines(raw, convert_whitespace=True)):
        content.append(text, self.source_path, number)
    prepend_prolog(content, self.env.config.rst_prolog)
    append_epilog(content, self.env.config.rst_epilog)
    return content
def test_append_epilog(app):
    """append_epilog adds a separator line followed by the epilog lines."""
    content = StringList(['hello Sphinx world', 'Sphinx is a document generator'], 'dummy.rst')
    append_epilog(content, 'this is rst_epilog\ngood-bye reST!')
    expected = [
        ('dummy.rst', 0, 'hello Sphinx world'),
        ('dummy.rst', 1, 'Sphinx is a document generator'),
        ('<generated>', 0, ''),
        ('<rst_epilog>', 0, 'this is rst_epilog'),
        ('<rst_epilog>', 1, 'good-bye reST!'),
    ]
    assert list(content.xitems()) == expected
def run(self):
    """Render a Jinja template (``file`` option or directive body) with an
    optional registered context, with stdout debugging, then parse the
    result allowing section titles."""
    node = nodes.Element()
    node.document = self.state.document
    env = self.state.document.settings.env
    docname = env.docname
    template_filename = self.options.get("file")
    debug_template = self.options.get("debug")
    # copy so the injected "options" key doesn't leak into the shared context
    cxt = (self.app.config.jinja_contexts[self.arguments[0]].copy()
           if self.arguments else {})
    cxt["options"] = {
        "header_char": self.options.get("header_char")
    }
    if template_filename:
        if debug_template is not None:
            # dump the raw template file to stdout for debugging
            print('')
            print('********** Begin Jinja Debug Output: Template Before Processing **********')
            print('********** From {} **********'.format(docname))
            reference_uri = directives.uri(os.path.join('source', template_filename))
            template_path = urllib.url2pathname(reference_uri)
            encoded_path = template_path.encode(sys.getfilesystemencoding())
            imagerealpath = os.path.abspath(encoded_path)
            with codecs.open(imagerealpath, encoding='utf-8') as f:
                print(f.read())
            print('********** End Jinja Debug Output: Template Before Processing **********')
            print('')
        tpl = Environment(
            loader=FileSystemLoader(
                self.app.config.jinja_base, followlinks=True)
        ).get_template(template_filename)
    else:
        if debug_template is not None:
            # dump the directive body (used as the template) for debugging
            print('')
            print('********** Begin Jinja Debug Output: Template Before Processing **********')
            print('********** From {} **********'.format(docname))
            print('\n'.join(self.content))
            print('********** End Jinja Debug Output: Template Before Processing **********')
            print('')
        tpl = Environment(
            loader=FileSystemLoader(
                self.app.config.jinja_base, followlinks=True)
        ).from_string('\n'.join(self.content))
    new_content = tpl.render(**cxt)
    if debug_template is not None:
        # show the rendered output as well
        print('')
        print('********** Begin Jinja Debug Output: Template After Processing **********')
        print(new_content)
        print('********** End Jinja Debug Output: Template After Processing **********')
        print('')
    new_content = StringList(new_content.splitlines(), source='')
    sphinx.util.nested_parse_with_titles(self.state, new_content, node)
    return node.children
def test_prepend_prolog_without_CR(app):
    """A prolog without a trailing newline is still inserted intact."""
    # prolog not having CR at tail
    content = StringList(['hello Sphinx world', 'Sphinx is a document generator'], 'dummy.rst')
    prepend_prolog(content, 'this is rst_prolog\nhello reST!')
    expected = [
        ('<rst_prolog>', 0, 'this is rst_prolog'),
        ('<rst_prolog>', 1, 'hello reST!'),
        ('<generated>', 0, ''),
        ('dummy.rst', 0, 'hello Sphinx world'),
        ('dummy.rst', 1, 'Sphinx is a document generator'),
    ]
    assert list(content.xitems()) == expected
def read(self):
    # type: () -> StringList
    """Read the file and wire in rst_prolog/rst_epilog when configured."""
    text = SphinxBaseFileInput.read(self)
    content = StringList()
    for number, line in enumerate(string2lines(text, convert_whitespace=True)):
        content.append(line, self.source_path, number)
    prolog = self.env.config.rst_prolog
    if prolog:
        self.prepend_prolog(content, prolog)
    epilog = self.env.config.rst_epilog
    if epilog:
        self.append_epilog(content, epilog)
    return content
def read(self):  # type: ignore
    # type: () -> StringList
    """Deprecated reader: split the source into lines, add prolog/epilog."""
    warnings.warn('SphinxRSTFileInput is deprecated.',
                  RemovedInSphinx30Warning, stacklevel=2)
    raw = super(SphinxRSTFileInput, self).read()
    content = StringList()
    for number, text in enumerate(string2lines(raw, convert_whitespace=True)):
        content.append(text, self.source_path, number)
    prepend_prolog(content, self.env.config.rst_prolog)
    append_epilog(content, self.env.config.rst_epilog)
    return content
def append_row(*column_texts):
    # type: (*str) -> None
    """Parse each text as rst into a table cell and append the row to body."""
    row = nodes.row('')
    source, line = self.state_machine.get_source_and_line()
    for text in column_texts:
        node = nodes.paragraph('')
        vl = StringList()
        # attribute the generated text to the autosummary call site
        vl.append(text, '%s:%d:<autosummary>' % (source, line))
        with switch_source_input(self.state, vl):
            self.state.nested_parse(vl, 0, node)
        # unwrap a lone top-level paragraph so the cell isn't doubly nested
        try:
            if isinstance(node[0], nodes.paragraph):
                node = node[0]
        except IndexError:
            pass
        row.append(nodes.entry('', node))
    body.append(row)
def test_prepend_prolog(app):
    """prepend_prolog inserts the prolog after the leading docinfo block."""
    content = StringList([':title: test of SphinxFileInput',
                          ':author: Sphinx team',
                          '',
                          'hello Sphinx world',
                          'Sphinx is a document generator'], 'dummy.rst')
    prepend_prolog(content, 'this is rst_prolog\nhello reST!')
    expected = [
        ('dummy.rst', 0, ':title: test of SphinxFileInput'),
        ('dummy.rst', 1, ':author: Sphinx team'),
        ('<generated>', 0, ''),
        ('<rst_prolog>', 0, 'this is rst_prolog'),
        ('<rst_prolog>', 1, 'hello reST!'),
        ('<generated>', 0, ''),
        ('dummy.rst', 2, ''),
        ('dummy.rst', 3, 'hello Sphinx world'),
        ('dummy.rst', 4, 'Sphinx is a document generator'),
    ]
    assert list(content.xitems()) == expected
def run(self):
    """Render the provider documentation as rst and parse it into sections."""
    tags_arg = self.options.get("tags")
    tags = {t.strip() for t in tags_arg.split(",")} if tags_arg else None
    header_separator = self.options.get('header-separator')
    rendered = self.render_content(tags=tags, header_separator=header_separator)
    with switch_source_input(self.state, self.content):
        lines = StringList(rendered.splitlines(), source='')
        node = nodes.section()  # type: Element
        # necessary so that the child nodes get the right source/line set
        node.document = self.state.document
        nested_parse_with_titles(self.state, lines, node)
        # record all filenames as dependencies -- this will at least
        # partially make automatic invalidation possible
        for filepath in get_provider_yaml_paths():
            self.state.document.settings.record_dependencies.add(filepath)
    return node.children
def _build_program_description(self, parser):
    """
    Build program description

    :param parser: pre-configured ArgumentParser instance
    :return: node forming program description
    """
    raw = self._decorate_references(parser.description)
    # substitute the program name into the %(prog)s placeholder
    interpolated = raw % {"prog": parser.prog}
    result = nodes.container()
    self.state.nested_parse(StringList([interpolated]), 0, result)
    return result
def make_include(self):
    """Build a ``literalinclude`` block for the example file and parse it."""
    directive_lines = [f'.. literalinclude:: {self.arguments[0]}.py\n']
    if 'lines' in self.options:
        directive_lines.append(f' :lines: {self.options["lines"]}\n')
    else:
        directive_lines.append('')
    if 'emphasize-lines' in self.options:
        directive_lines.append(f' :emphasize-lines: {self.options["emphasize-lines"]}')
    else:
        directive_lines.append('')
    include_node = nodes.section()
    self.state.nested_parse(StringList(directive_lines), 0, include_node)
    return include_node.children
def get_table(self, items):
    """Return a definition list rather than a table."""
    # See original implementation:
    # https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/autosummary/__init__.py
    source, line = self.state_machine.get_source_and_line()
    src = f"{source}:{line}:<dlistsummary>"
    # Build definition-list markup -- a bold term line followed by an
    # indented definition -- and let Sphinx parse the finished StringList.
    s_list = StringList()
    for name, _signature, summary_string, real_name in items:
        # term: the item name in bold
        s_list.append(f"**{name}**\n", src)
        # definition: the autosummary text plus a link to the full demo
        summary_string += f" :any:`Go to demo ↱ <{real_name}>`\n"
        s_list.append(" " + summary_string, src)
    parsed = definition_list("")
    self.state.nested_parse(s_list, 0, parsed)
    # unwrap the outer node when parsing produced a single definition list
    try:
        if isinstance(parsed[0], definition_list):
            parsed = parsed[0]
    except IndexError:
        pass
    return [parsed]
def aux(name):
    """Emit automodule rst for *name*, parse it, then recurse depth-first
    into the package's direct submodules."""
    module = importlib.import_module(name)
    contents = StringList()
    contents.append(f".. automodule:: {name}", sourcename)
    if hasattr(module, "__all__"):
        # only list public names that are defined in this very module
        module_attrs = [
            attr_name for attr_name in module.__all__
            if getattr(module, attr_name).__module__ == name
        ]
        if module_attrs:
            contents.append(
                f" :members: {', '.join(module_attrs)}", sourcename
            )
    else:
        # no __all__: let autodoc pick the members itself
        contents.append(" :members:", sourcename)
    contents.append("", sourcename)
    node = docutils.nodes.section()
    nested_parse_with_titles(self.state, contents, node)
    # If this module defines any sections, then submodules should go
    # inside of the last one.
    section = node
    for child in node.children:
        if isinstance(child, docutils.nodes.section):
            section = child
    if hasattr(module, "__path__"):
        # packages only: walk direct submodules in sorted order
        submodules = sorted(
            module_info.name
            for module_info in pkgutil.iter_modules(
                module.__path__, prefix=name + "."
            )
        )
        for submodule in submodules:
            section.extend(aux(submodule))
    return node.children
def auto_code_block(self, node):
    """Try to automatically generate nodes for codeblock syntax.

    Parameters
    ----------
    node : nodes.literal_block
        Original codeblock node

    Returns
    -------
    tocnode: docutils node
        The converted toc tree node, None if conversion is not possible.
    """
    assert isinstance(node, nodes.literal_block)
    original_node = node
    if 'language' not in node:
        return None
    self.state_machine.reset(
        self.document, node.parent, self.current_level
    )
    # content = node.rawsource.split('\n')
    # presumably astext() is used because rawsource can be empty for nodes
    # produced by other transforms -- TODO confirm
    content = ''.join([child.astext() for child in node.children]).split('\n')
    language = node['language']
    if language == 'math':
        if self.config['enable_math']:
            return self.state_machine.run_directive(
                'math', content=content
            )
    elif language == 'eval_rst':
        if self.config['enable_eval_rst']:
            # allow embed non section level rst
            node = nodes.section()
            self.state_machine.state.nested_parse(
                StringList(content, source=original_node.source),
                0, node=node, match_titles=True
            )
            return node.children[:]
    else:
        # "name:: args" pseudo-language: re-parse as the named directive
        match = re.search(r'[ ]?[\w_-]+::.*', language)
        if match:
            parser = Parser()
            new_doc = new_document(None, self.document.settings)
            newsource = u'.. ' + match.group(0) + '\n' + node.rawsource
            parser.parse(newsource, new_doc)
            return new_doc.children[:]
        else:
            # plain language tag: fall back to an ordinary code-block
            return self.state_machine.run_directive(
                'code-block', arguments=[language],
                content=content
            )
    return None
def run(self):
    """When one of the autolist options is present, replace the directive
    content with the matching member names of the current module, then
    defer to the parent directive's run()."""
    for auto in ("autolist", "autolist-classes", "autolist-functions"):
        if auto in self.options:
            # Get current module name
            module_name = self.env.ref_context.get("py:module")
            # Import module
            module = import_module(module_name)
            # Get public names (if possible)
            try:
                names = getattr(module, "__all__")
            except AttributeError:
                # Get classes defined in the module
                cls_names = [
                    name[0] for name in getmembers(module, isclass)
                    if name[-1].__module__ == module_name
                    and not (name[0].startswith("_"))
                ]
                # Get functions defined in the module
                fn_names = [
                    name[0] for name in getmembers(module, isfunction)
                    if (name[-1].__module__ == module_name)
                    and not (name[0].startswith("_"))
                ]
                names = cls_names + fn_names
                # It may happen that module doesn't have any defined class or func
                if not names:
                    names = [name[0] for name in getmembers(module)]
            # Filter out members w/o doc strings
            names = [name for name in names
                     if getattr(module, name).__doc__ is not None]
            if auto == "autolist":
                # Get list of all classes and functions inside module
                names = [
                    name for name in names
                    if (isclass(getattr(module, name))
                        or isfunction(getattr(module, name)))
                ]
            else:
                if auto == "autolist-classes":
                    # Get only classes
                    check = isclass
                elif auto == "autolist-functions":
                    # Get only functions
                    check = isfunction
                else:
                    raise NotImplementedError
                names = [name for name in names
                         if check(getattr(module, name))]
            # Update content
            self.content = StringList(names)
    return super().run()
def run(self):
    """Document the Zenpy client class followed by each generated section."""
    zenpy_client = Zenpy(subdomain="party", email="face@toe", password="******")
    sections = self.generate_sections(zenpy_client)
    # header: the class signature plus its docstring
    header = '.. py:class:: Zenpy%s\n\n' % inspect.signature(zenpy_client.__class__)
    header += ' %s' % zenpy_client.__doc__
    result = []
    class_node = container()
    self.state.nested_parse(StringList(header.split('\n')), 0, class_node)
    result.append(class_node)
    for section_text in sections:
        section_node = paragraph()
        self.state.nested_parse(StringList(section_text.split('\n')), 0, section_node)
        result.append(section_node)
    return result
def run(self):
    """Build the code-links node from the role lines and parse it.

    Returns a one-element list with the container node; when a ``timeout``
    option is given it is propagated to every full-notebook reference.
    """
    lines = []
    body_lines = self._role_lines()
    for i, line in enumerate(body_lines):
        # ';' between entries, '.' after the final one
        suffix = '.' if i == len(body_lines) - 1 else ';'
        lines.append('{}{}'.format(line, suffix))
    node = self.code_links_node('\n'.join(lines))
    self.state.nested_parse(StringList(lines), self.content_offset, node)
    if 'timeout' in self.options:
        # FIX: the loop variable used to be named ``node``, shadowing the
        # container above -- ``return [node]`` then returned the last
        # traversed reference instead of the container.
        for ref in node.traverse(runrole_reference):
            if ref['reftype'] == 'fullnotebook':
                ref['timeout'] = self.options['timeout']
    return [node]
def run(self):
    """Document the cache sections of a bare Zenpy instance."""
    # NOTE(review): __new__ skips __init__, then __init__ is called with the
    # instance passed AGAIN as the first positional argument (bound methods
    # already receive self) -- confirm Zenpy.__init__ really tolerates this.
    zenpy = Zenpy.__new__(Zenpy)
    zenpy.__init__(zenpy, ' ', ' ')
    node_list = []
    cache_node = container()
    cache_sections = self.generate_cache_sections(zenpy)
    for cache_section in cache_sections:
        node = paragraph()
        self.state.nested_parse(StringList(cache_section.split('\n')), 0, node)
        node_list.append(node)
    # the (empty) container is appended after the parsed sections
    node_list.append(cache_node)
    return node_list
def run(self):
    """Run the directive: generate rst into ``self.result`` and parse it.

    Returns the children of a throw-away paragraph, i.e. the nodes
    produced by nested-parsing the generated content.
    """
    # FIX: dropped the unused local ``reporter`` that was bound from
    # self.state.document.reporter and never referenced again.
    self.result = StringList()
    self.generate()
    node = nodes.paragraph()
    node.document = self.state.document
    self.state.nested_parse(self.result, 0, node)
    return node.children
def run(self):
    """Render the YouTube thumbnail template and parse it into a paragraph."""
    video_id = self.arguments[0]
    # blank content lines become explicit paragraph breaks in the template
    description_parts = []
    for chunk in self.content:
        description_parts.append(chunk if chunk != "" else "<br><br>")
    rendered = YOUTUBE_TEMPLATE.format(
        id=video_id,
        title=self.options["title"],
        author=self.options["author"],
        description=" ".join(description_parts))
    paragraph_node = nodes.paragraph()
    self.state.nested_parse(StringList(rendered.split('\n')),
                            self.content_offset, paragraph_node)
    return [paragraph_node]
def run(self):
    """Load a spec -- from a module path argument or inline JSON content --
    and render its documentation."""
    if len(self.arguments) == 1:
        spec = get_obj_from_module(self.arguments[0].strip())
    else:
        spec = json.loads('\n'.join(self.content))
    rst_lines = doc_spec(spec)
    # parse the RST text
    desc = addnodes.desc_content()
    self.state.nested_parse(StringList(rst_lines), self.content_offset, desc)
    return [desc]
def run(self):
    """Show the directive body only after *hidden_until*, or always when the
    ``force_show_hidden_until`` setting is active; otherwise emit a caution
    telling the student to come back later."""
    self.assert_has_content()
    hidden_until = self.arguments[0]
    try:
        hidden_until = parse_date(hidden_until)
    # FIX: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        raise self.error('Unknown date format in the "%s" directive; '
                         '%s' % (self.name, hidden_until))
    force_show = self.state.document.settings.force_show_hidden_until
    after_deadline = hidden_until <= datetime.now()
    if after_deadline or force_show:
        output = []
        # Add a warning for teachers/tutors/...
        if not after_deadline and force_show:
            node = nodes.caution()
            self.add_name(node)
            text = "The feedback below will be hidden to the students until %s." % hidden_until.strftime("%d/%m/%Y %H:%M:%S")
            self.state.nested_parse(StringList(text.split("\n")), 0, node)
            output.append(node)
        text = '\n'.join(self.content)
        node = nodes.compound(text)
        self.add_name(node)
        self.state.nested_parse(self.content, self.content_offset, node)
        output.append(node)
        return output
    else:
        node = nodes.caution()
        self.add_name(node)
        text = "A part of this feedback is hidden until %s. Please come back later and reload the submission to see the full feedback." % \
            hidden_until.strftime("%d/%m/%Y %H:%M:%S")
        self.state.nested_parse(StringList(text.split("\n")), 0, node)
        return [node]
def run(self):
    """Build a toggle widget: a clickable header followed by the body."""
    body = nodes.container()
    body['classes'].append('toggle-content')
    header = nodes.container()
    header['classes'].append('toggle-header')
    # an optional argument adds an extra CSS class to the header
    if self.arguments and self.arguments[0]:
        header['classes'].append(self.arguments[0])
    self.state.nested_parse(StringList([self.options["header"]]),
                            self.content_offset, header)
    self.state.nested_parse(self.content, self.content_offset, body)
    return [header, body]
def run(self):
    """Pad the content with N blank lines, then wrap it in a white <pre>."""
    padding = StringList([" " for _ in range(int(self.arguments[0]))])
    if not self.content:
        self.content = padding
    else:
        self.content.append(padding)
    html = """<pre style="background-color: #ffffff; border: 1px solid #000000">%s</pre>""" % "\n".join(self.content)
    return [nodes.raw('', html, format='html')]
def test_directive(test_module):
    """Return an instance of HensonCLIDirective."""
    directive_kwargs = dict(
        name='hensoncli',
        arguments=['fake_extension:FakeExtension'],
        options={},
        content=StringList([], items=[]),
        lineno=1,
        content_offset=0,
        block_text='.. hensoncli:: fake_extension:FakeExtension\n',
        state=None,
        state_machine=None,
    )
    return sphinx.HensonCLIDirective(**directive_kwargs)
def _assemble(node, directive):
    """Fill *node* in strict order: pre-title header, parsed title text,
    post-title header, the directive body, and finally the footer."""
    title_text = directive.arguments[0]
    directive.add_name(node)
    header = node.HEADER_PRETITLE.format(**node.options).split('\n')
    directive.state.nested_parse(StringList(header),
                                 directive.content_offset, node)
    # the title is parsed as inline text so roles/emphasis keep working
    textnodes, messages = directive.state.inline_text(title_text,
                                                      directive.lineno)
    node += textnodes
    node += messages
    header = node.HEADER_POSTTITLE.format(**node.options).split('\n')
    directive.state.nested_parse(StringList(header),
                                 directive.content_offset, node)
    directive.state.nested_parse(directive.content,
                                 directive.content_offset, node)
    footer = node.FOOTER.format(**node.options).split('\n')
    directive.state.nested_parse(StringList(footer),
                                 directive.content_offset, node)
def sphinx_state(local_app): """ Fixture which will provide a sphinx state for use in testing sphinx directives. Yields: :class:`docutils.parsers.rst.states.State`: A state for use in testing directive functionality. """ # Get the environment and decorate it with what sphinx may need for the # parsing. env = local_app.env env.temp_data["docname"] = "test" # A fake document name # Create a document and inliner object, to be perfectly honest not sure # exactly what these are or do, but needed to get the directive to run. document = new_document(__file__) document.settings.pep_references = 1 document.settings.rfc_references = 1 document.settings.env = env document.settings.tab_width = 4 inliner = Inliner() inliner.init_customizations(document.settings) # Create a state machine so that we can get a state to pass back. statemachine = RSTStateMachine(state_classes=state_classes, initial_state="Body") statemachine.input_lines = StringList([""] * 40) state = statemachine.get_state() state.document = document state.memo = Struct( inliner=inliner, language=en, title_styles=[], reporter=document.reporter, document=document, section_level=0, section_bubble_up_kludge=False, ) state.memo.reporter.get_source_and_line = statemachine.get_source_and_line # The environemnt isn't normally available on the state in sphinx, but it's # done here to make testing easier. state.env = env # Sphinx monkeypatches docutils when run. This is how it get's # monkeypatched so that the python directives and roles can be found with sphinx_domains(env): # Provide the state back to the test. yield state
def run(self) -> Sequence[nodes.Node]:  # type: ignore
    """
    Process the content of the directive.

    Builds a pre-commit repo configuration (hooks from the ``hooks`` option
    or from the repository's own ``.pre-commit-hooks.yaml``), renders it as
    a YAML code-block, and returns the parsed node.
    """
    if "hooks" in self.options:
        hooks = self.options["hooks"]
    else:
        # search upwards from the cwd for .pre-commit-hooks.yaml
        cwd = PathPlus.cwd()
        for directory in (cwd, *cwd.parents):
            if (directory / ".pre-commit-hooks.yaml").is_file():
                hooks = [
                    h["id"] for h in yaml.safe_load((directory / ".pre-commit-hooks.yaml").read_text())
                ]
                break
        else:
            # for/else: no ancestor directory contained the file
            warnings.warn("No hooks specified and no .pre-commit-hooks.yaml file found.")
            return []
    repo = make_github_url(self.env.config.github_username, self.env.config.github_repository)
    config: _Config = {"repo": str(repo)}
    if "rev" in self.options:
        config["rev"] = self.options["rev"]
    config["hooks"] = [{"id": hook_name} for hook_name in hooks]
    if "args" in self.options:
        # NOTE(review): args are applied to the first hook only -- confirm
        # this is intentional when multiple hooks are listed.
        config["hooks"][0]["args"] = self.options["args"]
    targetid = f'pre-commit-{self.env.new_serialno("pre-commit"):d}'
    targetnode = nodes.section(ids=[targetid])
    yaml_output = yaml.round_trip_dump([config], default_flow_style=False)
    if not yaml_output:
        return []
    content = f".. code-block:: yaml\n\n{indent(yaml_output, '    ')}\n\n"
    view = StringList(content.split('\n'))
    pre_commit_node = nodes.paragraph(rawsource=content)
    self.state.nested_parse(view, self.content_offset, pre_commit_node)
    # register for purging when the document is re-read
    pre_commit_node_purger.add_node(self.env, pre_commit_node, targetnode, self.lineno)
    return [pre_commit_node]
def run(self) -> List[nodes.Node]:
    """
    Process the content of the directive.

    Renders the configured ``documentation_summary`` as a bold paragraph
    (HTML-only), optionally adding an HTML ``meta description`` tag.
    """
    summary = getattr(self.config, "documentation_summary", '').strip()
    if not summary:
        return []  # pragma: no cover
    # if self.env.app.builder.format.lower() == "latex" or not summary:
    #     return []
    targetid = f'documentation-summary-{self.env.new_serialno("documentation-summary"):d}'
    # restrict output to the HTML builder
    onlynode = addnodes.only(expr="html")
    content = f'**{summary}**'
    content_node = nodes.paragraph(rawsource=content, ids=[targetid])
    onlynode += content_node
    self.state.nested_parse(StringList([content]), self.content_offset, content_node)
    summary_node_purger.add_node(self.env, content_node, content_node, self.lineno)
    if "meta" in self.options:
        # also emit an HTML meta description for search engines
        meta_content = f'.. meta::\n    :description: {self.config.project} -- {summary}\n'
        meta_node = nodes.paragraph(rawsource=meta_content, ids=[targetid])
        onlynode += meta_node
        self.state.nested_parse(
            StringList(meta_content.split('\n')),
            self.content_offset,
            meta_node,
        )
        summary_node_purger.add_node(self.env, meta_node, meta_node, self.lineno)
    return [onlynode]
def run(self):
    """Locate the named file in the configured ``ibf_folders``, parse its
    contents as rst (titles allowed), and register it as a dependency.

    Returns the parsed child nodes.
    """
    filename = ''.join(self.content)
    # first folder containing the file wins
    for folder in self.env.config.ibf_folders:
        candidate = os.path.join(folder, filename)
        if os.path.isfile(candidate):
            filename = candidate
            break
    # FIX: use a context manager -- the original leaked the file handle
    with open(filename) as handle:
        content = [line.rstrip('\n') for line in handle]
    # parse the string list
    node = nodes.Element()
    nested_parse_with_titles(self.state, StringList(content), node)
    # rebuild this document when the included file changes
    self.state.document.settings.env.note_dependency(filename)
    return node.children
def run(self):
    """Build list-table rows for every Prometheus metric registered by the
    Gauge client, then delegate rendering to the parent table directive."""
    import faucet.gauge_prom
    from prometheus_client import CollectorRegistry
    # header row of the list-table (one '* -' line plus continuation cells)
    self.block_text = """\
* - Metric
  - Type
  - Description
"""
    gauge_metrics = faucet.gauge_prom.GaugePrometheusClient(reg=CollectorRegistry())
    for metric in gauge_metrics._reg.collect():
        # one row per collected metric: name / type / documentation
        self.block_text += """\
* - {}
  - {}
  - {}
""".format(metric.name, metric.type, metric.documentation)
    self.content = StringList()
    for lineno, line in enumerate(self.block_text.split('\n')):
        self.content.append(line, __file__, lineno)
    return super(GaugePromMetricsTable, self).run()
def run(self):
    """Document the node-class hierarchy rooted at the argument class."""
    def walk(cls):
        """Render the given class, then recursively render its
        descendants depth first.

        Appends to the outer ``lines`` variable.

        :param cls: The Jinja ``Node`` class to render.
        """
        lines.append(".. autoclass:: {}({})".format(
            cls.__name__, ", ".join(cls.fields)))
        # render member methods for nodes marked abstract
        if cls.abstract:
            members = []
            for key, value in cls.__dict__.items():
                # own public callables only (not inherited from the base)
                if (not key.startswith("_")
                        and not hasattr(cls.__base__, key)
                        and callable(value)):
                    members.append(key)
            if members:
                members.sort()
                lines.append(" :members: " + ", ".join(members))
        # reference the parent node, except for the base node
        if cls.__base__ is not object:
            lines.append("")
            lines.append(" :Node type: :class:`{}`".format(
                cls.__base__.__name__))
        lines.append("")
        children = cls.__subclasses__()
        children.sort(key=lambda x: x.__name__.lower())
        # render each child
        for child in children:
            walk(child)
    # generate the markup starting at the base class
    lines = []
    target = import_object(self.arguments[0])
    walk(target)
    # parse the generated markup into nodes
    doc = StringList(lines, "<jinja>")
    node = nodes.Element()
    self.state.nested_parse(doc, self.content_offset, node)
    return node.children
def to_content(self):
    # type: () -> StringList
    """Concatenate every changeset's content, separated by blank lines."""
    res = StringList()
    for change in self.changesets:
        res.append(change.to_content())
        # blank separator line between changesets
        res.append(ViewList([u""]))
    return res
def _translate_into_tab_demo(block_text: str) -> StringList:
    """Split *block_text* into its markdown and rst halves and emit the
    rst demo followed by the markdown demo as one StringList."""
    md_part, rst_part = _split_by_language(block_text)
    combined = StringList()
    for demo in (_rst_demo(rst_part), _md_demo(md_part)):
        combined.extend(demo)
    return combined
def run(self):
    """Export a part list from an EAGLE schematic: raw text as a literal
    block, or structured rows rendered through the CSVTable directive."""
    # timeout = 20 #default
    # if 'timeout' in self.options:
    #     timeout = self.options['timeout']
    timeout = self.options.get('timeout', 20)
    raw = 'raw' in self.options
    fname_sch = get_fname(self)
    elems = []
    if raw:
        def export_func(fname_sch, fname_img_abs):
            return raw_partlist(fname_sch, timeout=timeout)
        s = do_action(fname_sch, None, self, export_func)
    else:
        def export_func(fname_sch, fname_img_abs):
            return structured_partlist(fname_sch, timeout=timeout)
        (header, data) = do_action(fname_sch, None, self, export_func)
    # NOTE: this second `if raw` could be merged with the one above; kept
    # separate here to mirror the export/render split of the original.
    if raw:
        node_class = nodes.literal_block
        elems = [node_class(s, s)]
    else:
        # columns: explicit 'header' option, or everything the export found
        if 'header' in self.options:
            selected = self.options['header']
            selected = selected.split(',')
            selected = [x.strip() for x in selected]
        else:
            selected = header
        # build quoted CSV rows restricted to the selected columns
        d = [
            ','.join(['"' + dic[x] + '"' for x in selected])
            for dic in data
        ]
        selected = ','.join(['"' + x + '"' for x in selected])
        # self.options['header-rows'] = 1
        self.options['header'] = selected
        self.content = StringList(d)
        self.arguments[0] = ''
        # delegate actual table construction to docutils' CSVTable
        elems = CSVTable.run(self)
    return elems
def run(self): args = self.arguments fname = args[-1] env = self.state.document.settings.env fname, abs_fname = env.relfn2path(fname) basename = os.path.basename(fname) dirname = os.path.dirname(fname) try: if 'intro' in self.options: intro = self.options['intro'][:195] + '...' else: _, blocks = sphinx_gallery.gen_rst.split_code_and_text_blocks( abs_fname) intro, _ = sphinx_gallery.gen_rst.extract_intro_and_title( abs_fname, blocks[0][1]) thumbnail_rst = sphinx_gallery.backreferences._thumbnail_div( dirname, basename, intro) if 'figure' in self.options: rel_figname, figname = env.relfn2path(self.options['figure']) save_figname = os.path.join('_static/thumbs/', os.path.basename(figname)) try: os.makedirs('_static/thumbs') except OSError: pass x, y = (400, 280) if 'size' in self.options: x, y = self.options['size'].split(" ") sphinx_gallery.gen_rst.scale_image(figname, save_figname, x, y) # replace figure in rst with simple regex thumbnail_rst = re.sub(r'..\sfigure::\s.*\.png', '.. figure:: /{}'.format(save_figname), thumbnail_rst) thumbnail = StringList(thumbnail_rst.split('\n')) thumb = nodes.paragraph() self.state.nested_parse(thumbnail, self.content_offset, thumb) return [thumb] except FileNotFoundError as e: print(e) return []
def parse_doc(self, doc: str, source: str, idt: int = 0) -> StringList:
    """Convert doc string to StringList

    Args:
        doc: Documentation text
        source: Source filename
        idt: Result indentation in characters (default 0)

    Returns:
        StringList of re-indented documentation wrapped in newlines
    """
    cleaned = dedent(doc or "").strip("\n")
    reindented = indent(cleaned, " " * idt)
    # surround with blank lines so the caller can splice it anywhere
    return StringList([''] + reindented.splitlines() + [''], source)
def update_content(self):
    """Render the configured package attributes as docfields and prepend
    them to the directive's own content.

    Returns None when the package cannot be found.
    """
    package_name = self.arguments[0]
    package = self.find_package(package_name)
    if not package:
        return None
    # rebuild this document when the package manifest changes
    self.env.note_dependency(self.env.relfn2path(package.filename)[0])
    content = StringList()
    for attr in self.env.config.ros_package_attrs:
        # formatter precedence: explicit config > *_depends default > class map
        if attr in self.env.config.ros_package_attrs_formatter:
            formatter = self.env.config.ros_package_attrs_formatter[attr]
        elif attr.endswith('_depends'):
            formatter = 'depend_formatter'
        else:
            formatter = self.attr_formatters.get(attr, 'default_formatter')
        field = format_attr(package, attr, formatter)
        if field:
            content.extend(field)
    # reset every line number to 0, keeping only the source names
    content.items = [(source, 0) for source, line in content.items]
    if len(content) > 0:
        # blank line between the generated fields and the user content
        content.append(StringList([u'']))
    return content + self.content
def run(self):
    """
    Extract the directive options into a dictionary, run the script
    given as the directive content, and build the output nodes
    (optionally echoing the code and parsing the output as reST).

    @return a list of nodes (a single ``runpython_class`` node)
    """
    # settings
    sett = self.state.document.settings
    language_code = sett.language_code
    lineno = self.lineno
    # add the instance to the global settings
    if hasattr(sett, "out_runpythonlist"):
        sett.out_runpythonlist.append(self)
    # env
    if hasattr(self.state.document.settings, "env"):
        env = self.state.document.settings.env
    else:
        env = None
    if env is None:
        docname = "___unknown_docname___"
    else:
        docname = env.docname
    # post
    # values accepted as "true" for boolean-ish options; bool_set_ also
    # accepts the empty string (bare flag with no value)
    bool_set = (True, 1, "True", "1", "true")
    bool_set_ = (True, 1, "True", "1", "true", "")
    p = {
        "showcode": "showcode" in self.options,
        "showout": "showout" in self.options,
        "rst": "rst" in self.options,
        "sin": self.options.get("sin", TITLES[language_code]["In"]),
        "sout": self.options.get("sout", TITLES[language_code]["Out"]),
        "sout2": self.options.get("sout2", TITLES[language_code]["Out2"]),
        "sphinx": "sphinx" not in self.options or self.options["sphinx"] in bool_set,
        "setsysvar": self.options.get("setsysvar", None),
        "process": "process" in self.options and self.options["process"] in bool_set_,
        "exception": "exception" in self.options and self.options["exception"] in bool_set_,
    }
    if p["setsysvar"] is not None and len(p["setsysvar"]) == 0:
        # bare :setsysvar: flag falls back to this default variable name
        p["setsysvar"] = "enable_disabled_documented_pieces_of_code"
    # rst output is not indented by default, plain output is
    dind = 0 if p["rst"] else 4
    p["indent"] = int(self.options.get("indent", dind))
    # run the script: wrap the content in a uniquely named function, or
    # in a bare "if True:" block when executed in a separate process
    name = "run_python_script_{0}".format(id(p))
    if p["process"]:
        content = ["if True:"]
    else:
        content = ["def {0}():".format(name)]
    for line in self.content:
        content.append(" " + line)
    if not p["process"]:
        content.append("{0}()".format(name))
    script = "\n".join(content)
    script_disp = "\n".join(self.content)
    # if an exception is raised, the documentation should report
    # a warning
    # return [document.reporter.warning('messagr', line=self.lineno)]
    out, err = run_python_script(
        script,
        comment=' File "{0}", line {1}'.format(docname, lineno),
        setsysvar=p["setsysvar"],
        process=p["process"],
        exception=p["exception"],
    )
    if out is not None:
        out = out.rstrip(" \n\r\t")
    if err is not None:
        err = err.rstrip(" \n\r\t")
    content = out
    if len(err) > 0:
        content += "\n\nERROR:\n\n" + err
    # add member: keep the execution results on the directive instance
    self.exe_class = p.copy()
    self.exe_class.update(dict(out=out, err=err, script=script))

    # add indent
    def add_indent(content, nbind):
        # prefix every line of *content* with *nbind* spaces
        lines = content.split("\n")
        if nbind > 0:
            lines = [(" " * nbind + _) for _ in lines]
        content = "\n".join(lines)
        return content

    content = add_indent(content, p["indent"])
    # build node
    node = self.__class__.runpython_class(
        rawsource=content, indent=p["indent"], showcode=p["showcode"],
        rst=p["rst"], sin=p["sin"], sout=p["sout"]
    )
    if p["showcode"]:
        # echo the original script before its output
        pin = nodes.paragraph(text=p["sin"])
        pcode = nodes.literal_block(script_disp, script_disp)
        node += pin
        node += pcode
    elif len(self.options.get("sout", "")) == 0:
        # code not shown and no explicit output title: drop the titles
        p["sout"] = ""
        p["sout2"] = ""
    if p["rst"]:
        # interpret the script output as reST
        settings_overrides = {}
        # NOTE(review): attribute access on *sett* normally raises
        # AttributeError, not KeyError — these fallbacks may never
        # trigger; confirm the settings object's semantics
        try:
            sett.output_encoding
        except KeyError:
            settings_overrides["output_encoding"] = "unicode"
        try:
            sett.doctitle_xform
        except KeyError:
            settings_overrides["doctitle_xform"] = True
        try:
            sett.warning_stream
        except KeyError:
            settings_overrides["warning_stream"] = StringIO()
        #'initial_header_level': 2,
        if len(p["sout"]) > 0:
            node += nodes.paragraph(text=p["sout"])
        try:
            if p["sphinx"]:
                # parse with sphinx's nested parser (titles allowed)
                st = StringList(content.replace("\r", "").split("\n"))
                nested_parse_with_titles(self.state, st, node)
                dt = None
            else:
                # parse with plain docutils
                dt = core.publish_doctree(content, settings=sett,
                                          settings_overrides=settings_overrides)
        except Exception as e:
            # parsing failed: embed a literal block containing the
            # traceback plus as much context as possible for debugging
            tab = content
            content = ["::"]
            st = StringIO()
            traceback.print_exc(file=st)
            content.append("")
            trace = st.getvalue()
            trace += "\n----------------------OPT\n" + str(p)
            trace += "\n----------------------EXC\n" + str(e)
            trace += "\n----------------------SETT\n" + str(sett)
            trace += "\n----------------------ENV\n" + str(env)
            trace += "\n----------------------DOCNAME\n" + str(docname)
            trace += "\n----------------------CODE\n"
            content.extend(" " + _ for _ in trace.split("\n"))
            content.append("")
            content.append("")
            content.extend(" " + _ for _ in tab.split("\n"))
            content = "\n".join(content)
            pout = nodes.literal_block(content, content)
            node += pout
            dt = None
        if dt is not None:
            # move the docutils children under the runpython node
            for ch in dt.children:
                node += ch
    if not p["rst"] or p["showout"]:
        # show the raw output (always for non-rst, on demand for rst)
        text = p["sout2"] if p["rst"] else p["sout"]
        if len(text) > 0:
            pout2 = nodes.paragraph(text=text)
            node += pout2
        pout = nodes.literal_block(content, content)
        node += pout
    p["runpython"] = node
    # classes
    # NOTE(review): += with a plain string extends the classes list one
    # character at a time; ["runpython"] was probably intended — confirm
    node["classes"] += "-runpython"
    ns = [node]
    return ns
def __init__(self, package_name):
    """Create an empty field group for *package_name*."""
    # no fields parsed yet; description collects leading comment lines
    self.fields = []
    self.description = StringList()
    self.package_name = package_name
def make_docfields(self, field_group, field_comment_option):
    """Render the fields of *field_group* as reST docinfo field lines.

    Each field produces a ':<role> <name>:' line carrying its
    description, a '-type'-suffixed line with the field type and, for
    constants (fields with a value), an additional '-value' line.
    Source/offset information is propagated so error reports point at
    the original file.

    :param field_group: group whose ``fields`` attribute is rendered
    :param field_comment_option: parsed ``field-comment`` option tokens
        (e.g. ``quote``); NOTE(review): callers may pass bytes tokens
        on Python 3 (see the ``encode('ascii')`` in the caller), in
        which case the ``'quote' in ...`` test below never matches —
        confirm
    :return: StringList of generated docfield lines
    """
    docfields = StringList([u''])
    for field in field_group.fields:
        # a field with a value is a constant, otherwise a plain field
        field_type = self.constant_name if field.value else self.field_name
        name = field.name + field.size
        desc = field.get_description(field_comment_option)
        if len(desc) == 0:
            # no description: bare field line
            docfields.append(u':{0} {1}:'.format(field_type, name),
                             source=field.source, offset=field.offset)
        elif len(desc) == 1:
            # single-line description appended on the same line
            docfields.append(u':{0} {1}: {2}'.format(field_type, name,
                                                     desc[0].strip()),
                             source=desc.source(0), offset=desc.offset(0))
        elif len(desc) > 1:
            # multi-line description: align continuation lines, optionally
            # quoting them as reST line blocks
            if 'quote' in field_comment_option:
                align_strings(desc, ' | ')
            else:
                align_strings(desc, ' ')
            docfields.append(u':{0} {1}: {2}'.format(field_type, name,
                                                     desc[0]),
                             source=desc.source(0), offset=desc.offset(0))
            docfields.extend(desc[1:])
        # companion line carrying the field's declared type
        docfields.append(u':{0}-{1} {2}: {3}'.format(field_type, TYPE_SUFFIX,
                                                     name, field.type),
                         source=field.source, offset=field.offset)
        if field.value:
            # constants also get their literal value
            docfields.append(u':{0}-{1} {2}: {3}'.format(field_type,
                                                         VALUE_SUFFIX, name,
                                                         field.value),
                             source=field.source, offset=field.offset)
    return docfields
def update_content(self):
    """Build the directive content for a ROS type (msg/srv/action).

    Reads the type file of ``package/type`` (first argument), renders
    its fields as docinfo lines, prepends the file-level description
    (sliced/quoted per the ``description`` option) and optionally
    embeds the raw file content (``raw`` option: ``head`` or ``tail``).

    Returns None (after reporting a warning) when the package or the
    type file cannot be found.

    Raises:
        ValueError: if the ``description`` option contains an unknown
            token.
    """
    package_name, type_name = self.arguments[0].split('/', 1)
    package = self.find_package(package_name)
    if not package:
        return
    file_path, file_content \
        = self.type_file.read(os.path.dirname(package.filename), type_name)
    if file_content is None:
        self.state_machine.reporter.warning(
            'cannot find file {0}'.format(file_path), line=self.lineno)
        return
    # rebuild the document when the type file changes
    type_relfile = os.path.relpath(file_path, self.env.srcdir)
    self.env.note_dependency(type_relfile)
    field_groups = self.type_file.parse(file_content, package_name)
    # fields
    options = self.options.get('field-comment', '')
    # keep the option tokens as text: encoding to bytes here would make
    # the "'quote' in field_comment_option" tests in make_docfields
    # always fail on Python 3
    field_comment_option = options.lower().split()
    content = self.type_file.make_docfields(field_groups,
                                            field_comment_option)
    # description
    if field_groups:
        desc = field_groups[0].description
        desc_blocks = split_blocks(desc)
        if desc_blocks:
            description_option = [x.strip() for x in
                                  self.options.get('description', '').
                                  lower().split(',')]
            first = second = None
            for option in description_option:
                if not option:
                    # ignore empty option
                    pass
                elif ':' in option:
                    # "first:second" selects a slice of blocks
                    first, second = option.split(':', 1)
                elif option == 'quote':
                    # handled below when aligning
                    pass
                else:
                    raise ValueError(
                        "unknown option {0} in "
                        "the description option".format(option))
            blocks = desc_blocks[(int(first) if first else None):
                                 (int(second) if second else None)]
            if blocks:
                description = join_blocks(blocks)
                if 'quote' in description_option:
                    # render the description as a reST line block
                    align_strings(description, '| ')
                else:
                    align_strings(description)
                content = content + StringList([u'']) + description
    content = content + self.content
    # raw file content
    raw_option = self.options.get('raw', None)
    if raw_option is not None:
        code_block = StringList([u'', u'.. code-block:: rostype', u''])
        # indent the raw lines under the code-block directive, keeping
        # the original source/offset items
        code_block.extend(StringList([' '+l for l in file_content.data],
                                     items=file_content.items))
        if raw_option == 'head':
            content = code_block + StringList([u'']) + content
        elif raw_option == 'tail':
            content = content + code_block
    return content
def make_docfields(self, all_fields, field_comment_option):
    """Concatenate the docfields of every field group.

    Pairs each group in ``self.groups`` with its parsed fields from
    *all_fields* and chains the per-group docfield output into a single
    StringList.
    """
    result = StringList()
    for group, parsed_fields in zip(self.groups, all_fields):
        group_lines = group.make_docfields(parsed_fields,
                                           field_comment_option)
        result.extend(group_lines)
    return result