def run(self):
    """Parse field declarations for the fields directive.

    On first use (``FEATURES.local`` unset) fields may be declared as
    default/auxiliary/required/optional; on subsequent uses only
    required/optional are allowed and every field must have been
    declared initially.  Returns ``[]`` on success or a single reporter
    error node on failure.
    """
    features = {}  # NOTE(review): never written below; kept for parity
    if not FEATURES.local:
        for line in self.content:
            # Raw string so \s is a regex class, not a string escape
            # (non-raw "\s" is a DeprecationWarning on modern Python).
            m = re.match(r"^:([a-z][-a-z]+):\s+(default|auxiliary|required|optional)", line)
            if not m:
                error = self.state_machine.reporter.error(
                    'Invalid context: syntax error in option to initial fields directive',
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                return [error]
            FEATURES.defaults[m.group(2)] = m.group(1)
        FEATURES.set_defaults()
    else:
        FEATURES.set_base()
        for line in self.content:
            m = re.match(r"^:([a-z][-a-z]+):\s+(required|optional)", line)
            if not m:
                error = self.state_machine.reporter.error(
                    'Invalid context: syntax error in option to subsequent fields directive. Note that default and auxiliary cannot be used here.',
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                return [error]
            # dict.has_key() was removed in Python 3; use ``in`` instead.
            if (m.group(1) not in FEATURES.defaults.auxiliary
                    and m.group(1) not in FEATURES.defaults.optional
                    and m.group(1) not in FEATURES.defaults.default):
                error = self.state_machine.reporter.error(
                    'Invalid context: undefined field in subsequent fields directive. Only fields initially declared as default, optional or auxiliary may be declared. line=%s' % self.lineno,
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                return [error]
            FEATURES.local[m.group(2)] = m.group(1)
    return []
def run(self): self.env = self.state.document.settings.env # Make sure we have some content, which should be yaml that # defines some parameters. if not self.content: error = self.state_machine.reporter.error( 'No parameters defined', nodes.literal_block(self.block_text, self.block_text), line=self.lineno) return [error] if not len(self.arguments) >= 2: error = self.state_machine.reporter.error( '%s' % self.arguments, nodes.literal_block(self.block_text, self.block_text), line=self.lineno) return [error] _, status_defs_file = self.env.relfn2path(self.arguments.pop()) status_type = self.arguments.pop() self.status_defs = self._load_status_file(status_defs_file) # LOG.info("%s" % str(self.status_defs)) if status_type not in self.status_types: error = self.state_machine.reporter.error( 'Type %s is not one of %s' % (status_type, self.status_types), nodes.literal_block(self.block_text, self.block_text), line=self.lineno) return [error] self.yaml = self._load_codes() self.max_cols = len(self.headers) # TODO(sdague): it would be good to dynamically set column # widths (or basically make the colwidth thing go away # entirely) self.options['widths'] = [30, 70] self.col_widths = self.get_column_widths(self.max_cols) if isinstance(self.col_widths, tuple): # In docutils 0.13.1, get_column_widths returns a (widths, # colwidths) tuple, where widths is a string (i.e. 'auto'). # See https://sourceforge.net/p/docutils/patches/120/. self.col_widths = self.col_widths[1] # Actually convert the yaml title, messages = self.make_title() # LOG.info("Title %s, messages %s" % (title, messages)) table_node = self.build_table() self.add_name(table_node) title_block = nodes.title( text=status_type.capitalize()) section = nodes.section(ids=title_block) section += title_block section += table_node return [section] + messages
def run(self):
    """Render a table from the YAML reference file named by the
    directive's single argument.

    Returns the table node (with optional title) plus any title
    messages, or a single reporter error node on bad input.
    """
    self.env = self.state.document.settings.env
    self.app = self.env.app
    # Make sure we have some content, which should be yaml that
    # defines some parameters.
    if not self.content:
        error = self.state_machine.reporter.error(
            'No parameters defined',
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [error]
    if len(self.arguments) < 1:
        # BUG FIX: the reporter result was not assigned before, so the
        # following ``return [error]`` raised NameError instead of
        # reporting the problem.
        error = self.state_machine.reporter.error(
            'No reference file defined',
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [error]
    rel_fpath, fpath = self.env.relfn2path(self.arguments.pop())
    self.yaml_file = fpath
    self.yaml_from_file(self.yaml_file)
    self.max_cols = len(self.headers)
    self.options['widths'] = (20, 10, 10, 60)
    self.col_widths = self.get_column_widths(self.max_cols)
    # Actually convert the yaml
    title, messages = self.make_title()
    table_node = self.build_table()
    self.add_name(table_node)
    if title:
        table_node.insert(0, title)
    return [table_node] + messages
def parsemeta(self, match):
    """Parse one ``:name: value`` meta field into a pending meta node.

    Returns a ``(node_or_message, blank_finish)`` pair following the
    docutils field-parsing convention.
    """
    name = self.parse_field_marker(match)
    indented, indent, line_offset, blank_finish = \
        self.state_machine.get_first_known_indented(match.end())
    node = self.meta()
    # Defer insertion to the writer phase: only the HTML writer knows
    # how to emit <meta> tags.
    pending = nodes.pending(components.Filter,
                            {'component': 'writer',
                             'format': 'html',
                             'nodes': [node]})
    node['content'] = ' '.join(indented)
    if not indented:
        line = self.state_machine.line
        msg = self.reporter.info(
            'No content for meta tag "%s".' % name,
            nodes.literal_block(line, line),
            line=self.state_machine.abs_line_number())
        return msg, blank_finish
    tokens = name.split()
    try:
        attname, val = utils.extract_name_value(tokens[0])[0]
        node[attname.lower()] = val
    except utils.NameValueError:
        # First token is a bare name, not name=value.
        node['name'] = tokens[0]
    for token in tokens[1:]:
        try:
            attname, val = utils.extract_name_value(token)[0]
            node[attname.lower()] = val
        except utils.NameValueError as detail:
            # Python 3 syntax (``except E, e`` was removed).
            line = self.state_machine.line
            msg = self.reporter.error(
                'Error parsing meta tag attribute "%s": %s.'
                % (token, detail),
                nodes.literal_block(line, line),
                line=self.state_machine.abs_line_number())
            return msg, blank_finish
    # BUG FIX: previously fell off the end returning None on success;
    # docutils' parsemeta returns the pending node here.
    return pending, blank_finish
def check_table_dimensions(self, rows, header_rows, stub_columns):
    """Verify *rows* can satisfy the requested header rows and stub
    columns; raise SystemMessagePropagation on any shortfall."""

    def _fail(message):
        # Wrap the message in a reporter error and abort processing.
        err = self.state_machine.reporter.error(
            message,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        raise SystemMessagePropagation(err)

    total = len(rows)
    if total < header_rows:
        _fail('%s header row(s) specified but only %s row(s) of data '
              'supplied ("%s" directive).'
              % (header_rows, total, self.name))
    if total == header_rows > 0:
        _fail('Insufficient data supplied (%s row(s)); no data remaining '
              'for table body, required by "%s" directive.'
              % (total, self.name))
    for row in rows:
        width = len(row)
        if width < stub_columns:
            _fail('%s stub column(s) specified but only %s columns(s) of '
                  'data supplied ("%s" directive).'
                  % (stub_columns, width, self.name))
        if width == stub_columns > 0:
            _fail('Insufficient data supplied (%s columns(s)); no data remaining '
                  'for table body, required by "%s" directive.'
                  % (width, self.name))
def literal_block(node):
    """A block of code.

    If the node carries a language hint (``node.info``) the text is run
    through the Lexer and each token becomes a classed inline node;
    otherwise, or if highlighting fails, the raw text is emitted.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token in lexer:
                rendered.append(node.inline(classes=token[0], text=token[1]))
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.  Highlighting stays best-effort:
        # any failure falls back to an unstyled block below.
        pass
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal_block(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal_block(text=node.literal, classes=classes)
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o += n
    return o
def run(self):
    """Render an ASDF file's YAML content plus a per-block summary as
    literal blocks inside an admonition node."""
    filename = self.arguments[0]
    cwd = os.getcwd()
    # Example files live in the scratch directory; chdir so relative
    # filenames resolve there.
    os.chdir(TMPDIR)
    parts = []
    try:
        code = AsdfFile.read(filename, _get_yaml_content=True)
        code = '{0}{1}\n'.format(ASDF_MAGIC, version_string) + code.strip().decode('utf-8')
        literal = nodes.literal_block(code, code)
        literal['language'] = 'yaml'
        set_source_info(self, literal)
        parts.append(literal)
        ff = AsdfFile.read(filename)
        for i, block in enumerate(ff.blocks.internal_blocks):
            data = codecs.encode(block.data.tostring(), 'hex')
            # Truncate the hex dump for display.
            if len(data) > 40:
                data = data[:40] + '...'.encode()
            allocated = block._allocated
            size = block._size
            data_size = block._data_size
            flags = block._flags
            if flags & BLOCK_FLAG_STREAMED:
                # Streamed blocks have no fixed sizes.
                allocated = size = data_size = 0
            lines = []
            lines.append('BLOCK {0}:'.format(i))
            human_flags = []
            for key, val in FLAGS.items():
                if flags & key:
                    human_flags.append(val)
            if len(human_flags):
                lines.append(' flags: {0}'.format(' | '.join(human_flags)))
            if block.compression:
                lines.append(' compression: {0}'.format(block.compression))
            lines.append(' allocated_size: {0}'.format(allocated))
            lines.append(' used_size: {0}'.format(size))
            lines.append(' data_size: {0}'.format(data_size))
            lines.append(' data: {0}'.format(data))
            code = '\n'.join(lines)
            literal = nodes.literal_block(code, code)
            literal['language'] = 'yaml'
            set_source_info(self, literal)
            parts.append(literal)
    finally:
        # Always restore the working directory, even on read errors.
        os.chdir(cwd)
    result = nodes.admonition()
    textnodes, messages = self.state.inline_text(filename, self.lineno)
    title = nodes.title(filename, '', *textnodes)
    result += title
    result.children.extend(parts)
    return [result]
def topic(name, arguments, options, content, lineno,
          content_offset, block_text, state, state_machine,
          node_class=nodes.topic):
    """Directive function for topics (and sidebars via *node_class*).

    Returns a list with the topic node, or a single system message if
    the context or content is invalid.
    """
    if not (state_machine.match_titles
            or isinstance(state_machine.node, nodes.sidebar)):
        error = state_machine.reporter.error(
            'The "%s" directive may not be used within topics '
            'or body elements.' % name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    if not content:
        warning = state_machine.reporter.warning(
            'Content block expected for the "%s" directive; none found.'
            % name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return [warning]
    title_text = arguments[0]
    textnodes, messages = state.inline_text(title_text, lineno)
    titles = [nodes.title(title_text, '', *textnodes)]
    # sidebar uses this code
    # dict.has_key() was removed in Python 3; use ``in`` instead.
    if 'subtitle' in options:
        textnodes, more_messages = state.inline_text(options['subtitle'],
                                                     lineno)
        titles.append(nodes.subtitle(options['subtitle'], '', *textnodes))
        messages.extend(more_messages)
    text = '\n'.join(content)
    node = node_class(text, *(titles + messages))
    node['classes'] += options.get('class', [])
    if text:
        state.nested_parse(content, content_offset, node)
    return [node]
def code_block(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
    """
    The code-block directive provides syntax highlighting for blocks
    of code.  It is used with the following syntax::

        .. code-block:: python

            import sys
            def main():
                sys.stdout.write("Hello world")
    """
    language = arguments[0]
    highlighter = get_highlighter(language)
    if highlighter is None:
        error = state_machine.reporter.error(
            'The "%s" directive does not support language "%s".'
            % (name, language),
            nodes.literal_block(block_text, block_text), line=lineno)
        # BUG FIX: previously execution fell through, crashing below
        # when the missing (None) highlighter was called.
        return [error]
    if not content:
        error = state_machine.reporter.error(
            'The "%s" block is empty; content required.' % (name),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    include_text = highlighter("\n".join(content))
    html = '<div class="syntax %s">\n%s\n</div>\n' % (language, include_text)
    raw = nodes.raw('', html, format='html')
    return [raw]
def as_docutils(
    self,
    configuration=None,
    output_directory=None,
    ):
    r'''Creates a docutils node representation of the code output proxy.

    Payload lines are grouped into literal blocks: a new block starts
    at each ``>>> `` prompt that follows non-prompt (output) text.

    Returns list of docutils nodes.
    '''
    result = []
    try:
        pending = []
        saw_output = False
        for line in self.payload:
            if not line.startswith(('>>> ', '... ')):
                # Non-prompt text: the next '>>> ' begins a new block.
                saw_output = True
            elif line.startswith('>>> ') and saw_output:
                saw_output = False
                chunk = u'\n'.join(pending)
                result.append(nodes.literal_block(chunk, chunk))
                pending = []
            pending.append(line)
        if pending:
            chunk = u'\n'.join(pending)
            result.append(nodes.literal_block(chunk, chunk))
    except UnicodeDecodeError:
        print()
        print(type(self))
        for line in self.payload:
            print(repr(line))
    return result
def get_column_widths(self, max_cols):
    """Return a ``(widths_policy, col_widths)`` pair for *max_cols*.

    An explicit list in ``self.widths`` must match the column count;
    otherwise 100% is split evenly across the columns.  The policy is
    ``'auto'``, ``'given'``, or the raw (falsy) widths value.

    Raises SystemMessagePropagation on a width/column mismatch or when
    no columns were detected.
    """
    # isinstance() instead of ``type(x) == list`` (idiomatic type check).
    if isinstance(self.widths, list):
        if len(self.widths) != max_cols:
            error = self.state_machine.reporter.error(
                '"%s" widths do not match the number of columns in table '
                "(%s)." % (self.name, max_cols),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno,
            )
            raise SystemMessagePropagation(error)
        col_widths = self.widths
    elif max_cols:
        # Integer division distributes 100% as evenly as possible.
        col_widths = [100 // max_cols] * max_cols
    else:
        error = self.state_machine.reporter.error(
            "No table data detected in CSV file.",
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno,
        )
        raise SystemMessagePropagation(error)
    if self.widths == "auto":
        widths = "auto"
    elif self.widths:  # "grid" or list of integers
        widths = "given"
    else:
        widths = self.widths
    return widths, col_widths
def run(self):
    """Parse the directive content as exactly one table and attach
    header-role / column-role metadata taken from the options."""
    if not self.content:
        warning = self.state_machine.reporter.warning(
            'Content block expected for the "%s" directive; none found.'
            % self.name,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [warning]
    title, messages = self.make_title()
    container = nodes.Element()  # anonymous container for parsing
    self.state.nested_parse(self.content, self.content_offset, container)
    if len(container) != 1 or not isinstance(container[0], nodes.table):
        error = self.state_machine.reporter.error(
            'Error parsing content block for the "%s" directive: exactly '
            'one table expected.' % self.name,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [error]
    table_node = container[0]
    table_node['classes'] += self.options.get('class', [])
    # Optional comma-separated role lists for header cells and columns.
    table_node.header_roles = (
        self.options['header-roles'].split(',')
        if 'header-roles' in self.options else [])
    table_node.column_roles = (
        self.options['column-roles'].split(',')
        if 'column-roles' in self.options else [])
    self.add_name(table_node)
    if title:
        table_node.insert(0, title)
    return [table_node] + messages
def unicode_directive(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine):
    r"""
    Convert Unicode character codes (numbers) to characters.  Codes
    may be decimal numbers, hexadecimal numbers (prefixed by ``0x``,
    ``x``, ``\x``, ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or
    XML-style numeric character entities (e.g. ``&#x262E;``).  Text
    following ".." is a comment and is ignored.  Spaces are ignored,
    and any other text remains as-is.
    """
    if not isinstance(state, states.SubstitutionDef):
        error = state_machine.reporter.error(
            'Invalid context: the "%s" directive can only be used within a '
            'substitution definition.' % (name),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    substitution_definition = state_machine.node
    # dict.has_key() was removed in Python 3; use ``in`` instead.
    if 'trim' in options:
        substitution_definition.attributes['ltrim'] = 1
        substitution_definition.attributes['rtrim'] = 1
    if 'ltrim' in options:
        substitution_definition.attributes['ltrim'] = 1
    if 'rtrim' in options:
        substitution_definition.attributes['rtrim'] = 1
    codes = unicode_comment_pattern.split(arguments[0])[0].split()
    element = nodes.Element()
    for code in codes:
        try:
            decoded = directives.unicode_code(code)
        except ValueError as err:
            # Python 3 syntax (``except E, e`` was removed).
            error = state_machine.reporter.error(
                'Invalid character code: %s\n%s: %s'
                % (code, err.__class__.__name__, err),
                nodes.literal_block(block_text, block_text), line=lineno)
            return [error]
        element += nodes.Text(decoded)
    # BUG FIX: previously fell off the end returning None; docutils'
    # unicode directive returns the decoded text nodes here.
    return element.children
def run(self):
    """Validate the content and the ``bias`` option of the
    greedy-line-table directive, then parse the two-level bullet list
    into table data."""
    if not self.content:
        error = self.state_machine.reporter.error(
            'The "%s" directive is empty; content required.' % self.name,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [error]
    self.bias = self.options.get('bias', 'right')
    if self.bias not in ['left', 'right']:
        # BUG FIX: the original format string was '"%s%. Expecting...' --
        # the stray '%' made %-formatting raise ValueError at runtime.
        error = self.state_machine.reporter.error(
            'Unable to recognise greedy-line-table bias "%s". Expecting "left" or "right".' % self.bias,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [error]
    title, messages = self.make_title()
    node = nodes.Element()  # anonymous container for parsing
    self.state.nested_parse(self.content, self.content_offset, node)
    try:
        num_cols, col_widths = self.check_list_content(node)
        # Row i, column j cell content lives at node[0][i][0][j].children.
        table_data = [[item.children for item in row_list[0]]
                      for row_list in node[0]]
        header_rows = self.options.get('header-rows', 0)
        stub_columns = self.options.get('stub-columns', 0)
    except SystemMessagePropagation as detail:
        # Python 3 syntax (``except E, detail`` was removed).
        return [detail.args[0]]
    # NOTE(review): this excerpt ends without building/returning the
    # table from table_data -- presumably the remainder of the method
    # lies outside this chunk; confirm against the full file.
def check_list_content(self, node):
    """Ensure *node* holds exactly one two-level bullet list and
    return ``(num_cols, col_widths)`` where num_cols is the widest
    row's column count."""
    if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
        error = self.state_machine.reporter.error(
            'Error parsing content block for the "%s" directive: '
            'exactly one bullet list expected.' % self.name,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        raise SystemMessagePropagation(error)
    list_node = node[0]
    num_cols = 0
    # Each top-level item must itself wrap a second-level bullet list
    # (one row); track the widest row seen.
    for item_index, item in enumerate(list_node):
        if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
            error = self.state_machine.reporter.error(
                'Error parsing content block for the "%s" directive: '
                'two-level bullet list expected, but row %s does not '
                'contain a second-level bullet list.'
                % (self.name, item_index + 1),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(error)
        num_cols = max(len(item[0]), num_cols)
    return num_cols, self.get_column_widths(num_cols)
def render_indigorenderer_images(app, doctree):
    """Replace indigorenderer image placeholders in *doctree* with the
    rendered images and any captured textual output.

    On a renderer error the placeholder is replaced by a literal block
    holding the source text and a builder warning is emitted.
    """
    for img in doctree.traverse(nodes.image):
        if not hasattr(img, 'indigorenderer'):
            continue
        text = img.indigorenderer['text']
        options = img.indigorenderer['options']
        try:
            relative_paths, output = render_indigorenderer(
                app, text, options,
                os.path.dirname(doctree.attributes['source']),
                os.path.abspath(os.curdir))
            imgnodes = []
            if 'noimage' not in options:
                for relative_path in relative_paths:
                    newimg = img.copy()
                    newimg['uri'] = relative_path.replace('\\', '/')
                    # Scale each image down so the set shares the line.
                    newimg['scale'] = 1.0 / float(len(relative_paths))
                    imgnodes.append(newimg)
                span = img.copy()
                span['uri'] = relative_uri(app.builder.env.docname,
                                           '_static') + '/span.png'
                imgnodes.append(span)
            if output:
                if 'noimage' not in options:
                    newline = nodes.line()
                    imgnodes.append(newline)
                if 'nooutputtitle' not in options:
                    title = nodes.Text('Output:')
                    imgnodes.append(title)
                literal = nodes.literal_block(output, output)
                literal['classes'] += ['output']
                imgnodes.append(literal)
            img.replace_self(imgnodes)
        except IndigoRendererError as exc:
            # Python 3 syntax (``except E, exc`` was removed).
            app.builder.warn('indigorenderer error: ' + str(exc))
            img.replace_self(nodes.literal_block(text, text))
            continue
def run(self):
    """Parse the content as exactly one table and apply the directive's
    widths option to the table's colspec children."""
    if not self.content:
        warning = self.state_machine.reporter.warning(
            'Content block expected for the "%s" directive; none found.'
            % self.name,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [warning]
    title, messages = self.make_title()
    node = nodes.Element()  # anonymous container for parsing
    self.state.nested_parse(self.content, self.content_offset, node)
    if len(node) != 1 or not isinstance(node[0], nodes.table):
        error = self.state_machine.reporter.error(
            'Error parsing content block for the "%s" directive: exactly '
            'one table expected.' % self.name,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [error]
    table_node = node[0]
    table_node['classes'] += self.options.get('class', [])
    tgroup = table_node[0]
    if type(self.widths) == list:
        # Explicit per-column widths: overwrite each colspec in order.
        colspecs = [child for child in tgroup.children
                    if child.tagname == 'colspec']
        for colspec, col_width in zip(colspecs, self.widths):
            colspec['colwidth'] = col_width
    if self.widths == 'auto':
        tgroup['colwidths'] = 'auto'
    else:
        # Covers both an explicit list and the default/grid case.
        tgroup['colwidths'] = 'given'
    self.add_name(table_node)
    if title:
        table_node.insert(0, title)
    return [table_node] + messages
def include(name, arguments, options, content, lineno,
            content_offset, block_text, state, state_machine):
    """Include a reST file as part of the content of this reST file."""
    if not state.document.settings.file_insertion_enabled:
        warning = state_machine.reporter.warning(
            '"%s" directive disabled.' % name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return [warning]
    source = state_machine.input_lines.source(
        lineno - state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    path = directives.path(arguments[0])
    # <name> selects a file from the standard include directory.
    if path.startswith('<') and path.endswith('>'):
        path = os.path.join(standard_include_path, path[1:-1])
    path = os.path.normpath(os.path.join(source_dir, path))
    path = utils.relative_path(None, path)
    encoding = options.get('encoding',
                           state.document.settings.input_encoding)
    try:
        state.document.settings.record_dependencies.add(path)
        include_file = io.FileInput(
            source_path=path, encoding=encoding,
            error_handler=state.document.settings.input_encoding_error_handler,
            handle_io_errors=None)
    except IOError as error:
        # Python 3 syntax (``except IOError, error`` was removed).
        severe = state_machine.reporter.severe(
            'Problems with "%s" directive path:\n%s: %s.'
            % (name, error.__class__.__name__, error),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [severe]
    # NOTE(review): the excerpt ends here; the remainder of the
    # directive (reading and inserting include_file) is outside this
    # chunk.
def unicode_directive(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine):
    r"""
    Convert Unicode character codes (numbers) to characters.  Codes
    may be decimal numbers, hexadecimal numbers (prefixed by ``0x``,
    ``x``, ``\x``, ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or
    XML-style numeric character entities (e.g. ``&#x262E;``).  Text
    following ".." is a comment and is ignored.  Spaces are ignored,
    and any other text remains as-is.
    """
    if not isinstance(state, states.SubstitutionDef):
        error = state_machine.reporter.error(
            'Invalid context: the "%s" directive can only be used within a '
            'substitution definition.' % (name),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    # Everything after ".. " is a comment.
    codes = arguments[0].split('.. ')[0].split()
    element = nodes.Element()
    for code in codes:
        try:
            if code.isdigit():
                # chr() covers the full Unicode range on Python 3
                # (unichr was removed).
                element += nodes.Text(chr(int(code)))
            else:
                match = unicode_pattern.match(code)
                if match:
                    value = match.group(1) or match.group(2)
                    element += nodes.Text(chr(int(value, 16)))
                else:
                    # Not a recognised code: pass the text through.
                    element += nodes.Text(code)
        except ValueError as err:
            # Python 3 syntax (``except E, err`` was removed).
            error = state_machine.reporter.error(
                'Invalid character code: %s\n%s' % (code, err),
                nodes.literal_block(block_text, block_text), line=lineno)
            return [error]
    # BUG FIX: previously fell off the end returning None; docutils'
    # unicode directive returns the decoded text nodes here.
    return element.children
def raw(name, arguments, options, content, lineno,
        content_offset, block_text, state, state_machine):
    """
    Pass through content unchanged

    Content is included in output based on type argument

    Content may be included inline (content section of directive) or
    imported from a file or url.
    """
    # Python 3: ``raise E, msg`` and dict.has_key() were removed.
    if 'file' in options or 'url' in options:
        raise NotImplementedError('File inclusion not allowed!')
    if (not state.document.settings.raw_enabled
            or (not state.document.settings.file_insertion_enabled
                and ('file' in options or 'url' in options))):
        warning = state_machine.reporter.warning(
            '"%s" directive disabled.' % name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return [warning]
    attributes = {'format': ' '.join(arguments[0].lower().split())}
    encoding = options.get('encoding',
                           state.document.settings.input_encoding)
    if content:
        if 'file' in options or 'url' in options:
            error = state_machine.reporter.error(
                '"%s" directive may not both specify an external file and '
                'have content.' % name,
                nodes.literal_block(block_text, block_text), line=lineno)
            return [error]
        text = '\n'.join(content)
    elif 'file' in options:
        if 'url' in options:
            error = state_machine.reporter.error(
                'The "file" and "url" options may not be simultaneously '
                'specified for the "%s" directive.' % name,
                nodes.literal_block(block_text, block_text), line=lineno)
            return [error]
        source_dir = os.path.dirname(
            os.path.abspath(state.document.current_source))
        path = os.path.normpath(os.path.join(source_dir, options['file']))
        path = utils.relative_path(None, path)
        try:
            state.document.settings.record_dependencies.add(path)
            raw_file = io.FileInput(
                source_path=path, encoding=encoding,
                error_handler=state.document.settings.input_encoding_error_handler,
                handle_io_errors=None)
        except IOError as error:
            severe = state_machine.reporter.severe(
                'Problems with "%s" directive path:\n%s.'
                % (name, error),
                nodes.literal_block(block_text, block_text), line=lineno)
            return [severe]
        try:
            text = raw_file.read()
        except UnicodeError as error:
            severe = state_machine.reporter.severe(
                'Problem with "%s" directive:\n%s: %s'
                % (name, error.__class__.__name__, error),
                nodes.literal_block(block_text, block_text), line=lineno)
            return [severe]
    # NOTE(review): the excerpt ends here; the url branch and the node
    # construction are outside this chunk.
def run(self):
    """Render the directive body with ditaa into a PNG under
    content/uml and return an image node pointing at it (or a reporter
    error node on failure)."""
    path = os.path.abspath(os.path.join('content', 'uml'))
    if not os.path.exists(path):
        os.makedirs(path)
    # NOTE(review): this local shadows the docutils ``nodes`` module
    # name; it holds the directive's return nodes.
    nodes = []
    body = '\n'.join(self.content)
    # Write the diagram source to a temp file for ditaa to consume.
    tf = tempfile.NamedTemporaryFile(delete=True)
    tf.write(body.encode('utf8'))
    tf.flush()
    imgext = ".png"
    # make a name
    name = tf.name + imgext
    output_path = os.path.join(path, os.path.basename(name))
    alt = self.options.get('alt', 'ditaa diagram')
    classes = self.options.pop('class', ['ditaa'])
    # NOTE(review): argument order is input then output here -- confirm
    # against the ditaa CLI ('-o' means "overwrite existing output").
    cmdline = ['ditaa', '-v', '-o', tf.name, output_path]
    try:
        p = Popen(cmdline, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
    except Exception as exc:
        error = self.state_machine.reporter.error(
            'Failed to run ditaa: %s' % (exc, ),
            literal_block(self.block_text, self.block_text),
            line=self.lineno)
        nodes.append(error)
    else:
        if p.returncode == 0:
            # renaming output image using an hash code, just to not pollute
            # output directory with a growing number of images
            name = os.path.join(path, os.path.basename(name))
            newname = os.path.join(path, "%08x" % (adler32(body.encode('utf8')) & 0xffffffff))+imgext
            try:
                # for Windows
                os.remove(newname)
            except Exception as exc:
                logger.debug('File '+newname+' does not exist, not deleted')
            os.rename(name, newname)
            url = global_siteurl + '/uml/' + os.path.basename(newname)
            imgnode = image(uri=url, classes=classes, alt=alt)
            nodes.append(imgnode)
        else:
            # ditaa exited non-zero: surface its stderr to the author.
            error = self.state_machine.reporter.error(
                'Error in "%s" directive: %s' % (self.name, err),
                literal_block(self.block_text, self.block_text),
                line=self.lineno)
            nodes.append(error)
    return nodes
def run(self):
    """Render a JSON schema plus example fragments, validating each
    fragment against the schema when ``self.validate`` is set.

    Fragments whose validation outcome contradicts the document's
    should-pass annotation raise ValueError (an authoring error).
    """
    result = []
    schema, parts = split_content(self.content)
    literal = nodes.literal_block(
        schema.content, schema.content)
    literal['language'] = 'javascript'
    literal['classes'] = ['jsonschema']
    set_source_info(self, literal)
    result.append(literal)
    for part in parts:
        if self.validate:
            is_valid = True
            try:
                jsonschema.validate(part.json, schema.json)
            except jsonschema.ValidationError as e:
                is_valid = False
            except jsonschema.SchemaError as e:
                # A broken schema is an authoring error, not a fragment
                # failure -- abort loudly.
                raise ValueError("Schema is invalid:\n{0}\n\n{1}".format(
                    str(e), schema.content))
            if is_valid != part.should_pass:
                if part.should_pass:
                    raise ValueError(
                        "Doc says fragment should pass, "
                        "but it does not validate:\n" + part.content)
                else:
                    raise ValueError(
                        "Doc says fragment should not pass, "
                        "but it validates:\n" + part.content)
        else:
            # Without validation, trust the document's annotation.
            is_valid = part.should_pass
        if len(part.comment):
            paragraph = nodes.paragraph('', '')
            comment = statemachine.StringList(part.comment)
            comment.parent = self.content.parent
            self.state.nested_parse(comment, 0, paragraph)
            paragraph['classes'] = ['jsonschema-comment']
            set_source_info(self, paragraph)
            result.append(paragraph)
        literal = nodes.literal_block(
            part.content, part.content)
        literal['language'] = 'javascript'
        # Style the fragment by its validation outcome.
        if is_valid:
            literal['classes'] = ['jsonschema-pass']
        else:
            literal['classes'] = ['jsonschema-fail']
        set_source_info(self, literal)
        result.append(literal)
    return result
def run(self):
    """Build the node set for a court directive: a court node, a
    court-id bubble, optional notes, and reporters parsed from the
    content.  Requires the ``court-id`` option.
    """
    # dict.has_key() was removed in Python 3; use ``in`` instead.
    if 'court-id' not in self.options:
        error = self.state_machine.reporter.error(
            'Invalid context: missing court-id option in court directive',
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [error]
    # Record the current jurisdiction for nested directives.
    traveling_jurisdiction[0] = self.options["court-id"]
    if "flp-key" in self.options:
        courts_map[self.options["flp-key"]] = self.options["court-id"]
    court_id_node = courtid()
    court_id_bubble = courtbubble(rawsource=self.options['court-id'],
                                  text=self.options['court-id'])
    court_id_bubble["href"] = self.mkGitHubUrl("courts",
                                               self.options["court-id"])
    court_id_node += court_id_bubble
    court_node = court()
    court_text = nodes.inline(rawsource=self.arguments[0],
                              text=self.arguments[0])
    if "url" in self.options:
        court_ref = nodes.reference(refuri=self.options["url"])
        court_link = octiconlink()
        court_ref += court_link
        court_ref += court_text
        court_node += court_ref
    else:
        court_node += court_text
    # Split in two here, and validate.
    # notes:: is optional, must be the first element if present, and can occur only once.
    # reporter:: is the only other permitted element.
    foundNotes = False  # NOTE(review): never set True; kept for parity
    foundReporter = False
    reporter_offset = 0
    for line in self.content:
        if line.startswith('.. reporter::'):
            foundReporter = True
        if line.startswith('.. notes::'):
            if foundReporter:
                error = self.state_machine.reporter.error(
                    'Invalid structure: notes:: must come before any reporter:: in court directive',
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                return [error]
        if not foundReporter:
            # Count the lines that belong to the notes section.
            reporter_offset += 1
    note_content = '\n'.join(self.content[0:reporter_offset])
    note_node = nodes.generated(rawsource=note_content)
    self.state.nested_parse(self.content[0:reporter_offset],
                            self.content_offset, note_node)
    reporter_content = '\n'.join(self.content[reporter_offset:])
    reporters_node = reporters()
    self.state.nested_parse(self.content[reporter_offset:],
                            self.content_offset + reporter_offset,
                            reporters_node)
    return [court_node, court_id_node, note_node, reporters_node]
def run(self):
    """Build a side-by-side "rosetta" comparison: the directive body
    (old OpenMDAO code) on the left, source extracted from the named
    class/method (current OpenMDAO) on the right."""
    # create a list of document nodes to return
    doc_nodes = []
    # LEFT side = Old OpenMDAO
    text = '\n'.join(self.content)
    left_body = nodes.literal_block(text, text)
    left_body['language'] = 'python'
    left_body['classes'].append('rosetta_left')
    # for RIGHT side, get the code block, and reduce it if requested
    arg = self.arguments
    right_method = arg[0]
    text = get_source_code_of_class_or_method(right_method,
                                              remove_docstring=False)
    if len(arg) == 3:
        # Optional start/end marker strings select a sub-range.
        start_txt = arg[1]
        end_txt = arg[2]
        lines = text.split('\n')
        istart = 0
        for j, line in enumerate(lines):
            if start_txt in line:
                istart = j
                break
        lines = lines[istart:]
        iend = len(lines)
        for j, line in enumerate(lines):
            if end_txt in line:
                iend = j+1
                break
        lines = lines[:iend]
        # Remove the check suppression.
        for j, line in enumerate(lines):
            if "prob.setup(check=False" in line:
                lines[j] = lines[j].replace('check=False, ', '')
                lines[j] = lines[j].replace('check=False', '')
        # prune whitespace down to match first line
        # NOTE(review): strips 4 chars per pass while testing a single
        # leading space -- looks like the test string lost spaces in
        # formatting; confirm against the original file.
        while lines[0].startswith(' '):
            lines = [line[4:] for line in lines]
        text = '\n'.join(lines)
    # RIGHT side = Current OpenMDAO
    right_body = nodes.literal_block(text, text)
    right_body['language'] = 'python'
    right_body['classes'].append('rosetta_right')
    doc_nodes.append(left_body)
    doc_nodes.append(right_body)
    return doc_nodes
def run(self): self.assert_has_content() # Default values options = self.options for option in ('title', 'comments', 'subject', 'keywords'): if options.get(option) is None: options[option] = u"" if options.get('language') is None: # The website language, not the content language # because the wiki is not multilingual anyway context = get_context() languages = context.site_root.get_property('website_languages') language = context.accept_language.select_language(languages) options['language'] = language # Cover page if self.arguments: # Push cover as an option cover_uri = checkid(self.arguments[0][1:-2]) options['cover'] = directives.uri(cover_uri) # Metadata metadata = ['Book:'] for key in ('toc-depth', 'ignore-missing-pages', 'title', 'comments', 'subject', 'keywords', 'language', 'filename'): value = options.get(key) if not value: continue metadata.append(' %s: %s' % (key, value)) template = options.get('template') if template is not None: metadata.append(' template: ') meta_node = nodes.literal_block('Book Metadata', '\n'.join(metadata)) meta_node.append(nodes.reference(refuri=template, text=template, name=template, wiki_template=True)) else: meta_node = nodes.literal_block('Book Metadata', '\n'.join(metadata)) book_node = book(self.block_text, **options) if self.arguments: # Display the cover cover_text = self.arguments.pop(0) textnodes, messages = self.state.inline_text(cover_text, self.lineno) book_node += nodes.title(cover_text, '', *textnodes) book_node += messages # Parse inner list self.state.nested_parse(self.content, self.content_offset, book_node) # Automatically number pages for bullet_list in book_node.traverse(condition=nodes.bullet_list): bullet_list.__class__ = nodes.enumerated_list bullet_list.tagname = 'enumerated_list' return [meta_node, book_node]
def run(self):
    """Dynamically create and register a custom interpreted text role.

    The first content line names the new role (optionally deriving from a
    base role as ``new(base)``); the remaining content is parsed as the
    directive block supplying role options/content. Returns any system
    messages produced along the way.
    """
    # The role name must appear on the directive's first line.
    if self.content_offset > self.lineno or not self.content:
        raise self.error('"%s" directive requires arguments on the first '
                         'line.' % self.name)
    args = self.content[0]
    match = self.argument_pattern.match(args)
    if not match:
        raise self.error('"%s" directive arguments not valid role names: '
                         '"%s".' % (self.name, args))
    new_role_name = match.group(1)
    base_role_name = match.group(3)
    messages = []
    if base_role_name:
        # Resolve the base role; failure yields an error node, not a raise.
        base_role, messages = roles.role(
            base_role_name, self.state_machine.language, self.lineno,
            self.state.reporter)
        if base_role is None:
            error = self.state.reporter.error(
                'Unknown interpreted text role "%s".' % base_role_name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return messages + [error]
    else:
        base_role = roles.generic_custom_role
    assert not hasattr(base_role, 'arguments'), (
        'Supplemental directive arguments for "%s" directive not '
        'supported (specified by "%r" role).' % (self.name, base_role))
    try:
        # Reuse the directive-block parser to collect the role's
        # options/content from the remaining lines.
        converted_role = convert_directive_function(base_role)
        (arguments, options, content, content_offset) = (
            self.state.parse_directive_block(
                self.content[1:], self.content_offset, converted_role,
                option_presets={}))
    except states.MarkupError as detail:
        error = self.state_machine.reporter.error(
            'Error in "%s" directive:\n%s.' % (self.name, detail),
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return messages + [error]
    if 'class' not in options:
        # Default the role's class option to one derived from its name.
        try:
            options['class'] = directives.class_option(new_role_name)
        except ValueError as detail:
            error = self.state_machine.reporter.error(
                'Invalid argument for "%s" directive:\n%s.'
                % (self.name, SafeString(detail)),
                nodes.literal_block(
                    self.block_text, self.block_text),
                line=self.lineno)
            return messages + [error]
    role = roles.CustomRole(new_role_name, base_role, options, content)
    roles.register_local_role(new_role_name, role)
    return messages
def run(self):
    """Render a data file through a template and parse the result as reST.

    Requires 'source' and 'template' options; emits an error node when
    either is missing, and bails out quietly when the builder cannot
    render templates.
    """
    environment = self.state.document.settings.env
    sphinx_app = environment.app
    active_builder = sphinx_app.builder

    # Some builders have no templates manager at all, and some
    # have the attribute set to None.
    if not getattr(active_builder, 'templates', None):
        sphinx_app.warn(
            'The builder has no template manager, '
            'ignoring the datatemplate directive.')
        return []

    def option_error(message):
        # Build a reporter error node for a missing required option.
        return self.state_machine.reporter.error(
            message,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)

    if 'source' not in self.options:
        return [option_error('No source set for datatemplate directive')]
    data_source = self.options['source']

    if 'template' not in self.options:
        return [option_error('No template set for datatemplate directive')]
    template_name = self.options['template']

    render_context = {
        'make_list_table': helpers.make_list_table,
        'make_list_table_from_mappings':
            helpers.make_list_table_from_mappings,
        'data': self._load_data(environment, data_source),
    }
    rendered = active_builder.templates.render(template_name, render_context)

    view = ViewList()
    for rendered_line in rendered.splitlines():
        view.append(rendered_line, data_source)

    parent = nodes.section()
    parent.document = self.state.document
    nested_parse_with_titles(self.state, view, parent)
    return parent.children
def run(self):
    """
    Create a list of document nodes to return.
    """
    doc_nodes = []
    # Running counter used to number the In/Out blocks.
    n = 1

    # grabbing source, and output of a test segment
    method_path = self.arguments[0]

    src, exc_txt, input_blocks, output_blocks, skipped = \
        get_and_run_test(method_path)

    if skipped:
        # do the old way
        # we want the body of test code to be formatted and code highlighted
        body = nodes.literal_block(src, src)
        body['language'] = 'python'
        doc_nodes.append(body)

    # We want the output block to also be formatted similarly unless test
    # was skipped
    if skipped:
        # NOTE(review): this output_node is built but never appended to
        # doc_nodes — confirm whether the skipped message is meant to be
        # shown.
        output = "Test skipped because " + exc_txt
        output_node = skipped_or_failed_node(text=output, number=n,
                                             kind="skipped")
    elif failed:
        # NOTE(review): `failed` is not defined anywhere in this function —
        # this branch raises NameError when reached. It was presumably a
        # sixth value returned by get_and_run_test; verify against that
        # helper and restore the unpacking.
        output_node = skipped_or_failed_node(text=exc_txt, number=n,
                                             kind="failed")
        doc_nodes.append(output_node)
    else:
        # Escape the captured output for safe HTML embedding.
        output_blocks = [cgiesc.escape(ob) for ob in output_blocks]
        if 'no-split' in self.options:
            # Single combined In block followed by a single Out block.
            input_block = '\n'.join(input_blocks)
            output_block = '\n'.join(output_blocks)

            input_node = nodes.literal_block(input_block, input_block)
            input_node['language'] = 'python'
            doc_nodes.append(input_node)

            output_node = in_or_out_node(kind="Out", number=n,
                                         text=output_block)
            doc_nodes.append(output_node)
        else:
            # Interleave each input segment with its captured output;
            # empty outputs get no Out block.
            for input_block, output_block in zip(input_blocks,
                                                 output_blocks):
                input_node = nodes.literal_block(input_block, input_block)
                input_node['language'] = 'python'
                doc_nodes.append(input_node)

                if len(output_block) > 0:
                    output_node = in_or_out_node(kind="Out", number=n,
                                                 text=output_block)
                    doc_nodes.append(output_node)
                n += 1

    return doc_nodes
def run(self):
    """Render each JSON part (with its optional reST comment) as a
    highlighted literal block, then append a unified diff between every
    pair of parts.
    """
    result = []
    parts = self.split_content(self.content)
    for part in parts:
        if len(part.comment):
            # The comment lines are parsed as nested reST into a paragraph.
            paragraph = nodes.paragraph("", "")
            comment = statemachine.StringList(part.comment)
            comment.parent = self.content.parent
            self.state.nested_parse(comment, 0, paragraph)
            paragraph["classes"] = ["jsonschema-comment"]
            set_source_info(self, paragraph)
            result.append(paragraph)

        # Wrap the pretty-printed JSON in a custom container node that
        # keeps the raw text for later processing.
        container = jsonschema_node()
        container["raw_json"] = part.json
        set_source_info(self, container)
        pprint_content = pprint_json(part.json)
        literal = nodes.literal_block(pprint_content, pprint_content)
        literal["language"] = "json"
        set_source_info(self, literal)
        container.children.append(literal)
        result.append(container)

    # Emit a diff node for every unordered pair of parts; the later part
    # is the "from" side and the earlier one the "to" side.
    for indx, part in enumerate(parts):
        for other_part in parts[(indx + 1):]:
            p1 = pprint_json(part.json).split("\n")
            p2 = pprint_json(other_part.json).split("\n")
            diff_str = "\n".join(
                difflib.unified_diff(
                    p2,
                    p1,
                    lineterm="",
                    fromfile=(other_part.comment[0]
                              if other_part.comment else ""),
                    tofile=(part.comment[0] if part.comment else ""),
                )
            )
            container = diff_node()
            set_source_info(self, container)
            literal = nodes.literal_block(diff_str, diff_str)
            literal["language"] = "diff"
            set_source_info(self, literal)
            container.children.append(literal)
            result.append(container)
    return result
def run(self):
    """Highlight the directive content with Pygments.

    When line numbering is requested Pygments is asked for table output,
    which is split into a linenos cell and a code cell; otherwise a single
    raw-HTML literal block is produced.
    """
    self.assert_has_content()
    language = self.arguments[0]
    code = '\n'.join(self.content)

    pygmentize_args = {}
    if self.options.get('number-lines', None):
        # A non-empty value is the starting line number.
        pygmentize_args['linenostart'] = int(self.options['number-lines'])
    # 'table' when numbering is on (even with an empty option value),
    # False otherwise.
    pygmentize_args['linenos'] = 'number-lines' in self.options and 'table'

    node = nodes.table() if pygmentize_args['linenos'] \
        else nodes.literal_block()
    node['classes'] = self.options.get('class', [])
    node.attributes['data-language'] = language
    self.add_name(node)
    set_source_info(self, node)

    # if called from "include", set the source
    if 'source' in self.options:
        node.attributes['source'] = self.options['source']

    if pygmentize_args['linenos']:
        # Anchor line numbers on the node's id, falling back to a hash of
        # the code when the node has no ids.
        anchor_id = node['ids'][-1] if node['ids'] \
            else md5(code.encode('utf-8')).hexdigest()
        pygmentize_args['lineanchors'] = anchor_id
        pygmentize_args['anchorlinenos'] = True

    linespec = self.options.get('emphasize-lines')
    if linespec:
        try:
            nlines = len(self.content)
            # parselinenos yields 0-based indexes; Pygments wants 1-based.
            pygmentize_args['hl_lines'] = \
                [x + 1 for x in parselinenos(linespec, nlines)]
        except ValueError as err:
            document = self.state.document
            return [document.reporter.warning(str(err), line=self.lineno)]

    output = pygmentize(code, language, **pygmentize_args)
    # remove empty span included by Pygments
    # See:
    # https://bitbucket.org/birkenfeld/pygments-main/issues/1254/empty-at-the-begining-of-the-highlight
    output = output.replace('<span></span>', '')

    pre = re.findall('<pre.*?>(.*?)\n*</pre>', output, re.DOTALL)
    if len(pre) == 1:
        node += nodes.raw(pre[0], pre[0], format='html')
    else:
        # pygments returned a table: first <pre> holds the line numbers,
        # second holds the highlighted code.
        row = nodes.row()
        node += row

        linenos_cell = nodes.entry(classes=['linenos'])
        linenos_cell += nodes.literal_block(
            '', '', nodes.raw(pre[0], pre[0], format='html'))

        code_cell = nodes.entry(classes=['code'])
        code_cell += nodes.literal_block(
            '', '', nodes.raw(pre[1], pre[1], format='html'))

        row += linenos_cell
        row += code_cell
    return [node]
def run(self):
    """ Implements the directive

    Parses an EPICS database file into a section per record: comments
    become bullet lists, field values that name other PVs become :pv:
    cross-references, and records can be hidden by tag or regex.
    (Python 2 code: uses print statements.)
    """
    env = self.state.document.settings.env
    if not hasattr(env, 'epicsPVs'):
        # Map of normalized PV label -> docname, used for cross-references.
        env.epicsPVs = {}

    # Get content and options
    file_path = self.arguments[0]
    show_pv = self.options.get('show-pv', None)
    hide_pv = self.options.get('hide-pv', None)
    hide_tag = 'hide-tag' in self.options
    if hide_pv is not None:
        # Comma-separated list of regexes naming PVs to hide.
        hide_pv = [re.compile(pv.strip()) for pv in hide_pv.split(',')]

    if not file_path:
        return [self._report('file_path -option missing')]

    # Transform the path suitable for processing
    file_path = self._get_directive_path(file_path)

    # NOTE(review): file handle is never closed — consider a with-block.
    dbFile = open(file_path, 'r').readlines()

    # The section is titled with the bare file name.
    file_path = os.path.basename(file_path)
    node = nodes.section()
    node['ids'] = [file_path]
    node += nodes.title(text=file_path)

    in_record = False
    hide_record = False
    tags = {}
    comments = []
    for line in dbFile:
        # handle dos files
        line = line.replace('\r\n', '\n')

        # collect record comments
        if self.reComment.match(line):
            if self.reVDCTComment.match(line):
                # ignore VDCT comments
                continue
            tag = self.reTagComment.match(line)
            if tag is not None:
                # Tag comments (e.g. HIDE_PV) toggle behavior for the
                # upcoming record.
                tags[tag.group(1)] = True
                continue
            comments.append(line)
            continue

        # ignore expand blocks for now
        if self.reExpand.match(line):
            hide_record = True
            print "Ignoring db expand"
            continue

        recordMatch = self.reRecord.match(line)
        if recordMatch:
            pvName = recordMatch.group(2)
            if hide_tag and 'HIDE_PV' in tags:
                print "hiding tagged PV", pvName
                hide_record = True
                continue
            if hide_pv is not None:
                for regex in hide_pv:
                    if regex.match(pvName):
                        print "hiding found PV", pvName
                        hide_record = True
                        # NOTE(review): this continue only advances the
                        # inner regex loop; processing of the record still
                        # falls through below (hide_record suppresses
                        # output later) — confirm this is intended.
                        continue
            in_record = True
            record_text = ''
            # where does :ref: role modify the label?
            label = normalize_ref(pvName)
            env.epicsPVs[label] = env.docname
            section = nodes.section()
            section['ids'] = [label]
            title = nodes.title(text=pvName)
            section += title
            if len(comments) > 0:
                # Accumulated comments become a bullet list under the
                # record's title.
                bullets = nodes.bullet_list()
                for comment in comments:
                    item = nodes.list_item()
                    item += nodes.paragraph(text=comment.lstrip(' #'))
                    bullets += item
                section += bullets

        if in_record:
            # parse the field for PV names
            fieldMatch = self.reField.match(line)
            # '1' is a digit, so non-field lines take the else branch and
            # are copied through verbatim.
            fieldPV = '1'
            if fieldMatch:
                indent, field, fieldPV, attrib = fieldMatch.groups()
            if not fieldPV.isdigit():
                # expand PV names (only non-constants)
                record_text += '%sfield(%s, ":pv:`%s`%s")\n' % (
                    indent, field, fieldPV, attrib)
            else:
                record_text += line

            if self.reEndRecord.match(line):
                if not hide_record:
                    # parse record through inline rst parser to resolve
                    # PV links
                    text_nodes, messages = self.state.inline_text(
                        record_text, self.lineno)
                    section += nodes.literal_block(
                        record_text, '', *text_nodes, **self.options)
                    node += section
                # Reset per-record state for the next record.
                in_record = False
                hide_record = False
                comments = []
                tags = {}

                # add the PV to the index
                indextext = _('%s (PV)') % pvName
                inode = addnodes.index(entries=[('single', indextext,
                                                 normalize_ref(pvName),
                                                 pvName)])
                node += inode

    return [node]
def run(self) -> List[Node]:  # noqa: C901
    """Implement option method.

    Extended Sphinx code-block run(): in addition to the standard
    emphasize-lines handling it parses emphasize-added / emphasize-removed
    line specs and emphasize-text, passing them through highlight_args.
    """
    document = self.state.document
    code = "\n".join(self.content)
    location = self.state_machine.get_source_and_line(self.lineno)

    linespec = self.options.get("emphasize-lines")
    if linespec:
        try:
            nlines = len(self.content)
            hl_lines = parselinenos(linespec, nlines)
            if any(i >= nlines for i in hl_lines):
                logger.warning(
                    __("line number spec is out of range(1-%d): %r")
                    % (nlines, self.options["emphasize-lines"]),
                    location=location,
                )
            # Convert to 1-based and drop out-of-range entries.
            hl_lines = [x + 1 for x in hl_lines if x < nlines]
        except ValueError as err:
            return [document.reporter.warning(err, line=self.lineno)]
    else:
        hl_lines = []

    # add parsing for hl_added and hl_removed
    linespec = self.options.get("emphasize-added")
    if linespec:
        try:
            nlines = len(self.content)
            hl_added = parselinenos(linespec, nlines)
            if any(i >= nlines for i in hl_added):
                logger.warning(
                    __("line number spec is out of range(1-%d): %r")
                    % (nlines, self.options["emphasize-added"]),
                    location=location,
                )
            hl_added = [x + 1 for x in hl_added if x < nlines]
        except ValueError as err:
            return [document.reporter.warning(err, line=self.lineno)]
    else:
        hl_added = []

    # add parsing for hl_added and hl_removed
    linespec = self.options.get("emphasize-removed")
    if linespec:
        try:
            nlines = len(self.content)
            hl_removed = parselinenos(linespec, nlines)
            if any(i >= nlines for i in hl_removed):
                logger.warning(
                    __("line number spec is out of range(1-%d): %r")
                    % (nlines, self.options["emphasize-removed"]),
                    location=location,
                )
            hl_removed = [x + 1 for x in hl_removed if x < nlines]
        except ValueError as err:
            return [document.reporter.warning(err, line=self.lineno)]
    else:
        hl_removed = []

    if "dedent" in self.options:
        location = self.state_machine.get_source_and_line(self.lineno)
        lines = code.split("\n")
        lines = dedent_lines(lines, self.options["dedent"],
                             location=location)
        code = "\n".join(lines)

    literal = nodes.literal_block(code, code)
    if "linenos" in self.options or "lineno-start" in self.options:
        literal["linenos"] = True
    literal["classes"] += self.options.get("class", [])
    literal["force"] = "force" in self.options
    if self.arguments:
        # highlight language specified
        literal["language"] = self.arguments[0]
    else:
        # no highlight language specified. Then this directive refers the
        # current highlight setting via ``highlight`` directive or
        # ``highlight_language`` configuration.
        literal["language"] = self.env.temp_data.get(
            "highlight_language", self.config.highlight_language)

    # Pass the collected emphasis specs through to the highlighter.
    extra_args = literal["highlight_args"] = {}
    if hl_lines:
        extra_args["hl_lines"] = hl_lines
    if hl_added:
        extra_args["hl_added"] = hl_added
    if hl_removed:
        extra_args["hl_removed"] = hl_removed
    if "lineno-start" in self.options:
        extra_args["linenostart"] = self.options["lineno-start"]
    if "emphasize-text" in self.options:
        extra_args["hl_text"] = self.options["emphasize-text"]
    self.set_source_info(literal)

    # if there is a caption, we need to wrap this node in a container
    caption = self.options.get("caption")
    if caption:
        try:
            literal = container_wrapper(self, literal, caption)
        except ValueError as exc:
            return [document.reporter.warning(exc, line=self.lineno)]

    self.add_name(literal)
    return [literal]
def get_codeblock_node(self, code, language):
    """this is copied from sphinx.directives.code.CodeBlock.run

    it has been changed to accept code and language as an arguments instead
    of reading from self
    """
    document = self.state.document
    location = self.state_machine.get_source_and_line(self.lineno)

    linespec = self.options.get("emphasize-lines")
    if linespec:
        try:
            nlines = len(code.split("\n"))
            hl_lines = parselinenos(linespec, nlines)
            if any(i >= nlines for i in hl_lines):
                emph_lines = self.options["emphasize-lines"]
                log.warning(__(
                    f"line number spec is out of range(1-{nlines}): "
                    f"{emph_lines!r}"
                ), location=location)
            # Convert to 1-based and drop out-of-range entries.
            hl_lines = [x + 1 for x in hl_lines if x < nlines]
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
    else:
        hl_lines = None

    if "dedent" in self.options:
        location = self.state_machine.get_source_and_line(self.lineno)
        lines = code.split("\n")
        lines = dedent_lines(lines, self.options["dedent"],
                             location=location)
        code = "\n".join(lines)

    literal = nodes.literal_block(code, code)
    literal["language"] = language
    literal["linenos"] = "linenos" in self.options or \
        "lineno-start" in self.options
    literal["classes"] += self.options.get("class", [])
    extra_args = literal["highlight_args"] = {}
    if hl_lines is not None:
        extra_args["hl_lines"] = hl_lines
    if "lineno-start" in self.options:
        extra_args["linenostart"] = self.options["lineno-start"]
    set_source_info(self, literal)

    caption = self.options.get("caption")
    if caption:
        try:
            literal = container_wrapper(self, literal, caption)
        except ValueError as exc:
            return [document.reporter.warning(str(exc), line=self.lineno)]

    # literal will be note_implicit_target that is linked from caption and
    # numref. when options['name'] is provided, it should be primary ID.
    self.add_name(literal)

    return [literal]
rawtext = rawtext[after_index + len(after_text):] before_text = self.options.get('end-before', None) if before_text: # skip content in rawtext after *and incl.* a matching text before_index = rawtext.find(before_text) if before_index < 0: raise self.severe('Problem with "end-before" option of "%s" ' 'directive:\nText not found.' % self.name) rawtext = rawtext[:before_index] if 'literal' in self.options: # Convert tabs to spaces, if `tab_width` is positive. if tab_width >= 0: text = rawtext.expandtabs(tab_width) else: text = rawtext literal_block = nodes.literal_block(rawtext, text, source=path) literal_block.line = 1 return [literal_block] else: include_lines = statemachine.string2lines(rawtext, tab_width, convert_whitespace=1) self.state_machine.insert_input(include_lines, path) return [] class Raw(Directive): """ Pass through content unchanged Content is included in output based on type argument
def _construct_manpage_specific_structure(self, parser_info):
    """
    Construct a typical man page consisting of the following elements:
        NAME (automatically generated, out of our control)
        SYNOPSIS
        DESCRIPTION
        OPTIONS
        FILES
        SEE ALSO
        BUGS

    ``parser_info`` is the argparse-derived description dict; returns the
    list of section nodes.
    """
    items = []
    # SYNOPSIS section
    synopsis_section = nodes.section(
        '',
        nodes.title(text='Synopsis'),
        nodes.literal_block(text=parser_info["bare_usage"]),
        ids=['synopsis-section'])
    items.append(synopsis_section)
    # DESCRIPTION section
    if 'nodescription' not in self.options:
        description_section = nodes.section(
            '',
            nodes.title(text='Description'),
            nodes.paragraph(text=parser_info.get(
                'description',
                parser_info.get('help', "undocumented").capitalize())),
            ids=['description-section'])
        # The directive's own content is parsed into the description.
        nested_parse_with_titles(self.state, self.content,
                                 description_section)
        items.append(description_section)
    if parser_info.get('epilog') and 'noepilog' not in self.options:
        # TODO: do whatever sphinx does to understand ReST inside
        # docstrings magically imported from other places. The nested
        # parse method invoked above seem to be able to do this but
        # I haven't found a way to do it for arbitrary text
        # NOTE(review): if 'nodescription' is set, description_section is
        # unbound here and this raises NameError — confirm intended flow.
        if description_section:
            description_section += nodes.paragraph(
                text=parser_info['epilog'])
        else:
            description_section = nodes.paragraph(
                text=parser_info['epilog'])
            items.append(description_section)
    # OPTIONS section
    options_section = nodes.section(
        '',
        nodes.title(text='Options'),
        ids=['options-section'])
    if 'args' in parser_info:
        options_section += nodes.paragraph()
        options_section += nodes.subtitle(text='Positional arguments:')
        options_section += self._format_positional_arguments(parser_info)
    for action_group in parser_info['action_groups']:
        if 'options' in parser_info:
            options_section += nodes.paragraph()
            options_section += nodes.subtitle(text=action_group['title'])
            options_section += self._format_optional_arguments(
                action_group)

    # NOTE: we cannot generate NAME ourselves. It is generated by
    # docutils.writers.manpage
    # TODO: items.append(files)
    # TODO: items.append(see also)
    # TODO: items.append(bugs)

    # Only emit the Options section if it gained children beyond its title.
    if len(options_section.children) > 1:
        items.append(options_section)
    if 'nosubcommands' not in self.options:
        # SUBCOMMANDS section (non-standard)
        subcommands_section = nodes.section(
            '',
            nodes.title(text='Sub-Commands'),
            ids=['subcommands-section'])
        if 'children' in parser_info:
            subcommands_section += self._format_subcommands(parser_info)
        if len(subcommands_section) > 1:
            items.append(subcommands_section)
    if os.getenv("INCLUDE_DEBUG_SECTION"):
        import json

        # DEBUG section (non-standard)
        debug_section = nodes.section(
            '',
            nodes.title(text="Argparse + Sphinx Debugging"),
            nodes.literal_block(text=json.dumps(parser_info, indent='  ')),
            ids=['debug-section'])
        items.append(debug_section)
    return items
def code_block_directive(name, arguments, options, content, lineno,
                         content_offset, block_text, state, state_machine):
    """Parse and classify content of a code_block.

    Supports an 'include' option (with start-at/start-after/end-at/
    end-before trimming), tab expansion, optional line numbering with an
    offset, and hl_lines dimming via Pygments token classes.
    (Python 2 era code — see review notes inline.)
    """
    if 'include' in options:
        try:
            if 'encoding' in options:
                encoding = options['encoding']
            else:
                encoding = 'utf-8'
            content = codecs.open(
                options['include'], 'r', encoding).read().rstrip()
        except (IOError, UnicodeError):
            # no file or problem finding it or reading it
            log.error('Error reading file: "%s" L %s' %
                      (options['include'], lineno))
            content = u''
        line_offset = 0
        if content:
            # here we define the start-at and end-at options
            # so that limit is included in extraction
            # this is different than the start-after directive of docutils
            # (docutils/parsers/rst/directives/misc.py L73+)
            # which excludes the beginning
            # the reason is we want to be able to define a start-at like
            # def mymethod(self)
            # and have such a definition included
            after_text = options.get('start-at', None)
            if after_text:
                # skip content in include_text before *and NOT incl.* a
                # matching text
                after_index = content.find(after_text)
                if after_index < 0:
                    # NOTE(review): reporter.severe returns a node, not an
                    # exception — raising it is suspect.
                    raise state_machine.reporter.severe(
                        'Problem with "start-at" option of "%s" '
                        'code-block directive:\nText not found.'
                        % options['start-at'])
                # patch mmueller start
                # Move the after_index to the beginning of the line with
                # the match.
                for char in content[after_index:0:-1]:
                    # codecs always opens binary. This works with '\n',
                    # '\r' and '\r\n'. We are going backwards, so '\n' is
                    # found first in '\r\n'.
                    # Going with .splitlines() seems more appropriate
                    # but needs a few more changes.
                    if char == u'\n' or char == u'\r':
                        break
                    after_index -= 1
                # patch mmueller end
                content = content[after_index:]
                # NOTE(review): computed after content was truncated, so
                # this slices the NEW content — line_offset is likely wrong
                # here (compare the start-after branch below, which
                # computes the offset before truncating).
                line_offset = len(content[:after_index].splitlines()) - 1

            after_text = options.get('start-after', None)
            if after_text:
                # skip content in include_text before *and incl.* a
                # matching text
                after_index = content.find(after_text)
                if after_index < 0:
                    raise state_machine.reporter.severe(
                        'Problem with "start-after" option of "%s" '
                        'code-block directive:\nText not found.'
                        % options['start-after'])
                after_index = after_index + len(after_text)
                # Move the after_index to the start of the line after the
                # match
                for char in content[after_index:]:
                    if char == u'\n':
                        break
                    after_index += 1
                line_offset = len(content[:after_index].splitlines())
                content = content[after_index:]

            # same changes here for the same reason
            before_text = options.get('end-at', None)
            if before_text:
                # skip content in include_text after *and incl.* a
                # matching text
                before_index = content.find(before_text)
                if before_index < 0:
                    raise state_machine.reporter.severe(
                        'Problem with "end-at" option of "%s" '
                        'code-block directive:\nText not found.'
                        % options['end-at'])
                content = content[:before_index + len(before_text)]

            before_text = options.get('end-before', None)
            if before_text:
                # skip content in include_text after *and NOT incl.* a
                # matching text
                before_index = content.find(before_text)
                if before_index < 0:
                    raise state_machine.reporter.severe(
                        'Problem with "end-before" option of "%s" '
                        'code-block directive:\nText not found.'
                        % options['end-before'])
                content = content[:before_index]
    else:
        # Inline content: the directive body is a line list, join it.
        line_offset = options.get('linenos_offset')
        content = u'\n'.join(content)

    if 'tabsize' in options:
        tabw = options['tabsize']
    else:
        tabw = int(options.get('tab-width', 8))

    content = content.replace('\t', ' ' * tabw)

    hl_lines = options.get('hl_lines', [])

    withln = "linenos" in options
    if not "linenos_offset" in options:
        line_offset = 0

    language = arguments[0]
    # create a literal block element and set class argument
    code_block = nodes.literal_block(classes=["code", language])

    lineno = 1 + line_offset
    total_lines = content.count('\n') + 1 + line_offset
    if withln:
        # Emit the first line number; subsequent ones are inserted at each
        # newline while walking the Pygments tokens below.
        lnwidth = len(str(total_lines))
        fstr = "\n%%%dd " % lnwidth
        linenumber_cls = 'linenumber'
        if hl_lines and lineno not in hl_lines:
            # Dim line numbers of non-highlighted lines.
            linenumber_cls = 'pygments-diml'
        code_block += nodes.inline(fstr[1:] % lineno, fstr[1:] % lineno,
                                   classes=[linenumber_cls])

    # parse content with pygments and add to code_block element
    for cls, value in DocutilsInterface(content, language, options):
        if hl_lines and lineno not in hl_lines:
            # Dim tokens on non-highlighted lines.
            cls = "diml"
        if withln and "\n" in value:
            linenumber_cls = 'linenumber'
            if hl_lines and (lineno + 1) not in hl_lines:
                # use lineno+1 as we're on the previous line when we
                # render the next line number
                linenumber_cls = 'pygments-diml'
            # Split on the "\n"s
            values = value.split("\n")
            # The first piece, pass as-is
            code_block += nodes.Text(values[0], values[0])
            # On the second and later pieces, insert \n and linenos
            linenos = range(lineno, lineno + len(values))
            # NOTE(review): zip(...)[1:] subscripts a zip object — valid in
            # Python 2 only; under Python 3 this raises TypeError.
            for chunk, ln in zip(values, linenos)[1:]:
                if ln <= total_lines:
                    code_block += nodes.inline(fstr % ln, fstr % ln,
                                               classes=[linenumber_cls])
                    code_block += nodes.Text(chunk, chunk)
            lineno += len(values) - 1
        elif cls in unstyled_tokens:
            if "\n" in value:
                lineno = lineno + value.count("\n")
            # insert as Text to decrease the verbosity of the output.
            code_block += nodes.Text(value, value)
        else:
            if "\n" in value:
                lineno = lineno + value.count("\n")
            code_block += nodes.inline(value, value,
                                       classes=["pygments-" + cls])

    return [code_block]
def run(self):
    """Dump an ASDF file's structure for documentation.

    Emits the YAML header, a summary block per internal binary block, and
    the block index (when present), all as YAML-highlighted literal blocks
    wrapped in an admonition titled with the file name.
    """
    filename = self.arguments[0]

    # The ASDF fixture files live in TMPDIR; restore the cwd afterwards.
    cwd = os.getcwd()
    os.chdir(TMPDIR)

    parts = []
    try:
        # Extract just the YAML header text and prepend the magic line.
        ff = AsdfFile()
        code = AsdfFile._open_impl(ff, filename, _get_yaml_content=True)
        code = '{0} {1}\n'.format(ASDF_MAGIC, version_string) + \
            code.strip().decode('utf-8')
        literal = nodes.literal_block(code, code)
        literal['language'] = 'yaml'
        set_source_info(self, literal)
        parts.append(literal)

        with AsdfFile.open(filename) as ff:
            for i, block in enumerate(ff.blocks.internal_blocks):
                # Hex-dump the first bytes of the block's data, truncated.
                data = codecs.encode(block.data.tostring(), 'hex')
                if len(data) > 40:
                    data = data[:40] + '...'.encode()
                allocated = block._allocated
                size = block._size
                data_size = block._data_size
                flags = block._flags

                if flags & BLOCK_FLAG_STREAMED:
                    # Streamed blocks have no fixed sizes.
                    allocated = size = data_size = 0

                lines = []
                lines.append('BLOCK {0}:'.format(i))

                human_flags = []
                for key, val in FLAGS.items():
                    if flags & key:
                        human_flags.append(val)
                if len(human_flags):
                    lines.append('    flags: {0}'.format(
                        ' | '.join(human_flags)))
                if block.compression:
                    lines.append('    compression: {0}'.format(
                        block.compression))
                lines.append('    allocated_size: {0}'.format(allocated))
                lines.append('    used_size: {0}'.format(size))
                lines.append('    data_size: {0}'.format(data_size))
                lines.append('    data: {0}'.format(data))

                code = '\n'.join(lines)

                literal = nodes.literal_block(code, code)
                literal['language'] = 'yaml'
                set_source_info(self, literal)
                parts.append(literal)

            internal_blocks = list(ff.blocks.internal_blocks)
            if (len(internal_blocks) and
                    internal_blocks[-1].array_storage != 'streamed'):
                # A block index only exists when the last block is not
                # streamed; render it as written on disk.
                buff = io.BytesIO()
                ff.blocks.write_block_index(buff, ff)
                block_index = buff.getvalue()
                literal = nodes.literal_block(block_index, block_index)
                literal['language'] = 'yaml'
                set_source_info(self, literal)
                parts.append(literal)
    finally:
        os.chdir(cwd)

    # Wrap everything in an admonition titled with the file name.
    result = nodes.admonition()
    textnodes, messages = self.state.inline_text(filename, self.lineno)
    title = nodes.title(filename, '', *textnodes)
    result += title
    result += parts

    return [result]
def code(self, language, text):
    """Append *text* as a literal block tagged with *language* to the
    node currently being built.
    """
    literal = nodes.literal_block(text, text, language=language)
    self.current_node.append(literal)
def run(self):
    """Render Dojo API documentation fetched from an RPC endpoint.

    The directive arguments mix the API id with flag tokens (":methods:",
    ":table:", ...). The JSON description is fetched (and memoized in
    dojo_api_inline_cache), filtered by the flags, and rendered to raw
    HTML. (Python 2 code: print-free here, but urllib.urlopen is Py2.)
    """
    def genSigTable(parameters):
        # HTML table of parameter name/type/description rows.
        table_html = "<table class='docutils' border='1'><thead><tr><th class='head'>Name</th><th class='head'>Type</th><th class='head'>Description</th></tr></thead><tbody>"
        for param in parameters:
            required = "optional"
            if param["usage"] == "required":
                required = "required"
            summary = ""
            if "summary" in param:
                summary += param["summary"]
            if "description" in param:
                summary += param["description"]
            table_html += "<tr><td>%s</td><td class='%s'>%s</td><td>%s</td></tr>" % (
                param["name"], required, param["type"], summary)
        table_html += "</tbody></table>"
        return table_html

    def genReturns(mid, method, returns):
        # "<mid>::<method>() returns <types>." paragraph.
        return_types = ""
        if (len(returns) > 1):
            return_list = []
            for return_type in returns:
                return_list.append(return_type["type"])
            return_types += "<code>%s</code>" % (
                "</code> or <code>".join(return_list))
        else:
            return_types += "<code>%s</code>" % (returns[0]["type"])
        return "<p><code>%s::%s()</code> returns %s.</p>" % (
            mid, method, return_types)

    def genReturnsTop(mid, returns):
        # Same as genReturns but for the module-level function itself.
        return_types = ""
        if len(returns) > 1:
            return_list = []
            for return_type in returns:
                return_list.append(return_type["type"])
            return_types += "<code>%s</code>" % (
                "</code> or <code>".join(return_list))
        else:
            return_types += "<code>%s</code>" % (returns[0]["type"])
        return "<p><code>%s()</code> returns %s.</p>" % (mid, return_types)

    def genReturnsInline(returns):
        # Pipe-separated type list for table cells.
        return_list = []
        for return_type in returns:
            return_list.append(return_type["type"])
        return "|".join(return_list)

    def genTopFunc(topfunc, summary_flag, description_flag, sig_flag,
                   returns_flag):
        # Render the module-level callable's docs per the flag switches.
        out = ""
        if summary_flag and ("summary" in topfunc):
            out += topfunc["summary"]
        if description_flag and ("description" in topfunc):
            out += topfunc["description"]
        if sig_flag and (len(topfunc["parameters"])):
            out += genSigTable(topfunc["parameters"])
        if returns_flag and ("return-types" in topfunc) and (len(
                topfunc["return-types"])):
            out += genReturnsTop(api, topfunc["return-types"])
        return out

    def genMethod(methods, head_level, headers_flag, summary_flag,
                  description_flag, sig_flag, returns_flag):
        # Render each method as a heading plus optional detail sections.
        out = ""
        for method in methods:
            if headers_flag:
                out += "<h%s>%s()</h%s>" % (head_level, method["name"],
                                            head_level)
            if summary_flag and ("summary" in method):
                out += method["summary"]
            if description_flag and ("description" in method):
                out += method["description"]
            if sig_flag and (len(method["parameters"])):
                out += genSigTable(method["parameters"])
            if returns_flag and ("return-types" in method) and (len(
                    method["return-types"])):
                out += genReturns(api, method["name"],
                                  method["return-types"])
        return out

    def genMethodTable(methods, summary_flag, description_flag):
        # Compact one-row-per-method table variant.
        table_html = "<table class='docutils' border='1'><thead><tr><th class='head'>Method</th><th class='head'>Returns</th><th class='head'>Description</th></tr></thead><tbody>"
        for method in methods:
            summary = ""
            if summary_flag and ("summary" in method):
                summary += method["summary"]
            if description_flag and ("description" in method):
                summary += method["description"]
            sig = []
            if (len(method["parameters"])):
                for param in method["parameters"]:
                    required = "optional"
                    if param["usage"] == "required":
                        required = "required"
                    sig.append("<span class='%s'>%s</span>" % (
                        required, param["name"]))
            table_html += "<tr><td>%s(%s)</td><td>%s</td><td>%s</td></tr>" % (
                method["name"], ",".join(sig),
                genReturnsInline(method["return-types"]), summary)
        table_html += "</tbody></table>"
        return table_html

    def genProperty(props, head_level, headers_flag, summary_flag,
                    description_flag):
        # Render each property as heading plus summary/description.
        out = ""
        for prop in props:
            if headers_flag:
                out += "<h%s>%s</h%s>" % (head_level, prop["name"],
                                          head_level)
            if summary_flag and ("summary" in prop):
                out += prop["summary"]
            if description_flag and ("description" in prop):
                out += prop["description"]
        return out

    def genPropertyTable(props, summary_flag, description_flag):
        # Compact one-row-per-property table variant.
        table_html = "<table class='docutils' border='1'><thead><tr><th class='head'>Property</th><th class='head'>Type</th><th class='head'>Description</th></tr></thead><tbody>"
        for prop in props:
            summary = ""
            if summary_flag and ("summary" in prop):
                summary += prop["summary"]
            if description_flag and ("description" in prop):
                summary += prop["description"]
            table_html += "<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (
                prop["name"], prop["type"], summary)
        table_html += "</tbody></table>"
        return table_html

    def genEvent(events, head_level, headers_flag, summary_flag,
                 description_flag):
        # Render each event as heading plus summary/description.
        out = ""
        for event in events:
            if headers_flag:
                out += "<h%s>%s</h%s>" % (head_level, event["name"],
                                          head_level)
            if summary_flag and ("summary" in event):
                out += event["summary"]
            if description_flag and ("description" in event):
                out += event["description"]
        return out

    def genEventTable(events):
        # Not implemented yet — events have no table rendering.
        return ""

    arguments = self.arguments
    api = self.arguments[0]
    arg_count = len(arguments)
    apislashed = api.replace(".", "/")
    out = ""
    try:
        api_config = cherrypy.request.app.config.get("api")
    except:
        # NOTE(review): bare except — outside a cherrypy request this
        # falls back to the directive's configured base_url.
        api_config = {"base_url": self.base_url}
    target_url = api_config["base_url"] + apislashed
    try:
        if not target_url in dojo_api_inline_cache:
            # maybe add a local caching mechaism here too. eg, save the
            # read() stream to a local fs
            cherrypy.log("Not In Cache - Request and Cache: " + target_url)
            data = urllib.urlopen(target_url).read()
            dojo_api_inline_cache[target_url] = json.loads(data)
        info = dojo_api_inline_cache[target_url]
        cherrypy.log("API RPC Request Successful")
    except ValueError:
        cherrypy.log("Failed to retrieve API RPC")
        error = self.state_machine.reporter.error(
            'The API Could not be fetched for %s' % api,
            literal_block(self.content, self.content))
        return [error]

    # Flag tokens mixed into the argument list control what is rendered.
    table = ":table:" in arguments
    topfunc = ":topfunc:" in arguments
    methods = ":methods:" in arguments
    properties = ":properties:" in arguments
    events = ":events:" in arguments
    headers = ":no-headers:" not in arguments
    summary = ":summary:" in arguments
    description = ":description:" in arguments
    returns = ":returns:" in arguments
    inherited = ":no-inherited:" not in arguments
    base = ":no-base:" not in arguments
    extensions = ":extensions:" in arguments
    privates = ":privates:" in arguments
    sig = ":sig:" in arguments

    title = info["title"]

    if ":level:" in arguments:
        # The token following ":level:" is the heading level to use.
        idx = arguments.index(":level:") + 1
        try:
            head_level = arguments[idx]
            cherrypy.log("Setting Heading Level to: " + head_level)
        except:
            cherrypy.log("Heading level exception.")
            head_level = 2
    else:
        head_level = 2

    if topfunc and ("topfunc" in info):
        out += genTopFunc(info["topfunc"], summary, description, sig,
                          returns)

    if methods:
        methods_out = []
        idx = arguments.index(":methods:") + 1
        # If the token after ":methods:" is not a flag, it starts an
        # explicit list of method names to show.
        method_list = False
        if idx < arg_count:
            if arguments[idx][0] != ":":
                method_list = True
        if method_list:
            next_arg = arguments[idx]
            while next_arg[0] != ":":
                method = next_arg
                if method in info["methods"]:
                    methods_out.append(info["methods"][method])
                idx = idx + 1
                if idx < arg_count:
                    next_arg = arguments[idx]
                else:
                    break
        else:
            # No explicit list: filter by visibility/origin flags and
            # sort by name.
            for method in info["methods"]:
                mthd = info["methods"][method]
                if privates or (mthd["visibility"] != "private"):
                    if (inherited and mthd["inherited"]) or (
                            extensions and mthd["extension"]) or (
                            base and (mthd["from"] == title)):
                        methods_out.append(mthd)
            methods_out = sorted(methods_out,
                                 key=lambda mthd: mthd["name"])
        if table:
            out += genMethodTable(methods_out, summary, description)
        else:
            out += genMethod(methods_out, head_level, headers, summary,
                             description, sig, returns)

    if properties:
        props_out = []
        idx = arguments.index(":properties:") + 1
        # Same explicit-list-vs-filter logic as for methods.
        prop_list = False
        if idx < arg_count:
            if arguments[idx][0] != ":":
                prop_list = True
        if prop_list:
            next_arg = arguments[idx]
            while next_arg[0] != ":":
                prop = next_arg
                if prop in info["properties"]:
                    props_out.append(info["properties"][prop])
                idx = idx + 1
                if idx < arg_count:
                    next_arg = arguments[idx]
                else:
                    break
        else:
            for prop in info["properties"]:
                p = info["properties"][prop]
                if privates or (p["visibility"] != "private"):
                    if (inherited and p["inherited"]) or (
                            extensions and p["extension"]) or (
                            base and (p["from"] == title)):
                        props_out.append(p)
            props_out = sorted(props_out, key=lambda p: p["name"])
        if table:
            out += genPropertyTable(props_out, summary, description)
        else:
            out += genProperty(props_out, head_level, headers, summary,
                               description)

    if events:
        events_out = []
        idx = arguments.index(":events:") + 1
        # Same explicit-list-vs-filter logic as for methods.
        event_list = False
        if idx < arg_count:
            if arguments[idx][0] != ":":
                event_list = True
        if event_list:
            next_arg = arguments[idx]
            while next_arg[0] != ":":
                event = next_arg
                if event in info["events"]:
                    events_out.append(info["events"][event])
                idx = idx + 1
                if idx < arg_count:
                    next_arg = arguments[idx]
                else:
                    break
        else:
            for event in info["events"]:
                evt = info["events"][event]
                if privates or (evt["visibility"] != "private"):
                    if (inherited and evt["inherited"]) or (
                            extensions and evt["extension"]) or (
                            base and (evt["from"] == title)):
                        events_out.append(evt)
            events_out = sorted(events_out, key=lambda evt: evt["name"])
        if table:
            out += genEventTable(events_out)
        else:
            out += genEvent(events_out, head_level, headers, summary,
                            description)

    return [nodes.raw('', out, format='html')]
def run(self):
    """Fetch Dojo API metadata for the directive's argument and emit it as reST.

    Reads JSON from ``self.base_url + <api path>`` (memoised in the
    module-level ``dojo_api_inline_cache``), builds a reST fragment in
    ``out`` and feeds it back into the state machine via ``insert_input``,
    so this directive itself produces no nodes (returns ``[]``) except on
    fetch failure.

    NOTE(review): Python 2 code (print statements); ``urllib.urlopen`` and
    the module-level cache are defined elsewhere in this file.
    """
    arguments = self.arguments
    # A single argument (just the API name) means "show everything".
    if len(arguments) == 1:
        includelink = showmethods = showproperties = showexamples = showtitles = showsummary = showsignature = showlongsignature = showreturns = True
    else:
        # Otherwise each piece of output is opted in/out by flag options.
        showexamples = ":examples:" in arguments
        showtitles = ":no-titles:" not in arguments
        showsummary = ":summary:" in arguments
        showsignature = ":signature:" in arguments
        showlongsignature = ":longsignature:" in arguments
        showreturns = ":returns:" in arguments
        showmethods = ":methods:" in arguments
        showproperties = ":properties:" in arguments
        includelink = ":includelink:" in arguments
    api = self.arguments[0]
    # The API may be given dotted or slashed; normalise both ways.
    apislashed = api.replace(".", "/")
    apidotted = api.replace("/", ".")
    target_url = self.base_url + apislashed
    try:
        if not target_url in dojo_api_inline_cache:
            # maybe add a local caching mechaism here too.
            # eg, save the read() stream to a local fs
            print "Not in local cache, fetching: ", target_url
            data = urllib.urlopen(target_url).read()
            dojo_api_inline_cache[target_url] = json.loads(data)
            # print data
        info = dojo_api_inline_cache[target_url]
    except ValueError:
        # json.loads failed: the fetched payload was not valid JSON.
        error = self.state_machine.reporter.error(
            'The API Could not be fetched for %s' % api,
            literal_block(self.content, self.content))
        return [error]
    out = ""
    if showtitles:
        out = "\nAPI Info\n========\n\n"
    if includelink:
        out += ":full API:\t%s%s\n" % ("http://dojotoolkit.org/api/", apislashed)
    if showsummary and "summary" in info:
        out += ":summary:\t%s\n" % info["summary"]
    if showreturns and "returns" in info:
        out += ":returns:\t%s\n" % info["returns"]
    out += "\n"
    sig = ""
    if "type" in info and info["type"] == "Constructor":
        # Recurse: emit a nested api-inline directive for the constructor.
        out += ".. api-inline :: %s\n\t:no-titles:\n\t:signature:\n\t:constructor:\n\n" % (
            apidotted + ".constructor")
    if "parameters" in info and (showsignature or showlongsignature):
        if showtitles:
            out += "Parameters\n----------\n\n"
        # determine if ClassLike and add a `new `
        if ":constructor:" in arguments:
            sig += "var thing = new "
            # Strip the ".constructor" suffix (12 chars) added above.
            apidotted = apidotted[:-12]
        sig += apidotted + "("
        tab = ""
        for param in info["parameters"]:
            type = param.get("type")
            name = param.get("name")
            desc = param.get("description", "").strip()
            body = "".join(desc.split("\n"))
            tab += "* **%s** `%s`\n\t\t\%s\n" % (name, type, body)
            sig += " /* %s */ %s, " % (type, name)
        # Drop the trailing ", " and close the call.
        sig = sig[:-2] + ")"
        if showsignature:
            out += "Signature\n\n.. js ::\n\n\t%s\n\n" % sig
        if showlongsignature:
            out += "Overview\n\n%s\n" % tab
    if showexamples and "examples" in info:
        if showtitles:
            out += "Examples\n---------\n\n"
        for example in info['examples']:
            parts = example.split("\n")
            # intabs tracks whether we are inside a tab-indented code block.
            intabs = False
            for part in parts:
                part = part.rstrip()
                if part.startswith("\t\t"):
                    if not intabs:
                        # make a new tab block
                        intabs = True
                        out += "\n\n.. js ::\n\n%s\n" % part
                    else:
                        # keep just pumping
                        # if part.endswith("\n"): part = part[:-1]
                        out += "%s\n" % part
                # make a new text block
                else:
                    if intabs:
                        out += "\n\n"
                    out += "%s\n" % part.strip()
                    intabs = False
            out += "\n"
    # if showproperties and "properties" in info:
    #     if ":showproperties:" in arguments:
    #         only = arguments[":showproperties:"]
    #         print only
    #
    #     if showtitles:
    #         out += "Properties\n----------\n\n"
    #
    #     for prop in info["properties"]:
    #         """"""
    #         propinfo = info["properties"][prop]
    #         defines = propinfo.get("defines", [])
    #
    #         print "property: %s - %s \n" % (prop, propinfo)
    #         if apidotted in defines:
    #             print "displaying"
    #             out += ":%s:\t%s\n\n" % (prop, propinfo.get("summary", ""))
    #
    #     out += "\n"
    #
    # if showmethods and "methods" in info:
    #
    #     if showtitles:
    #         out += "Methods\n-------\n\n"
    #
    #     for method in info["methods"]:
    #         """"""
    #         infos = info["methods"][method]
    #         if apidotted in infos.get("defines", []):
    #             print "methods: %s \n %s \n" % (method, infos)
    #             out += ":**%s**:\t%s\n\n" % (method, infos.get("summary", ""))
    #
    #     out += "\n"
    #
    #out += "</pre>"
    #print out;
    try:
        # Feed the generated reST back into the parser in place of this
        # directive; "/" is used as the dummy source name.
        lines = statemachine.string2lines(out)
        self.state_machine.insert_input(lines, "/")
    except SystemMessage:
        print "fooooooo"
    return []
def get_csv_data(self):
    """
    Get CSV data from the directive content, from an external file,
    or from a URL reference.

    Returns a ``(csv_data, source)`` tuple where ``csv_data`` is a list
    of text lines and ``source`` identifies where they came from.
    Raises ``SystemMessagePropagation`` on any conflicting-option,
    file-access, or URL-access problem so the caller can surface the
    contained system message.
    """
    encoding = self.options.get(
        'encoding', self.state.document.settings.input_encoding)
    error_handler = self.state.document.settings.input_encoding_error_handler
    if self.content:
        # CSV data is from directive content.
        if 'file' in self.options or 'url' in self.options:
            # Content and an external source are mutually exclusive.
            error = self.state_machine.reporter.error(
                '"%s" directive may not both specify an external file and'
                ' have content.' % self.name,
                nodes.literal_block(
                    self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(error)
        source = self.content.source(0)
        csv_data = self.content
    elif 'file' in self.options:
        # CSV data is from an external file.
        if 'url' in self.options:
            error = self.state_machine.reporter.error(
                'The "file" and "url" options may not be simultaneously'
                ' specified for the "%s" directive.' % self.name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(error)
        # Resolve the path relative to the document being processed.
        source_dir = os.path.dirname(
            os.path.abspath(self.state.document.current_source))
        source = os.path.normpath(os.path.join(source_dir,
                                               self.options['file']))
        source = utils.relative_path(None, source)
        try:
            # Record the file as a dependency so rebuilds notice changes.
            self.state.document.settings.record_dependencies.add(source)
            csv_file = io.FileInput(source_path=source,
                                    encoding=encoding,
                                    error_handler=error_handler)
            csv_data = csv_file.read().splitlines()
        except IOError as error:
            severe = self.state_machine.reporter.severe(
                u'Problems with "%s" directive path:\n%s.'
                % (self.name, SafeString(error)),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(severe)
    elif 'url' in self.options:
        # CSV data is from a URL.
        # Do not import urllib2 at the top of the module because
        # it may fail due to broken SSL dependencies, and it takes
        # about 0.15 seconds to load.
        if sys.version_info >= (3, 0):
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen, URLError
        source = self.options['url']
        try:
            csv_text = urlopen(source).read()
        except (URLError, IOError, OSError, ValueError) as error:
            severe = self.state_machine.reporter.severe(
                'Problems with "%s" directive URL "%s":\n%s.'
                % (self.name, self.options['url'], SafeString(error)),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(severe)
        # Decode the fetched bytes through docutils' input machinery so the
        # configured encoding and error handler apply.
        csv_file = io.StringInput(
            source=csv_text, source_path=source, encoding=encoding,
            error_handler=(self.state.document.settings.
                           input_encoding_error_handler))
        csv_data = csv_file.read().splitlines()
    else:
        # No content, no file, no URL: nothing to build a table from.
        error = self.state_machine.reporter.warning(
            'The "%s" directive requires content; none supplied.'
            % self.name, nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
        raise SystemMessagePropagation(error)
    return csv_data, source
def run(self):
    """Render a zephyr-app-commands style directive as a console literal block.

    Builds the sequence of shell commands (cd / mkdir / cmake+make or
    ninja) a user would run to configure and build an application, for
    one or more host OSes, and returns it as a single
    ``literal_block`` node with console highlighting.

    Raises self.error for conflicting or unrecognised options.
    """
    # Re-run on the current document if this directive's source changes.
    self.state.document.settings.env.note_dependency(__file__)

    # Parse directive options. Don't use os.path.sep or os.path.join here!
    # That would break if building the docs on Windows.
    app = self.options.get('app', None)
    zephyr_app = self.options.get('zephyr-app', None)
    generator = self.options.get('generator', 'ninja').lower()
    host_os = self.options.get('host-os', 'all').lower()
    board = self.options.get('board', None)
    conf = self.options.get('conf', None)
    gen_args = self.options.get('gen-args', None)
    build_args = self.options.get('build-args', None)
    build_dir_append = self.options.get('build-dir', '').strip('/')
    # NOTE(review): 'goals' is effectively required — .get() returns None
    # and .split() would raise AttributeError if it were omitted.
    goals = self.options.get('goals').split()
    skip_config = 'maybe-skip-config' in self.options
    compact = 'compact' in self.options

    if app and zephyr_app:
        raise self.error('Both app and zephyr-app options were given.')

    if generator not in self.GENERATORS:
        raise self.error('Unknown generator {}; choose from: {}'.format(
            generator, self.GENERATORS))

    if host_os not in self.HOST_OS:
        # BUG FIX: this message previously formatted `generator` into the
        # "Unknown host-os" text instead of the offending host_os value.
        raise self.error('Unknown host-os {}; choose from: {}'.format(
            host_os, self.HOST_OS))

    if compact and skip_config:
        raise self.error(
            'Both compact and maybe-skip-config options were given.')

    # Allow build directories which are nested.
    build_dir = ('build' + '/' + build_dir_append).rstrip('/')
    num_slashes = build_dir.count('/')
    # Relative path from inside build_dir back up to the source directory.
    source_dir = '/'.join(['..'] * (num_slashes + 1))
    mkdir = 'mkdir' if num_slashes == 0 else 'mkdir -p'

    # Create host_os array: a single entry, or every supported OS for 'all'.
    host_os = [host_os] if host_os != "all" else self.HOST_OS

    run_config = {
        'board': board,
        'conf': conf,
        'gen_args': gen_args,
        'build_args': build_args,
        'source_dir': source_dir,
        'goals': goals,
        'compact': compact
    }

    # Build the command content as a list, then convert to string.
    content = []
    comment = None
    if len(host_os) > 1:
        # Label each per-OS command group when showing more than one OS.
        comment = '# On {}'

    if zephyr_app:
        if "unix" in host_os:
            if comment:
                content.append('{}'.format(comment.format('Linux/macOS')))
            content.append('cd $ZEPHYR_BASE/{}'.format(zephyr_app))
            content.extend(
                self._mkdir(mkdir, build_dir, "unix", skip_config, compact))
            if comment:
                content.append('')
        if "win" in host_os:
            if comment:
                content.append('{}'.format(comment.format('Windows')))
            # Windows paths use backslashes.
            zephyr_app = zephyr_app.replace('/', '\\')
            # BUG FIX: escape the backslash ('\{' is an invalid escape
            # sequence); the runtime string is unchanged.
            content.append('cd %ZEPHYR_BASE%\\{}'.format(zephyr_app))
            content.extend(
                self._mkdir(mkdir, build_dir, "win", skip_config, compact))
            if not compact or comment:
                content.append('')
    elif app:
        content.append('cd {}'.format(app))
        if not compact:
            content.append('')

    if not compact:
        content.append(
            '# Use cmake to configure a {}-based build system:'.format(
                generator.capitalize()))  # noqa: E501

    if generator == 'make':
        content.extend(self._generate_make(**run_config))
    elif generator == 'ninja':
        content.extend(self._generate_ninja(**run_config))
    content = '\n'.join(content)

    # Create the nodes.
    literal = nodes.literal_block(content, content)
    self.add_name(literal)
    literal['language'] = 'console'
    return [literal]
def file2literal(fname): with open(fname, "r") as src: data = src.read() node = nodes.literal_block(data, data) return node
if before_index < 0: raise self.severe('Problem with "end-before" option of "%s" ' 'directive:\nText not found.' % self.name) rawtext = rawtext[:before_index] include_lines = statemachine.string2lines(rawtext, tab_width, convert_whitespace=True) if 'literal' in self.options: # Convert tabs to spaces, if `tab_width` is positive. if tab_width >= 0: text = rawtext.expandtabs(tab_width) else: text = rawtext literal_block = nodes.literal_block(rawtext, source=path, classes=self.options.get( 'class', [])) literal_block.line = 1 self.add_name(literal_block) if 'number-lines' in self.options: try: startline = int(self.options['number-lines'] or 1) except ValueError: raise self.error(':number-lines: with non-integer ' 'start value') endline = startline + len(include_lines) if text.endswith('\n'): text = text[:-1] tokens = NumberLines([([], text)], startline, endline) for classes, value in tokens: if classes:
class PySideInclude(Directive):
    """
    Like ``.. include:: :literal:``, but only warns if the include file is
    not found, and does not raise errors.

    Also has several options for selecting what to include: a named Python
    object (``pyobject``), explicit line numbers (``lines``), text markers
    (``start-after``/``end-before``), or a ``//![name]``-delimited
    ``snippet``.

    NOTE(review): Python 2 syntax (``except ValueError, err``); depends on
    the Sphinx build environment (``env``) being attached to the document
    settings.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'linenos': directives.flag,
        'tab-width': int,
        'language': directives.unchanged_required,
        'encoding': directives.encoding,
        'pyobject': directives.unchanged_required,
        'lines': directives.unchanged_required,
        'start-after': directives.unchanged_required,
        'end-before': directives.unchanged_required,
        'prepend': directives.unchanged_required,
        'append': directives.unchanged_required,
        'snippet': directives.unchanged_required,
    }

    def run(self):
        """Resolve, read, filter and return the included file as a literal block."""
        document = self.state.document
        filename = self.arguments[0]
        if not document.settings.file_insertion_enabled:
            # Respect the docutils setting that forbids file inclusion.
            return [document.reporter.warning('File insertion disabled',
                                              line=self.lineno)]
        env = document.settings.env
        if filename.startswith('/') or filename.startswith(os.sep):
            # Absolute paths are taken relative to the source root.
            rel_fn = filename[1:]
        else:
            # Relative paths are taken relative to the including document.
            docdir = path.dirname(env.doc2path(env.docname, base=None))
            rel_fn = path.join(docdir, filename)
        try:
            fn = path.join(env.srcdir, rel_fn)
        except UnicodeDecodeError:
            # the source directory is a bytestring with non-ASCII characters;
            # let's try to encode the rel_fn in the file system encoding
            rel_fn = rel_fn.encode(sys.getfilesystemencoding())
            fn = path.join(env.srcdir, rel_fn)

        if 'pyobject' in self.options and 'lines' in self.options:
            # The two selection mechanisms are mutually exclusive.
            return [document.reporter.warning(
                'Cannot use both "pyobject" and "lines" options',
                line=self.lineno)]

        encoding = self.options.get('encoding', env.config.source_encoding)
        codec_info = codecs.lookup(encoding)
        try:
            # Universal-newlines mode wrapped in the requested codec.
            f = codecs.StreamReaderWriter(open(fn, 'U'),
                                          codec_info[2], codec_info[3],
                                          'strict')
            lines = f.readlines()
            f.close()
        except (IOError, OSError):
            # Per the class contract: warn instead of erroring out.
            return [document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)]
        except UnicodeError:
            return [document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option'
                % (encoding, filename))]

        objectname = self.options.get('pyobject')
        if objectname is not None:
            # Narrow the included lines to the named Python object's span.
            from sphinx.pycode import ModuleAnalyzer
            analyzer = ModuleAnalyzer.for_file(fn, '')
            tags = analyzer.find_tags()
            if objectname not in tags:
                return [document.reporter.warning(
                    'Object named %r not found in include file %r' %
                    (objectname, filename), line=self.lineno)]
            else:
                lines = lines[tags[objectname][1]-1 : tags[objectname][2]-1]

        linespec = self.options.get('lines')
        if linespec is not None:
            try:
                linelist = parselinenos(linespec, len(lines))
            except ValueError, err:
                return [document.reporter.warning(str(err),
                                                  line=self.lineno)]
            lines = [lines[i] for i in linelist]

        startafter = self.options.get('start-after')
        endbefore = self.options.get('end-before')
        prepend = self.options.get('prepend')
        append = self.options.get('append')
        snippet = self.options.get('snippet')
        if snippet:
            # A snippet name expands to identical //![name] start/end markers.
            startafter = "//![%s]" % snippet
            endbefore = "//![%s]" % snippet
        if startafter is not None or endbefore is not None:
            # With no start marker, begin including immediately.
            use = not startafter
            res = []
            for line in lines:
                if not use and startafter and startafter in line:
                    use = True
                elif use and endbefore and endbefore in line:
                    use = False
                    break
                elif use:
                    # Lines carrying snippet markers themselves are dropped.
                    if not line.startswith("//!"):
                        res.append(line)
            lines = res

        if prepend:
            lines.insert(0, prepend + '\n')
        if append:
            lines.append(append + '\n')

        text = ''.join(lines)
        if self.options.get('tab-width'):
            text = text.expandtabs(self.options['tab-width'])
        retnode = nodes.literal_block(text, text, source=fn)
        retnode.line = 1
        retnode.attributes['line_number'] = self.lineno
        if self.options.get('language', ''):
            retnode['language'] = self.options['language']
        if 'linenos' in self.options:
            retnode['linenos'] = True
        # Rebuild the including document when the included file changes.
        document.settings.env.note_dependency(rel_fn)
        return [retnode]
def run(self): code = u'\n'.join(self.content) literal = nodes.literal_block(code, code) filename = self.arguments[0] addCode(self.state.document.settings.env, filename, code) return [literal]
def run(self) -> List[Node]:
    """Build a highlighted ``literal_block`` from the directive content.

    Handles the ``emphasize-lines``, ``dedent``, ``linenos``/``lineno-start``,
    ``class``, ``force``, ``caption`` and ``name`` options, and falls back to
    the document's current highlight language when no language argument is
    given. Returns the (possibly caption-wrapped) node in a one-element list,
    or a reporter warning on invalid option values.
    """
    document = self.state.document
    code = '\n'.join(self.content)
    location = self.state_machine.get_source_and_line(self.lineno)

    linespec = self.options.get('emphasize-lines')
    if linespec:
        try:
            nlines = len(self.content)
            hl_lines = parselinenos(linespec, nlines)
            if any(i >= nlines for i in hl_lines):
                # Out-of-range entries are warned about, then dropped below.
                logger.warning(
                    __('line number spec is out of range(1-%d): %r') %
                    (nlines, self.options['emphasize-lines']),
                    location=location)

            # parselinenos yields 0-based indexes; highlighting is 1-based.
            hl_lines = [x + 1 for x in hl_lines if x < nlines]
        except ValueError as err:
            return [document.reporter.warning(err, line=self.lineno)]
    else:
        hl_lines = None

    if 'dedent' in self.options:
        location = self.state_machine.get_source_and_line(self.lineno)
        lines = code.split('\n')
        lines = dedent_lines(lines, self.options['dedent'],
                             location=location)
        code = '\n'.join(lines)

    literal = nodes.literal_block(code, code)  # type: Element
    if 'linenos' in self.options or 'lineno-start' in self.options:
        literal['linenos'] = True
    literal['classes'] += self.options.get('class', [])
    literal['force'] = 'force' in self.options
    if self.arguments:
        # highlight language specified
        literal['language'] = self.arguments[0]
    else:
        # no highlight language specified.  Then this directive refers the current
        # highlight setting via ``highlight`` directive or ``highlight_language``
        # configuration.
        literal['language'] = self.env.temp_data.get(
            'highlight_language', self.config.highlight_language)
    extra_args = literal['highlight_args'] = {}
    if hl_lines is not None:
        extra_args['hl_lines'] = hl_lines
    if 'lineno-start' in self.options:
        extra_args['linenostart'] = self.options['lineno-start']
    self.set_source_info(literal)

    caption = self.options.get('caption')
    if caption:
        try:
            # Wrapping replaces `literal` with a container holding the caption.
            literal = container_wrapper(self, literal, caption)
        except ValueError as exc:
            return [document.reporter.warning(exc, line=self.lineno)]

    # literal will be note_implicit_target that is linked from caption and numref.
    # when options['name'] is provided, it should be primary ID.
    self.add_name(literal)

    return [literal]
def apply(self):
    """Replace every substitution reference in the document with a deep copy
    of its definition.

    Undefined references, definitions exceeding the configured
    line-length-limit, and circular definition chains are each replaced by
    a ``problematic`` node back-linked to a reporter error. Nested
    substitution references found inside a copied definition are appended
    to the work list so they get resolved too; the ``nested`` map records
    visited names to detect cycles.
    """
    defs = self.document.substitution_defs
    # Case-normalized name -> canonical definition name.
    normed = self.document.substitution_names
    nested = {}
    line_length_limit = getattr(self.document.settings,
                                "line_length_limit", 10000)
    # Materialize the list: we append nested references while iterating.
    subreflist = list(self.document.traverse(nodes.substitution_reference))
    for ref in subreflist:
        msg = ''
        refname = ref['refname']
        if refname in defs:
            key = refname
        else:
            # Fall back to a case-insensitive match.
            normed_name = refname.lower()
            key = normed.get(normed_name, None)
        if key is None:
            msg = self.document.reporter.error(
                'Undefined substitution referenced: "%s".' % refname,
                base_node=ref)
        else:
            subdef = defs[key]
            if len(subdef.astext()) > line_length_limit:
                msg = self.document.reporter.error(
                    'Substitution definition "%s" exceeds the'
                    ' line-length-limit.' % (key))
        if msg:
            # Swap the reference for a problematic node linked to the error.
            msgid = self.document.set_id(msg)
            prb = nodes.problematic(ref.rawsource, ref.rawsource,
                                    refid=msgid)
            prbid = self.document.set_id(prb)
            msg.add_backref(prbid)
            ref.replace_self(prb)
            continue
        parent = ref.parent
        index = parent.index(ref)
        # Honour the ltrim/rtrim/trim attributes by stripping whitespace
        # from the adjacent Text siblings.
        if ('ltrim' in subdef.attributes
                or 'trim' in subdef.attributes):
            if index > 0 and isinstance(parent[index - 1], nodes.Text):
                parent[index - 1] = parent[index - 1].rstrip()
        if ('rtrim' in subdef.attributes
                or 'trim' in subdef.attributes):
            if (len(parent) > index + 1
                    and isinstance(parent[index + 1], nodes.Text)):
                parent[index + 1] = parent[index + 1].lstrip()
        subdef_copy = subdef.deepcopy()
        try:
            # Take care of nested substitution references:
            for nested_ref in subdef_copy.traverse(
                    nodes.substitution_reference):
                nested_name = normed[nested_ref['refname'].lower()]
                if nested_name in nested.setdefault(nested_name, []):
                    # Seeing the same name twice on this chain is a cycle.
                    raise CircularSubstitutionDefinitionError
                nested[nested_name].append(key)
                # Remember the originating reference for error reporting.
                nested_ref['ref-origin'] = ref
                subreflist.append(nested_ref)
        except CircularSubstitutionDefinitionError:
            parent = ref.parent
            if isinstance(parent, nodes.substitution_definition):
                msg = self.document.reporter.error(
                    'Circular substitution definition detected:',
                    nodes.literal_block(parent.rawsource,
                                        parent.rawsource),
                    line=parent.line, base_node=parent)
                parent.replace_self(msg)
            else:
                # find original ref substitution which caused this error
                ref_origin = ref
                while ref_origin.hasattr('ref-origin'):
                    ref_origin = ref_origin['ref-origin']
                msg = self.document.reporter.error(
                    'Circular substitution definition referenced: '
                    '"%s".' % refname, base_node=ref_origin)
            msgid = self.document.set_id(msg)
            prb = nodes.problematic(ref.rawsource, ref.rawsource,
                                    refid=msgid)
            prbid = self.document.set_id(prb)
            msg.add_backref(prbid)
            ref.replace_self(prb)
            continue
        ref.replace_self(subdef_copy.children)
        # register refname of the replacement node(s)
        # (needed for resolution of references)
        for node in subdef_copy.children:
            if isinstance(node, nodes.Referential):
                # HACK: verify refname attribute exists.
                # Test with docs/dev/todo.txt, see. |donate|
                if 'refname' in node:
                    self.document.note_refname(node)
self.block_text, self.block_text), line=self.lineno) raise SystemMessagePropagation(severe) elif self.options.has_key('url'): # CSV data is from a URL. # Do not import urllib2 at the top of the module because # it may fail due to broken SSL dependencies, and it takes # about 0.15 seconds to load. import urllib2 source = self.options['url'] try: csv_text = urllib2.urlopen(source).read() except (urllib2.URLError, IOError, OSError, ValueError), error: severe = self.state_machine.reporter.severe( 'Problems with "%s" directive URL "%s":\n%s.' % (self.name, self.options['url'], error), nodes.literal_block(self.block_text, self.block_text), line=self.lineno) raise SystemMessagePropagation(severe) csv_file = io.StringInput( source=csv_text, source_path=source, encoding=encoding, error_handler=(self.state.document.settings.\ input_encoding_error_handler)) csv_data = csv_file.read().splitlines() else: error = self.state_machine.reporter.warning( 'The "%s" directive requires content; none supplied.' % self.name, nodes.literal_block( self.block_text, self.block_text), line=self.lineno) raise SystemMessagePropagation(error) return csv_data, source
after_index = include_text.find(after_text) if after_index < 0: raise self.severe('Problem with "start-after" option of "%s" ' 'directive:\nText not found.' % self.name) include_text = include_text[after_index + len(after_text):] before_text = self.options.get('end-before', None) if before_text: # skip content in include_text after *and incl.* a matching text before_index = include_text.find(before_text) if before_index < 0: raise self.severe('Problem with "end-before" option of "%s" ' 'directive:\nText not found.' % self.name) include_text = include_text[:before_index] if 'literal' in self.options: literal_block = nodes.literal_block(include_text, include_text, source=path) literal_block.line = 1 return [literal_block] else: include_lines = statemachine.string2lines(include_text, convert_whitespace=1) self.state_machine.insert_input(include_lines, path) return [] class Raw(Directive): """ Pass through content unchanged Content is included in output based on type argument
class CSVTable(Table):
    """Table directive that builds its rows from CSV data supplied as
    directive content, an external ``file``, or a ``url``.

    NOTE(review): Python 2 except-clause syntax; ``run`` here appears to
    end at the error paths — the table-node construction presumably
    follows elsewhere; confirm against the full source.
    """

    option_spec = {'header-rows': directives.nonnegative_int,
                   'stub-columns': directives.nonnegative_int,
                   'header': directives.unchanged,
                   'widths': directives.positive_int_list,
                   'file': directives.path,
                   'url': directives.uri,
                   'encoding': directives.encoding,
                   'class': directives.class_option,
                   'name': directives.unchanged,
                   # field delimiter char
                   'delim': directives.single_char_or_whitespace_or_unicode,
                   # treat whitespace after delimiter as significant
                   'keepspace': directives.flag,
                   # text field quote/unquote char:
                   'quote': directives.single_char_or_unicode,
                   # char used to escape delim & quote as-needed:
                   'escape': directives.single_char_or_unicode,}

    class DocutilsDialect(csv.Dialect):

        """CSV dialect for `csv_table` directive."""

        delimiter = ','
        quotechar = '"'
        doublequote = True
        skipinitialspace = True
        lineterminator = '\n'
        quoting = csv.QUOTE_MINIMAL

        def __init__(self, options):
            # Directive options override the class-level dialect defaults.
            if 'delim' in options:
                self.delimiter = str(options['delim'])
            if 'keepspace' in options:
                self.skipinitialspace = False
            if 'quote' in options:
                self.quotechar = str(options['quote'])
            if 'escape' in options:
                # An explicit escape char replaces doubled-quote escaping.
                self.doublequote = False
                self.escapechar = str(options['escape'])
            csv.Dialect.__init__(self)

    class HeaderDialect(csv.Dialect):

        """CSV dialect to use for the "header" option data."""

        delimiter = ','
        quotechar = '"'
        escapechar = '\\'
        doublequote = False
        skipinitialspace = True
        lineterminator = '\n'
        quoting = csv.QUOTE_MINIMAL

    def check_requirements(self):
        # Hook for subclasses; the base CSV table has no extra requirements.
        pass

    def run(self):
        """Parse the CSV source into header/body rows, trapping CSV and
        propagated system-message errors as reporter nodes."""
        try:
            if (not self.state.document.settings.file_insertion_enabled
                and ('file' in self.options
                     or 'url' in self.options)):
                # External sources are disabled by the document settings.
                warning = self.state_machine.reporter.warning(
                    'File and URL access deactivated; ignoring "%s" '
                    'directive.' % self.name, nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                return [warning]
            self.check_requirements()
            title, messages = self.make_title()
            csv_data, source = self.get_csv_data()
            table_head, max_header_cols = self.process_header_option()
            rows, max_cols = self.parse_csv_data_into_rows(
                csv_data, self.DocutilsDialect(self.options), source)
            max_cols = max(max_cols, max_header_cols)
            header_rows = self.options.get('header-rows', 0)
            stub_columns = self.options.get('stub-columns', 0)
            self.check_table_dimensions(rows, header_rows, stub_columns)
            table_head.extend(rows[:header_rows])
            table_body = rows[header_rows:]
            col_widths = self.get_column_widths(max_cols)
            # Pad ragged rows so every row has max_cols cells.
            self.extend_short_rows_with_empty_cells(max_cols,
                                                    (table_head, table_body))
        except SystemMessagePropagation, detail:
            # A helper already built the system message; just return it.
            return [detail.args[0]]
        except csv.Error, detail:
            error = self.state_machine.reporter.error(
                'Error with CSV data in "%s" directive:\n%s'
                % (self.name, detail), nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            return [error]
line=self.lineno) raise SystemMessagePropagation(severe) elif 'url' in self.options: # CSV data is from a URL. # Do not import urllib2 at the top of the module because # it may fail due to broken SSL dependencies, and it takes # about 0.15 seconds to load. import urllib2 source = self.options['url'] try: csv_text = urllib2.urlopen(source).read() except (urllib2.URLError, IOError, OSError, ValueError), error: severe = self.state_machine.reporter.severe( 'Problems with "%s" directive URL "%s":\n%s.' % (self.name, self.options['url'], SafeString(error)), nodes.literal_block(self.block_text, self.block_text), line=self.lineno) raise SystemMessagePropagation(severe) csv_file = io.StringInput( source=csv_text, source_path=source, encoding=encoding, error_handler=(self.state.document.settings.\ input_encoding_error_handler)) csv_data = csv_file.read().splitlines() else: error = self.state_machine.reporter.warning( 'The "%s" directive requires content; none supplied.' % self.name, nodes.literal_block(self.block_text, self.block_text), line=self.lineno) raise SystemMessagePropagation(error) return csv_data, source
def make_block(command, opt, content): h = "$ {} {}\n".format(command, opt) + content return section(None, title(None, command), literal_block(None, h, language='console'), ids=[make_id(command)], names=[command])
def run(self):
    """Document an argparse parser as a list of docutils nodes.

    The parser is located via ``:module:``/``:func:``, ``:ref:``, or
    ``:filename:``/``:func:``; it may be an ``ArgumentParser`` instance, a
    factory returning one, or (with ``:passparser:``) a function that
    populates a parser passed to it. The parsed structure is rendered as
    usage, action groups, subcommands and epilog, honouring the various
    ``no*`` and markdown options.

    Raises self.error when the parser cannot be located or imported.
    """
    if 'module' in self.options and 'func' in self.options:
        module_name = self.options['module']
        attr_name = self.options['func']
    elif 'ref' in self.options:
        # ":ref: pkg.mod.attr" splits into module path and attribute.
        _parts = self.options['ref'].split('.')
        module_name = '.'.join(_parts[0:-1])
        attr_name = _parts[-1]
    elif 'filename' in self.options and 'func' in self.options:
        mod = {}
        try:
            f = open(self.options['filename'])
        except IOError:
            # try open with abspath
            f = open(os.path.abspath(self.options['filename']))
        # BUG FIX: close the file handle (it previously leaked).
        with f:
            source = f.read()
        # SECURITY NOTE: this executes arbitrary code from :filename:;
        # only use with trusted documentation sources.
        code = compile(source, self.options['filename'], 'exec')
        exec(code, mod)
        attr_name = self.options['func']
        func = mod[attr_name]
    else:
        raise self.error(
            ':module: and :func: should be specified, or :ref:, or :filename: and :func:'
        )

    # Skip this if we're dealing with a local file, since it obviously can't be imported
    if 'filename' not in self.options:
        try:
            mod = __import__(module_name, globals(), locals(), [attr_name])
        # BUG FIX: was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            raise self.error('Failed to import "%s" from "%s"' %
                             (attr_name, module_name))

        if not hasattr(mod, attr_name):
            raise self.error(
                ('Module "%s" has no attribute "%s"\n'
                 'Incorrect argparse :module: or :func: values?') %
                (module_name, attr_name))
        func = getattr(mod, attr_name)

    # The target may already be a parser, a parser-populating callback,
    # or a factory returning a parser.
    if isinstance(func, ArgumentParser):
        parser = func
    elif 'passparser' in self.options:
        parser = ArgumentParser()
        func(parser)
    else:
        parser = func()

    if 'path' not in self.options:
        self.options['path'] = ''
    path = str(self.options['path'])
    if 'prog' in self.options:
        parser.prog = self.options['prog']
    result = parse_parser(
        parser,
        skip_default_values='nodefault' in self.options,
        skip_default_const_values='nodefaultconst' in self.options)
    result = parser_navigate(result, path)
    if 'manpage' in self.options:
        return self._construct_manpage_specific_structure(result)

    # Handle nested content, where markdown needs to be preprocessed
    items = []
    nested_content = nodes.paragraph()
    if 'markdown' in self.options:
        from sphinxarg.markdown import parseMarkDownBlock
        items.extend(parseMarkDownBlock('\n'.join(self.content) + '\n'))
    else:
        self.state.nested_parse(self.content, self.content_offset,
                                nested_content)
    nested_content = nested_content.children

    # add common content between
    for item in nested_content:
        if not isinstance(item, nodes.definition_list):
            items.append(item)

    markDownHelp = 'markdownhelp' in self.options

    if 'description' in result and 'nodescription' not in self.options:
        if markDownHelp:
            items.extend(renderList([result['description']], True))
        else:
            items.append(
                self._nested_parse_paragraph(result['description']))
    items.append(nodes.literal_block(text=result['usage']))
    items.extend(
        print_action_groups(result, nested_content, markDownHelp,
                            settings=self.state.document.settings))
    if 'nosubcommands' not in self.options:
        items.extend(
            print_subcommands(result, nested_content, markDownHelp,
                              settings=self.state.document.settings))
    if 'epilog' in result and 'noepilog' not in self.options:
        items.append(self._nested_parse_paragraph(result['epilog']))

    # Traverse the returned nodes, modifying the title IDs as necessary to avoid repeats
    ensureUniqueIDs(items)
    return items
use = not startafter res = [] for line in lines: if not use and startafter in line: use = True elif use and endbefore in line: use = False break elif use: res.append(line) lines = res text = ''.join(lines) text = re.sub("\r\n", "\n", text) retnode = nodes.literal_block(text, text, source=fn) retnode.line = 1 if options.get('language', ''): retnode['language'] = options['language'] if 'linenos' in options: retnode['linenos'] = True state.document.settings.env.note_dependency(rel_fn) return [retnode] literalinclude_directive.options = { 'linenos': directives.flag, 'language': directives.unchanged_required, 'encoding': directives.encoding, 'pyobject': directives.unchanged_required, 'lines': directives.unchanged_required,
def _dbapi_node(self): dialect_name, dbapi_name = self.dialect_name.split("+") try: dialect_directive = self._dialects[dialect_name] except KeyError: raise Exception( "No .. dialect:: %s directive has been established" % dialect_name) output = [] content = self._parse_content() parent_section_ref = self.state.parent.children[0]['ids'][0] self._append_dbapi_bullet(dialect_name, dbapi_name, content['name'], parent_section_ref) p = nodes.paragraph( '', '', nodes.Text( "Support for the %s database via the %s driver." % (dialect_directive.database_name, content['name']), "Support for the %s database via the %s driver." % (dialect_directive.database_name, content['name'])), ) self.state.nested_parse(content['text'], 0, p) output.append(p) if "url" in content or "driverurl" in content: sec = nodes.section( '', nodes.title("DBAPI", "DBAPI"), ids=["dialect-%s-%s-url" % (dialect_name, dbapi_name)]) if "url" in content: text = "Documentation and download information (if applicable) "\ "for %s is available at:\n" % content["name"] uri = content['url'] sec.append( nodes.paragraph( '', '', nodes.Text(text, text), nodes.reference( '', '', nodes.Text(uri, uri), refuri=uri, ))) if "driverurl" in content: text = "Drivers for this database are available at:\n" sec.append( nodes.paragraph( '', '', nodes.Text(text, text), nodes.reference('', '', nodes.Text(content['driverurl'], content['driverurl']), refuri=content['driverurl']))) output.append(sec) if "connectstring" in content: sec = nodes.section( '', nodes.title("Connecting", "Connecting"), nodes.paragraph( '', '', nodes.Text("Connect String:", "Connect String:"), nodes.literal_block(content['connectstring'], content['connectstring'])), ids=["dialect-%s-%s-connect" % (dialect_name, dbapi_name)]) output.append(sec) return output
def run(self):
    """Include a file as part of the content of this reST file.

    Adapted from the Sphinx/docutils ``Include`` directive (see
    https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/other.py),
    with an extra ``self.filterText`` hook applied to the raw text before
    it is inserted.  Supports the standard include options:
    start-line/end-line, start-after/end-before, literal, code,
    number-lines, encoding, tab-width.
    """
    # type: () -> List[nodes.Node]
    env = self.state.document.settings.env
    if self.arguments[0].startswith('<') and \
       self.arguments[0].endswith('>'):
        # docutils "standard" includes, do not do path processing
        return BaseInclude.run(self)
    # Resolve the path relative to the source file and register it with
    # the build environment so changes trigger a rebuild.
    rel_filename, filename = env.relfn2path(self.arguments[0])
    self.arguments[0] = filename
    env.note_included(filename)
    #end
    if not self.state.document.settings.file_insertion_enabled:
        raise self.warning('"%s" directive disabled.' % self.name)
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    path = directives.path(self.arguments[0])
    if path.startswith('<') and path.endswith('>'):
        # <name> form: look up in the docutils standard include directory.
        path = os.path.join(self.standard_include_path, path[1:-1])
    path = os.path.normpath(os.path.join(source_dir, path))
    path = utils.relative_path(None, path)
    path = nodes.reprunicode(path)
    encoding = self.options.get(
        'encoding', self.state.document.settings.input_encoding)
    e_handler = self.state.document.settings.input_encoding_error_handler
    tab_width = self.options.get('tab-width',
                                 self.state.document.settings.tab_width)
    try:
        self.state.document.settings.record_dependencies.add(path)
        include_file = io.FileInput(source_path=path,
                                    encoding=encoding,
                                    error_handler=e_handler)
    except UnicodeEncodeError as error:
        raise self.severe(u'Problems with "%s" directive path:\n'
                          'Cannot encode input file path "%s" '
                          '(wrong locale?).' %
                          (self.name, SafeString(path)))
    except IOError as error:
        raise self.severe(u'Problems with "%s" directive path:\n%s.'
                          % (self.name, ErrorString(error)))
    # Optional line-range selection.  NOTE(review): a start-line of 0 is
    # falsy, so `startline or ...` still reads the whole file in that
    # case — same behavior as upstream docutils.
    startline = self.options.get('start-line', None)
    endline = self.options.get('end-line', None)
    try:
        if startline or (endline is not None):
            lines = include_file.readlines()
            rawtext = ''.join(lines[startline:endline])
        else:
            rawtext = include_file.read()
    except UnicodeError as error:
        raise self.severe(u'Problem with "%s" directive:\n%s' %
                          (self.name, ErrorString(error)))
    # start-after/end-before: no restrictions on newlines in match-text,
    # and no restrictions on matching inside lines vs. line boundaries
    after_text = self.options.get('start-after', None)
    if after_text:
        # skip content in rawtext before *and incl.* a matching text
        after_index = rawtext.find(after_text)
        if after_index < 0:
            raise self.severe('Problem with "start-after" option of "%s" '
                              'directive:\nText not found.' % self.name)
        rawtext = rawtext[after_index + len(after_text):]
    before_text = self.options.get('end-before', None)
    if before_text:
        # skip content in rawtext after *and incl.* a matching text
        before_index = rawtext.find(before_text)
        if before_index < 0:
            raise self.severe('Problem with "end-before" option of "%s" '
                              'directive:\nText not found.' % self.name)
        rawtext = rawtext[:before_index]
    # Project-specific post-processing hook (this is the local addition
    # over the stock docutils Include directive).
    rawtext = self.filterText(rawtext)
    #if (path == "../examples/neuropil_hydra.c"):
    #raise self.severe('filterd text from %s:\n%s' % (path, rawtext))
    include_lines = statemachine.string2lines(rawtext, tab_width,
                                              convert_whitespace=True)
    if 'literal' in self.options:
        # Convert tabs to spaces, if `tab_width` is positive.
        if tab_width >= 0:
            text = rawtext.expandtabs(tab_width)
        else:
            text = rawtext
        literal_block = nodes.literal_block(rawtext, source=path,
                                            classes=self.options.get(
                                                'class', []))
        literal_block.line = 1
        self.add_name(literal_block)
        if 'number-lines' in self.options:
            try:
                startline = int(self.options['number-lines'] or 1)
            except ValueError:
                raise self.error(':number-lines: with non-integer '
                                 'start value')
            endline = startline + len(include_lines)
            # Drop a single trailing newline so NumberLines doesn't
            # emit an empty final numbered line.
            if text.endswith('\n'):
                text = text[:-1]
            tokens = NumberLines([([], text)], startline, endline)
            for classes, value in tokens:
                if classes:
                    literal_block += nodes.inline(value, value,
                                                  classes=classes)
                else:
                    literal_block += nodes.Text(value, value)
        else:
            literal_block += nodes.Text(text, text)
        return [literal_block]
    if 'code' in self.options:
        # Delegate syntax highlighting to the standard CodeBlock directive.
        self.options['source'] = path
        codeblock = CodeBlock(self.name,
                              [self.options.pop('code')],  # arguments
                              self.options,
                              include_lines,  # content
                              self.lineno,
                              self.content_offset,
                              self.block_text,
                              self.state,
                              self.state_machine)
        return codeblock.run()
    # Default: splice the included lines into the reST input stream so
    # they are parsed as normal document content.
    self.state_machine.insert_input(include_lines, path)
    return []
def run(self):
    """Execute the directive: run its content through an embedded IPython
    shell and splice the resulting ``.. code-block:: ipython`` lines (plus
    any generated figure directives) back into the reST input stream.

    Returns an empty node list; all output is delivered via
    ``state_machine.insert_input``.
    """
    debug = False
    #TODO, any reason block_parser can't be a method of embeddable shell
    # then we wouldn't have to carry these around
    rgxin, rgxout, promptin, promptout = self.setup()

    options = self.options
    self.shell.is_suppress = 'suppress' in options
    self.shell.is_doctest = 'doctest' in options
    self.shell.is_verbatim = 'verbatim' in options

    # handle pure python code: convert it to prompted IPython input first
    if 'python' in self.arguments:
        content = self.content
        self.content = self.shell.process_pure_python2(content)

    # Blank-line-separated chunks are processed independently.
    parts = '\n'.join(self.content).split('\n\n')

    lines = ['.. code-block:: ipython', '']
    figures = []
    for part in parts:
        block = block_parser(part, rgxin, rgxout, promptin, promptout)
        if len(block):
            rows, figure = self.shell.process_block(block)
            for row in rows:
                # hack
                # if row == '':
                #   continue
                # lines.extend([' %s'% row.strip()])
                # a row may itself contain newlines; indent each line
                lines.extend(
                    [' %s' % line for line in re.split('[\n]+', row)])
            if figure is not None:
                figures.append(figure)

    #text = '\n'.join(lines)
    #figs = '\n'.join(figures)
    # Figure directives go after the code block, blank-line separated.
    for figure in figures:
        lines.append('')
        lines.extend(figure.split('\n'))
        lines.append('')

    #print lines
    # Only insert if the shell produced something beyond the two header
    # lines of the code-block scaffold.
    if len(lines) > 2:
        if debug:
            # BUGFIX: was a Python-2-only `print` statement, which is a
            # syntax error under Python 3; the parenthesized single-arg
            # form behaves identically on both 2 and 3.
            print('\n'.join(lines))
        else:
            #NOTE: this raises some errors, what's it for?
            #print('INSERTING %d lines' % len(lines))
            self.state_machine.insert_input(
                lines, self.state_machine.input_lines.source(0))

    # NOTE(review): txtnode is built but never returned or attached —
    # kept for parity with the original; appears to be dead code.
    text = '\n'.join(lines)
    txtnode = nodes.literal_block(text, text)
    txtnode['language'] = 'ipython'
    #imgnode = nodes.image(figs)

    # cleanup
    self.teardown()

    return []  #, imgnode]
return [document.reporter.warning(str(err), line=self.lineno)] else: hl_lines = None specified_language = self.options.get('language') light = self.options.get('light', False) nodeList = [] languageDict = {'python': 'Python', 'java': 'Java', 'csharp': 'C#'} for language in languageDict: if specified_language is not None and specified_language != language: continue if not light: language_title = nodes.Text(languageDict[language] + ':', language) nodeList.append(language_title) literal = nodes.literal_block(self.translateCode(code, language), self.translateCode(code, language)) literal['language'] = language if hl_lines is not None: literal['highlight_args'] = {'hl_lines': hl_lines} set_source_info(self, literal) nodeList.append(literal) return nodeList def setup(app): app.add_config_value('codeexample', False, False) app.add_directive('codeexample', CodeExampleDirective)
def process_solutions(self, doctree: nodes.document, src: str) -> None:
    """Handle any solutions contained in the document.

    This ensures that a ``*.py`` file is created in the ``resources`` directory
    containing the actual solution.

    It then also rewrites the given doctree to output a pair of code cells in
    the resulting notebook. The first is a prompt for the user to input their
    solution and the second contains a :magic:`ipython:load` declaration to
    give the user the option to load in the solution if they wish to see it.

    Parameters
    ----------
    doctree:
       The doctree to process
    src:
       The path to the file containing the document being processed
    """

    docpath = pathlib.Path(src)
    logger.debug("[tutorial]: processing solutions for: %s", docpath)
    basename = f"{docpath.stem}-soln"

    for idx, soln in enumerate(doctree.traverse(condition=solution)):
        # One numbered .py file per solution node, e.g. "intro-soln-01.py".
        name = f"{basename}-{idx+1:02d}.py"
        destination = pathlib.Path("resources", docpath.with_suffix(""), name)
        # URI of the solution file relative to the document, used by %load.
        refuri = relative_uri(src, str(destination))

        # Convert the solution to a valid Python document that can be executed.
        document = new_document("<solution>")
        document += soln

        # Rather than go through the trouble of maintaining 2 document translators,
        # one for notebooks and another for Python files. Let's just use the notebook
        # translator and do some post-processing on the result - much easier.
        translator = NotebookTranslator(document)
        document.walkabout(translator)
        notebook = translator.asnotebook()

        blocks = []
        for cell in notebook.cells:
            source = cell.source

            # Comment out the lines containing markdown so the resulting
            # .py file stays executable.
            if cell.cell_type == "markdown":
                source = textwrap.indent(source, "# ")

            blocks.append(source)

        # Queue the solution file for creation by the builder.
        self.resources[str(destination)] = ("create", "\n".join(blocks))

        # TODO: Expose config options for these
        # TODO: Translations?
        your_soln = nodes.literal_block(
            "", "# Write your solution here...\n", language="python"
        )
        load_soln = nodes.literal_block(
            "",
            f"# Execute this cell to load the example solution\n%load {refuri}\n",
            language="python",
        )

        # Replace the actual solution with the 2 cells defined above.
        # NOTE(review): direct .children assignment bypasses docutils
        # parent bookkeeping — presumably intentional here; verify.
        soln.children = [your_soln, load_soln]