def _parseInline(self, text):
    string = statemachine.StringList([text])
    para = nodes.container()
    self.state.nested_parse(string, 0, para)
    return para
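# A minimal, self-contained sketch of the pattern nearly every snippet in this
# file relies on: wrap generated reST in a statemachine.StringList and hand it
# to nested_parse() so docutils renders it into a container node. The
# directive name "rst-demo" and the setup() hook are illustrative assumptions,
# not taken from any snippet above.
from docutils import nodes, statemachine
from docutils.parsers.rst import Directive


class RstDemo(Directive):
    has_content = True

    def run(self):
        container = nodes.container()
        lines = statemachine.StringList(list(self.content))
        # nested_parse() parses the reST lines and appends the resulting
        # nodes as children of `container`.
        self.state.nested_parse(lines, self.content_offset, container)
        return [container]


def setup(app):  # hypothetical Sphinx entry point
    app.add_directive("rst-demo", RstDemo)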
def run(self):
    try:
        code = inspect.cleandoc("""
            def usermethod():
                {}
            """).format("\n    ".join(self.content))
        exec(code)
        result = locals()["usermethod"]()
        if result is None:
            raise Exception(
                "Return value needed! The body of your `.. exec::` is used as a "
                "function call that must return a value.")
        para = nodes.container()
        # tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
        lines = statemachine.StringList(result.split("\n"))
        self.state.nested_parse(lines, self.content_offset, para)
        return [para]
    except Exception as e:
        docname = self.state.document.settings.env.docname
        return [
            nodes.error(
                None,
                nodes.paragraph(
                    text="Unable to execute python code at {}:{} ... {}".format(
                        docname, self.lineno, datetime.datetime.now())),
                nodes.paragraph(text=str(e)),
                nodes.literal_block(text=str(code)),
            )
        ]
def _convert_content(self, text):
    list_lines = statemachine.string2lines(str_unicode(text))
    # Attach a source and line number to each line so that warnings emitted
    # during writing can point at the offending line; if left as None the
    # warnings would be counted but would not appear in the output, so you
    # would not know where they came from.
    items = [(self.state.document.current_source, self.lineno)] * len(list_lines)
    return statemachine.StringList(list_lines, items=items)
def run(self):
    try:
        args = list(self.arguments)
        args.append("--project")
        args.append(f"{args[0]}")
        args.append("-opng")
        # cannot use "pylint.pyreverse.main.Run" because it calls `sys.exit`. why?
        fig_name = self.options.get("filename",
                                    "classes_{}.png".format(args[0]))
        command = [sys.executable, "-m", "pylint.pyreverse.main"]
        print("Running {}".format(command + args))
        env = dict(os.environ)
        # apply any runtime path mods to the PYTHONPATH env variable (e.g.
        # sys.path mods made during doc confs)
        env["PYTHONPATH"] = os.pathsep.join(sys.path)
        subprocess.check_call(command + args, env=env)
        try:
            os.remove(os.path.join(APIDOC_DIR, fig_name))
        except OSError:
            pass
        shutil.move(fig_name, APIDOC_DIR)
        # add .gitignore helper prefix
        shutil.move(
            os.path.join(APIDOC_DIR, fig_name),
            os.path.join(APIDOC_DIR, f"pyr_{fig_name}"),
        )
        new_content = [f".. figure:: /{APIDOC_DIR}/pyr_{fig_name}"]
        # assume we don't need the packages_ image, and delete it.
        try:
            os.remove("packages_{}.png".format(args[0]))
        except OSError:
            pass
        # pass the other options through (figure options such as align)
        for opt, val in self.options.items():
            if opt in ("filename",):
                continue
            new_content.append("   :{}: {}\n".format(opt, val))
        new_content.append("\n")
        for line in self.content:
            new_content.append("   " + line)
        para = nodes.container()
        # tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
        lines = statemachine.StringList(new_content)
        self.state.nested_parse(lines, self.content_offset, para)
        return [para]
    except Exception as e:
        docname = self.state.document.settings.env.docname
        # add the error message directly to the built documentation and also
        # tell the builder
        raise self.error(
            "Unable to execute embedded doc code at {}:{} ... {}\n{}".format(
                docname, self.lineno, datetime.datetime.now(), str(e)))
def create_row(self, line):
    row = []
    for cell_text in line:
        row.append(
            (0, 0, 0, statemachine.StringList(cell_text.splitlines())))
    return row
def run(self):
    result = []
    schema, parts = split_content(self.content)

    container = jsonschema_node()
    set_source_info(self, container)
    literal = nodes.literal_block(schema.content, schema.content)
    literal['language'] = 'javascript'
    literal['classes'] = container['classes'] = ['jsonschema']
    set_source_info(self, literal)
    container.children.append(literal)
    result.append(container)

    for part in parts:
        if self.validate:
            is_valid = True
            try:
                jsonschema.validate(part.json, schema.json)
            except jsonschema.ValidationError:
                is_valid = False
            except jsonschema.SchemaError as e:
                raise ValueError("Schema is invalid:\n{0}\n\n{1}".format(
                    str(e), schema.content))
            if is_valid != part.should_pass:
                if part.should_pass:
                    raise ValueError("Doc says fragment should pass, "
                                     "but it does not validate:\n" +
                                     part.content)
                else:
                    raise ValueError("Doc says fragment should not pass, "
                                     "but it validates:\n" + part.content)
        else:
            is_valid = part.should_pass

        if len(part.comment):
            paragraph = nodes.paragraph('', '')
            comment = statemachine.StringList(part.comment)
            comment.parent = self.content.parent
            self.state.nested_parse(comment, 0, paragraph)
            paragraph['classes'] = ['jsonschema-comment']
            set_source_info(self, paragraph)
            result.append(paragraph)

        container = jsonschema_node()
        set_source_info(self, container)
        literal = nodes.literal_block(part.content, part.content)
        literal['language'] = 'javascript'
        if is_valid:
            literal['classes'] = container['classes'] = ['jsonschema-pass']
        else:
            literal['classes'] = container['classes'] = ['jsonschema-fail']
        set_source_info(self, literal)
        container.children.append(literal)
        result.append(container)

    return result
def run(self):
    self.env = self.state.document.settings.env
    self.language = self.options.get('language', self.env.config.language)
    self.env.temp_data['language'] = self.language

    # catch exceptions and report them together with the name of
    # the guilty file
    try:
        output = self.get_rst()
    except Exception as e:
        traceback.print_exc()
        document = self.state.document
        return [document.reporter.warning(str(e), line=self.lineno)]

    #~ output = output.decode('utf-8')
    if 'debug' in self.options:
        print(self.env.docname)
        print('-' * 50)
        print(output)
        print('-' * 50)

    content = statemachine.StringList(output.splitlines(),
                                      self.state.document.current_source)
    # content = RSTStateMachine(output.splitlines())

    if self.raw_insert:
        self.state_machine.insert_input(content, output)
        return []

    # print("20180821 {} {}".format(
    #     self.name, self.state.document.current_source))
    if self.titles_allowed:
        node = nodes.section()
        # necessary so that the child nodes get the right source/line set
        # self.state.parent.setup_child(node)
        # node.document = self.state.document
        nested_parse_with_titles(self.state, content, node)
    else:
        node = nodes.paragraph()
        # self.state.parent.setup_child(node)
        # node.document = self.state.document
        self.state.nested_parse(content, self.content_offset, node)

    # following lines originally copied from
    # docutils.parsers.rst.directives.tables.RSTTable
    #~ title, messages = self.make_title()
    #~ node = nodes.Element()  # anonymous container for parsing
    #~ self.state.nested_parse(content, self.content_offset, node)
    #~ if len(node) != 1 or not isinstance(node[0], nodes.table):
    #~     error = self.state_machine.reporter.error(
    #~         'Error parsing content block for the "%s" directive: exactly '
    #~         'one table expected.' % self.name, nodes.literal_block(
    #~             self.block_text, self.block_text), line=self.lineno)
    #~     return [error]
    #~ return [x for x in node]
    return list(node)
def process_header_option(self):
    """Returns table_head."""
    res = []
    colnames = self.options.get("header", "")
    for col in colnames.split(","):
        res.append(
            [0, 0, 0, statemachine.StringList(col.strip().splitlines())])
    return [res]
def add_caption(self, caption_string, node):
    cnode = nodes.Element()  # anonymous container for parsing
    sl = statemachine.StringList([caption_string], source='')
    self.state.nested_parse(sl, self.content_offset, cnode)
    caption = nodes.caption(caption_string, '', *cnode)
    if 'align' in self.options:
        caption['align'] = self.options['align']
    else:
        caption['align'] = 'center'
    node += caption
def run(self):
    env = self.state.document.settings.env
    link = env.docname.replace('\\', '_') + '.html'
    self.content = statemachine.StringList([SNIPTEXT % link])
    targetid = "codesnippet-%d" % env.new_serialno('codesnippet')
    targetnode = nodes.target('', '', ids=[targetid])
    ad = make_admonition(CodeSnippet, self.name, ['CodeSnippet'],
                         self.options, self.content, self.lineno,
                         self.content_offset, self.block_text, self.state,
                         self.state_machine)
    return [targetnode] + ad
def run(self):
    self.content = statemachine.StringList([
        '|cover|',
        '',
        '(snipped) For details, please see the book '
        '`自走プログラマー <https://gihyo.jp/book/2020/978-4-297-11197-7>`__',
    ])
    text = '\n'.join(self.content)
    admonition_node = nodes.admonition(text, **{'classes': [self.name]})
    self.add_name(admonition_node)
    self.state.nested_parse(self.content, self.content_offset,
                            admonition_node)
    return [admonition_node]
def process_header_option(self):
    '''Returns table_head.'''
    res = []
    colnames = self.options.get('header', '')
    for col in colnames.split(','):
        res.append(
            [0, 0, 0, statemachine.StringList(col.strip().splitlines())])
    return [
        res,
    ]
def run(self):
    parts = split_content(self.content)
    container = language_specific_pages(parts=parts)
    for part in parts:
        paragraph = nodes.paragraph('', '')
        content = statemachine.StringList(part.content)
        content.parent = self.content.parent
        self.state.nested_parse(content, 0, paragraph)
        part.paragraph = paragraph
    return [container]
def _process_rows(self, rows, source):
    """Add table cell boilerplate to cells in rows."""
    p_rows = []
    for row in rows:
        p_row = []
        for cell in row:
            cell_content = statemachine.StringList(cell.splitlines(),
                                                   source=source)
            p_row.append((0, 0, 0, cell_content))
        p_rows.append(p_row)
    return p_rows
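# For context: the (0, 0, 0, StringList) tuples built above are the cell
# format docutils' states.Body.build_table() expects, i.e.
# (morerows, morecols, source_offset, cell_lines); the zeros mean "no
# row/column spanning, offset 0". A hedged sketch of assembling a whole
# table in that shape (column widths and cell texts are made up here):
from docutils import statemachine

def demo_table_data():
    def cell(text):
        return (0, 0, 0, statemachine.StringList(text.splitlines()))

    table_head = [[cell("Name"), cell("Value")]]
    table_body = [[cell("answer"), cell("42")]]
    col_widths = [50, 50]
    # Pass this 3-tuple to self.state.build_table(table, content_offset)
    # inside a directive to obtain a table node.
    return (col_widths, table_head, table_body)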
def run(self):
    result = []
    parts = self.split_content(self.content)

    for part in parts:
        if len(part.comment):
            paragraph = nodes.paragraph('', '')
            comment = statemachine.StringList(part.comment)
            comment.parent = self.content.parent
            self.state.nested_parse(comment, 0, paragraph)
            paragraph['classes'] = ['jsonschema-comment']
            set_source_info(self, paragraph)
            result.append(paragraph)

        container = jsonschema_node()
        container['raw_json'] = part.json
        set_source_info(self, container)
        pprint_content = pprint_json(part.json)
        literal = nodes.literal_block(pprint_content, pprint_content)
        literal['language'] = 'json'
        set_source_info(self, literal)
        container.children.append(literal)
        result.append(container)

    for indx, part in enumerate(parts):
        for other_part in parts[(indx + 1):]:
            p1 = pprint_json(part.json).split('\n')
            p2 = pprint_json(other_part.json).split('\n')
            diff_str = '\n'.join(
                difflib.unified_diff(
                    p2,
                    p1,
                    lineterm='',
                    fromfile=(other_part.comment[0]
                              if other_part.comment else ''),
                    tofile=(part.comment[0] if part.comment else ''),
                ))
            container = diff_node()
            set_source_info(self, container)
            literal = nodes.literal_block(diff_str, diff_str)
            literal['language'] = 'diff'
            set_source_info(self, literal)
            container.children.append(literal)
            result.append(container)

    return result
def parse_csv_data_into_rows(csv_data, dialect, source, options):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([line.encode('utf-8') for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = unicode(cell, 'utf-8')
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
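# Note: the routine above is Python 2 era code (`unicode`, manual UTF-8
# round-tripping). On Python 3 the csv module is Unicode-native, so a hedged
# sketch of the same logic (function name is illustrative) reduces to:
import csv
from docutils import statemachine

def parse_csv_data_into_rows_py3(csv_data, dialect, source):
    rows = []
    max_cols = 0
    for row in csv.reader(csv_data, dialect=dialect):
        row_data = [(0, 0, 0,
                     statemachine.StringList(cell.splitlines(),
                                             source=source))
                    for cell in row]
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols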
def get_sql_data(self): """Returns rows, max_cols """ # Load the specified driver and get a connection to the database driver = self.options.get("driver", "xlsx") source_dir = os.path.dirname( os.path.abspath(self.state.document.current_source)) if driver == "xlsx": # Load content to an in-memory SQLite database. # Yeah, it's ugly, but it works pretty well actually. from .xls2sql import Xls2Sql import sqlite3 dbconn = sqlite3.connect(":memory:") loader = Xls2Sql(dbconn) source_path = os.path.normpath( os.path.join(source_dir, self.options.get("source", "data.xlsx"))) source_path = utils.relative_path(None, source_path) loader.load(os.path.abspath(source_path)) else: #default_src = 'database="data.xlsx",driver="xslx"' default_src = 'database="csv-data",driver="csv"' exec("import %s as DBDRV" % self.options.get("driver", "SnakeSQL")) cnstr = "dbconn = DBDRV.connect(%s)" % self.options.get( "source", default_src) exec(cnstr) cursor = dbconn.cursor() SQL = str(self.options.get("sql")) res = cursor.execute(SQL) rows = [] max_cols = 0 row = cursor.fetchone() while row is not None: row_data = [] for cell in row: cell_text = str(cell) cell_data = (0, 0, 0, statemachine.StringList(cell_text.splitlines())) row_data.append(cell_data) rows.append(row_data) max_cols = max(max_cols, len(row_data)) row = cursor.fetchone() dbconn.close() return rows, max_cols
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader(
        [self.encode_for_csv(line + '\n') for line in csv_data],
        dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
def run(self):
    self.init_locale()
    widget_name = self.arguments[0]
    if len(self.arguments) <= 1 or self.arguments[1] == "all":
        doc_parts = ['since', 'author']
    else:
        doc_parts = [self.arguments[1]]

    # find widget
    widget_path = os.path.join(root_dir, "src", "structure", "pure",
                               "%s.js" % widget_name)
    if not os.path.exists(widget_path):
        # try plugin
        widget_path = os.path.join(root_dir, "src", "plugins", widget_name,
                                   "structure_plugin.js")
        if not os.path.exists(widget_path):
            print("No widget or plugin named '%s' found" % widget_name)
            return []

    content = {}
    with open(widget_path, "rb") as f:
        for line in f:
            for doc_part in doc_parts:
                match = re.search(r"^\s+\*\s+@%s\s(.+)$" % doc_part,
                                  line.decode('utf-8').rstrip())
                if match:
                    content[doc_part] = match.group(1)

    res_nodes = []
    if len(content):
        cnode = nodes.Element()  # anonymous container for parsing
        # note: avoid shadowing the builtin `list` and the `content` dict
        lines = ['| **%s**: %s' % (self.normalize_part(part), text)
                 for part, text in content.items()]
        lines.append('|')
        sl = statemachine.StringList(lines, source='')
        self.state.nested_parse(sl, self.content_offset, cnode)
        node = nodes.line('', '', *cnode)
        res_nodes.append(node)
    return res_nodes
def get_sql_data(self):
    '''Returns rows, max_cols.'''
    # Load the specified driver and get a connection to the database
    driver = self.options.get('driver', 'xlsx')
    if driver == 'xlsx':
        # Load content to an in-memory SQLite database.
        # Yeah, it's ugly, but it works pretty well actually.
        from utils.xls2sql import Xls2Sql
        import sqlite3
        dbconn = sqlite3.connect(':memory:')
        loader = Xls2Sql(dbconn)
        loader.load(self.options.get('source', 'data.xlsx'))
    else:
        # default_src = 'database="data.xlsx",driver="xslx"'
        default_src = 'database="csv-data",driver="csv"'
        exec('import %s as DBDRV' % self.options.get('driver', 'SnakeSQL'))
        cnstr = 'dbconn = DBDRV.connect(%s)' % self.options.get(
            'source', default_src)
        exec(cnstr)
    cursor = dbconn.cursor()
    SQL = str(self.options.get('sql'))
    res = cursor.execute(SQL)
    rows = []
    max_cols = 0
    row = cursor.fetchone()
    while row is not None:
        row_data = []
        for cell in row:
            cell_text = unicode(cell)
            cell_data = (0, 0, 0,
                         statemachine.StringList(cell_text.splitlines()))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row_data))
        row = cursor.fetchone()
    dbconn.close()
    return rows, max_cols
def parse_csv_data_into_rows(self, csv_data, _dialect, source):
    csv_data = "\n".join(csv_data)
    # Use StringIO so we can set a filename, which makes PyYAML errors
    # more informative.
    f = StringIO(csv_data)
    if 'file' in self.options:
        f.name = self.options['file']
    data = yaml.safe_load(f)
    data = self.transform_yaml(data)

    rows = []
    max_cols = 0
    for row in data:
        row_data = []
        for cell in row:
            cell_text = str(cell) if cell else ""
            cell_data = (0, 0, 0,
                         statemachine.StringList(cell_text.splitlines(),
                                                 source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    columns = self.get_columns()
    if columns is None:
        return super(CSVFTable, self).parse_csv_data_into_rows(
            csv_data, dialect, source)
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader(
        [self.encode_for_csv(line + '\n') for line in csv_data],
        dialect=dialect)
    rows = []
    max_cols = len(columns)
    for row in csv_reader:
        row_data = [''] * len(columns)
        i = 1
        for cell in row:
            if i in columns:
                cell_text = self.decode_from_csv(cell)
                cell_data = (0, 0, 0, statemachine.StringList(
                    cell_text.splitlines(), source=source))
                row_data[columns.index(i)] = cell_data
            i += 1
        rows.append(row_data)
    return rows, max_cols
def _process_rst(self, output):
    """
    Process RST output

    :param output: The output to process
    :type output: str

    :return: The processed node(s)
    :rtype: list
    """
    try:
        node = nodes.paragraph()
        node.source, node.line = self.state_machine.get_source_and_line(
            self.lineno)
        converted = statemachine.StringList(
            initlist=output.split('\n'),
            source=self.content.source,
            parent=self.content.parent,
            parent_offset=self.content.parent_offset,
        )
        self.state.nested_parse(converted, self.content_offset, node,
                                match_titles=True)
    except Exception as error:
        raise self.error('%s' % error)
    return node.children
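# The source=/parent=/parent_offset= arguments above are what let docutils
# map warnings in the generated text back to the original document. When
# there is no parent content to inherit from, a similar effect can be had by
# supplying per-line (source, line) items explicitly; a hedged sketch
# (the helper name and default line numbering are illustrative assumptions):
from docutils import statemachine

def tracked_string_list(text, source, first_line=0):
    lines = statemachine.string2lines(text, convert_whitespace=True)
    # each item pairs a line with (source, line-number) for error reporting
    items = [(source, first_line + i) for i in range(len(lines))]
    return statemachine.StringList(lines, items=items)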
def run(self):
    env = self.state.document.settings.env
    config = env.config
    mode = config.substitute_mode
    subs = get_substitutions(config)
    original = self.content

    # Find the ID, if any exists
    if len(self.arguments) >= 1:
        id_ = self.arguments[0]
    else:
        id_ = 'NO_ID'

    # Get the replacement value, don't use it yet
    if id_ in subs:
        replacement = subs[id_]
        replacement = statemachine.StringList(replacement.splitlines(),
                                              source='file')
    else:
        replacement = None

    # Save list of substitutions for the sub-list directive
    def stringify(s):
        if isinstance(s, (list, statemachine.StringList)):
            return '\n'.join(s)
        return s

    if not hasattr(env, 'substitute_all_subs'):
        env.substitute_all_subs = {}
    if id_ != 'NO_ID':
        env.substitute_all_subs[id_] = dict(
            original=stringify(original),
            replacement=stringify(replacement),
            docname=env.docname)

    # Create our new text ("result") based on mode, content, and
    # replacement.
    if mode == 'both':
        result = []
        content = nodes.admonition()
        content['classes'].append('substitute-original')
        title_text = "%s (original)" % id_
        content += nodes.title(title_text, '', nodes.Text(title_text))
        node = nodes.paragraph()
        self.state.nested_parse(original, self.content_offset, node)
        content += node
        result.append(content)
        if replacement:
            content = nodes.admonition()
            content['classes'].append('substitute-replacement')
            title_text = "(replacement)"
            content += nodes.title(title_text, '', nodes.Text(title_text))
            node = nodes.paragraph()
            self.state.nested_parse(replacement, self.content_offset, node)
            content += node
            result[-1].append(content)
    elif mode == 'original' or replacement is None:
        node = nodes.paragraph()
        self.state.nested_parse(original, self.content_offset, node)
        result = [node]
    elif mode == 'replace':  # default
        if replacement is None:
            replacement = original
        node = nodes.paragraph()
        self.state.nested_parse(replacement, self.content_offset, node)
        result = [node]
    else:
        raise ValueError("bad value of substitute_mode")
    return result
def run(self):
    subs = get_substitutions(self.state.document.settings.env.config)
    has_local = 'site-name' in subs

    # Get ID from the argument to the directive, or from the filename if
    # not given.
    if self.arguments:
        id_ = self.arguments[0]
    else:
        file_basename = os.path.splitext(
            os.path.basename(self.state.document.current_source))[0]
        id_ = file_basename

    max_cols = 2
    course_data = COURSES[id_]
    course_name = COURSES[id_]['id']

    # Generate the table data in a list of tuples.
    table_data = [
        ('Description',
         make_sub_rst(id_ + '-desc', course_data.get('desc', ''))),
        ('Video intro',
         make_sub_rst(id_ + '-video', course_data.get('video', ''))),
        ('Reading',
         make_sub_rst(id_ + '-reading', course_data.get('reading', ''))),
        ('Questions',
         make_sub_rst(id_ + '-questions', course_data.get('questions', ''))),
        # course_data.get('exercises', ''),
    ]
    if has_local:
        table_data.append(
            (make_sub_rst('site-name', ""), make_sub_rst(id_ + '-local', "")))

    # Parse all the above text (this is magic to me)
    rows = []
    for row in table_data:
        row_data = []
        for cell in row:
            cell_text = str(cell) if cell else ""
            cell_data = (0, 0, 0,
                         statemachine.StringList(cell_text.splitlines()))
            row_data.append(cell_data)
        rows.append(row_data)
    table_head = []
    table_body = rows
    col_widths = [100 // max_cols] * max_cols
    table = (col_widths, table_head, table_body)
    table_node = self.state.build_table(table, self.content_offset,
                                        stub_columns=0)

    textnodes, messages = self.state.inline_text(course_name, self.lineno)

    def parse_sub(sub_id):
        """General function to get a text block:

        - If there is a local substitution, use that (substitution name:
          COURSE_ID-SUB_ID).
        - Otherwise, use the default from the course YAML (key: SUB_ID).
        """
        content = course_data.get(sub_id, '')
        if id_ + '-' + sub_id in subs:
            content = subs[id_ + '-' + sub_id]
        if not content:
            return []
        sub_node = nodes.paragraph(rawsource=content)
        self.state.nested_parse(ViewList(content.split('\n')),
                                self.content_offset, node=sub_node)
        return [sub_node]

    # Get prologs and epilogs
    prolog_list = parse_sub('prolog')
    epilog_list = parse_sub('epilog')
    prolog_local_list = parse_sub('prolog-local')
    epilog_local_list = parse_sub('epilog-local')

    # Replace the document title with the full course ID.
    if len(self.state.document) > 0 and isinstance(self.state.document[0],
                                                   nodes.section):
        self.state.document[0][0][0] = nodes.Text(course_name)

    # python 2 support, otherwise we'd use [*prolog_local_list,
    # *prolog_list, ...] sorry.
    return (prolog_local_list + prolog_list + [table_node] + epilog_list +
            epilog_local_list)
def generate_complex_table(self, element_name, include_name=False,
                           mandatory=False, table_body=None, sub_run=False,
                           parent=None):
    """needs to be fixed"""
    if table_body is None:
        table_body = []
    if not element_name == "#text":
        attributes = schema.get_widget_attributes(element_name)
        elements = schema.get_widget_elements(element_name,
                                              locale=self.locale)
        if include_name:
            rowspan = len(attributes) - 1
        line = 0
        for attr in attributes:
            if 'name' in attr.attrib:
                name = attr.get('name')
                atype, values, enums = schema.get_attribute_type(attr)
                description = schema.get_node_documentation(attr, self.locale)
                if description is not None:
                    description = description.text
                elif enums is not None:
                    description = self.get_description(enums)
                else:
                    description = ''
            elif 'ref' in attr.attrib:
                name = attr.get('ref')
                type_def = schema.get_attribute(name)
                atype, values, enums = schema.get_attribute_type(type_def)
                description = schema.get_node_documentation(type_def,
                                                            self.locale)
                if description is not None:
                    description = description.text
                elif enums is not None:
                    description = self.get_description(enums)
                else:
                    description = ''
                #name = ":ref:`%s <%s>`" % (name, name)
            if attr.get('use', 'optional') == "required":
                name = ":abbr:`%s (%s)`" % (name, _('mandatory'))
            atype = self.normalize_type(atype) if len(
                values) == 0 else self.normalize_values(values)
            if include_name:
                if line == 0:
                    element_title = element_name
                    if mandatory:
                        element_title = ":abbr:`%s (%s)`" % (
                            element_title, _('mandatory'))
                    if parent:
                        element_title = "%s\n * %s" % (parent, element_title)
                    row = [(rowspan, 0, 0, statemachine.StringList(
                                element_title.splitlines())),
                           self.get_cell_data(name),
                           self.get_cell_data(atype),
                           self.get_cell_data(description)]
                else:
                    row = [
                        None,
                        self.get_cell_data(name),
                        self.get_cell_data(atype),
                        self.get_cell_data(description)
                    ]
            else:
                row = [
                    self.get_cell_data(name),
                    self.get_cell_data(atype),
                    self.get_cell_data(description)
                ]
            table_body.append(row)
            line += 1
        for sub_element in elements:
            if not isinstance(sub_element, tuple):
                name = sub_element.get("name")
                mandatory = sub_element.get(
                    "minOccurs") is not None and int(
                        sub_element.get("minOccurs")) > 0
                if parent is not None:
                    sub_parent = "%s\n * %s" % (parent, element_name)
                else:
                    sub_parent = element_name
                # no recursions
                if name != element_name:
                    self.generate_complex_table(name,
                                                include_name=include_name,
                                                mandatory=mandatory,
                                                table_body=table_body,
                                                sub_run=True,
                                                parent=sub_parent)
            else:
                (sub_element, atype, doc) = sub_element
                indent = 2 if parent is not None else 1
                element_title = "%s\n\n%s* %s" % (element_name,
                                                  " " * indent, sub_element)
                if parent:
                    element_title = "%s\n * %s" % (parent, element_title)
                row = [
                    self.get_cell_data(element_title),
                    self.get_cell_data(""),
                    self.get_cell_data(self.normalize_type(atype)),
                    self.get_cell_data(doc)
                ]
                table_body.append(row)
                line += 1
    else:
        # text node
        if include_name:
            row = [
                self.get_cell_data(element_name),
                self.get_cell_data(""),
                self.get_cell_data(self.normalize_type("string")),
                self.get_cell_data("")
            ]
        else:
            row = [
                self.get_cell_data(element_name),
                self.get_cell_data(self.normalize_type("string")),
                self.get_cell_data("")
            ]
        table_body.append(row)
    if sub_run is False:
        if len(table_body) == 0:
            elem = schema.find(".//xs:element[@name='%s']" % element_name)
            doc = schema.get_node_documentation(elem, self.locale)
            if doc is not None:
                if include_name:
                    row = [
                        self.get_cell_data(element_name),
                        self.get_cell_data(""),
                        self.get_cell_data(self.normalize_type("string")),
                        self.get_cell_data(doc.text)
                    ]
                else:
                    # use doc.text here as well; the bare documentation node
                    # has no splitlines() and would crash get_cell_data()
                    row = [
                        self.get_cell_data(element_name),
                        self.get_cell_data(self.normalize_type("string")),
                        self.get_cell_data(doc.text)
                    ]
                table_body.append(row)
            else:
                return None
        if include_name:
            table_head = [[
                self.get_cell_data(_('Element')),
                (0, 3, 0,
                 statemachine.StringList(_('Attribute').splitlines())),
                None, None
            ], [
                self.get_cell_data(_('Structure')),
                self.get_cell_data(_('Name')),
                self.get_cell_data(_('Content')),
                self.get_cell_data(_('Description'))
            ]]
            table = ([15, 15, 15, 55], table_head, table_body)
        else:
            table_head = [[
                (0, 3, 0,
                 statemachine.StringList(_('Attribute').splitlines())),
                None, None
            ], [
                self.get_cell_data(_('Name')),
                self.get_cell_data(_('Content')),
                self.get_cell_data(_('Description'))
            ]]
            table = ([20, 20, 60], table_head, table_body)
        table_node = self.state.build_table(table, self.content_offset)
        table_node['classes'] += self.options.get('class', [])
        table_node['classes'] += ["schema-table"]
        return table_node
def get_cell_data(self, content):
    return 0, 0, self.content_offset, statemachine.StringList(
        content.splitlines())
def get_name(self, name):
    name = ":ref:`%s`" % name
    cnode = nodes.Element()  # anonymous container for parsing
    sl = statemachine.StringList([name], source='')
    self.state.nested_parse(sl, self.content_offset, cnode)
    return nodes.label(name, '', *cnode)
def generate_table(self, element_name, include_name=False, mandatory=False):
    table_body = []
    attributes = schema.get_widget_attributes(element_name)
    if include_name:
        rowspan = len(attributes) - 1
    line = 0
    for attr in attributes:
        if 'name' in attr.attrib:
            name = attr.get('name')
            atype, values, enums = schema.get_attribute_type(attr)
            description = schema.get_node_documentation(attr, self.locale)
            if description is not None:
                description = re.sub(r"\n\s+", " ", description.text).strip()
            elif enums is not None:
                description = self.get_description(enums)
            else:
                description = ''
        elif 'ref' in attr.attrib:
            name = attr.get('ref')
            type_def = schema.get_attribute(name)
            atype, values, enums = schema.get_attribute_type(type_def)
            # check if there is some documentation here
            description = schema.get_node_documentation(attr, self.locale)
            if description is None:
                # check the referenced definition for documentation as fallback
                description = schema.get_node_documentation(type_def,
                                                            self.locale)
            if description is not None:
                description = re.sub(r"\n\s+", " ", description.text).strip()
            elif enums is not None:
                description = self.get_description(enums)
            else:
                description = ''
            #name = ":ref:`%s <%s>`" % (name, name)
        if attr.get('use', 'optional') == "required":
            name = ":abbr:`%s(%s)`" % (name, _('mandatory'))
        atype = self.normalize_type(atype) if len(
            values) == 0 else self.normalize_values(values)
        if include_name:
            if line == 0:
                if mandatory:
                    element_name = ":abbr:`%s(%s)`" % (element_name,
                                                       _('mandatory'))
                row = [
                    (rowspan, 0, 0,
                     statemachine.StringList(element_name.splitlines())),
                    self.get_cell_data(name),
                    self.get_cell_data(atype),
                    self.get_cell_data(description)
                ]
            else:
                row = [
                    None,
                    self.get_cell_data(name),
                    self.get_cell_data(atype),
                    self.get_cell_data(description)
                ]
        else:
            row = [
                self.get_cell_data(name),
                self.get_cell_data(atype),
                self.get_cell_data(description)
            ]
        table_body.append(row)
        line += 1
    if len(table_body) == 0:
        return None
    if include_name:
        table_head = [[
            self.get_cell_data(_('Element')),
            (0, 3, 0, statemachine.StringList(_('Attribute').splitlines())),
            None, None
        ], [
            self.get_cell_data(''),
            self.get_cell_data(_('Name')),
            self.get_cell_data(_('Content')),
            self.get_cell_data(_('Description'))
        ]]
        table = ([10, 15, 20, 55], table_head, table_body)
    else:
        table_head = [[
            (0, 3, 0, statemachine.StringList(_('Attribute').splitlines())),
            None, None
        ], [
            self.get_cell_data(_('Name')),
            self.get_cell_data(_('Content')),
            self.get_cell_data(_('Description'))
        ]]
        table = ([20, 20, 60], table_head, table_body)
    table_node = self.state.build_table(table, self.content_offset)
    table_node['classes'] += self.options.get('class', [])
    return table_node
def run(self):
    stdStreams = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        args = list(self.arguments)
        args.append("--project")
        args.append(f"{args[0]}")
        args.append("-opng")
        # cannot use "pylint.pyreverse.main.Run" because it calls `sys.exit`. why?
        fig_name = self.options.get("filename",
                                    "classes_{}.png".format(args[0]))
        command = [sys.executable, "-m", "pylint.pyreverse.main"]
        print("Running {}".format(command + args))
        subprocess.check_call(command + args)
        try:
            os.remove(os.path.join(APIDOC_DIR, fig_name))
        except OSError:
            pass
        shutil.move(fig_name, APIDOC_DIR)
        # add .gitignore helper prefix
        shutil.move(
            os.path.join(APIDOC_DIR, fig_name),
            os.path.join(APIDOC_DIR, f"pyr_{fig_name}"),
        )
        new_content = [f".. figure:: /{APIDOC_DIR}/pyr_{fig_name}"]
        # assume we don't need the packages_ image, and delete it.
        try:
            os.remove("packages_{}.png".format(args[0]))
        except OSError:
            pass
        # pass the other options through (figure options such as align)
        for opt, val in self.options.items():
            if opt in ("filename",):
                continue
            new_content.append("   :{}: {}\n".format(opt, val))
        new_content.append("\n")
        for line in self.content:
            new_content.append("   " + line)
        para = nodes.container()
        # tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
        lines = statemachine.StringList(new_content)
        self.state.nested_parse(lines, self.content_offset, para)
        return [para]
    except Exception as e:
        docname = self.state.document.settings.env.docname
        return [
            nodes.error(
                None,
                nodes.paragraph(
                    text="Unable to generate figure from {}:{} with command "
                         "{} ... {}".format(docname, self.lineno, command,
                                            datetime.datetime.now())),
                nodes.paragraph(text=str(e)),
                nodes.literal_block(text=str(sys.stdout.getvalue())),
                nodes.literal_block(text=str(sys.stderr.getvalue())),
            )
        ]
    finally:
        sys.stdout, sys.stderr = stdStreams