def run(self):
    """Build a TimelineNode from 'milestones' and 'deadlines' sections.

    The directive content is parsed into a temporary paragraph; sections
    titled 'milestones' and 'deadlines' are located and handed to a
    TimelineNode, which is returned as the directive's output.
    """
    docutils.parsers.rst.roles.set_classes({"class": "timeline"})
    nested_node = docutils.nodes.paragraph()
    nested_parse_with_titles(self.state, self.content, nested_node)
    # BUG FIX: ``lambda(node):`` is Python 2 tuple-parameter syntax and a
    # SyntaxError on Python 3; a plain one-argument lambda is equivalent.
    # find milestones section
    milestones_sections = list(
        nested_node.traverse(
            lambda node: utils.node_is_section_with_title(node, 'milestones')))
    # find deadlines section
    deadlines_sections = list(
        nested_node.traverse(
            lambda node: utils.node_is_section_with_title(node, 'deadlines')))
    # create a timeline node
    timeline = TimelineNode()
    results = [timeline]
    for milestones_section in milestones_sections:
        timeline.add_milestones_from_section(milestones_section)
    for deadline_section in deadlines_sections:
        timeline.add_deadlines_from_section(deadline_section)
    return results
def run(self):
    """Render an OptionsDictionary attribute of a class as parsed RST.

    Arguments are ``(module_path, class_name, attribute_name)``.  The class
    is imported and instantiated, and the named attribute is rendered via
    its ``__rst__()`` helper into a temporary section.

    :returns: child nodes of the parsed section.
    :raises TypeError: if the attribute is not an OptionsDictionary.
    """
    module_path, class_name, attribute_name = self.arguments
    mod = importlib.import_module(module_path)
    klass = getattr(mod, class_name)
    options = getattr(klass(), attribute_name)
    if not isinstance(options, OptionsDictionary):
        raise TypeError("Object '%s' is not an OptionsDictionary."
                        % attribute_name)
    lines = ViewList()
    # enumerate() replaces the hand-rolled line counter.
    n = 0
    for n, line in enumerate(options.__rst__()):
        lines.append(line, "options table", n)
    # Note applicable to System, Solver and Driver 'options', but not to
    # 'recording_options'
    if attribute_name != 'recording_options':
        lines.append("", "options table", n + 1)  # Blank line required after table.
    # Create a node.
    node = nodes.section()
    node.document = self.state.document
    # Parse the rst.
    nested_parse_with_titles(self.state, lines, node)
    # And return the result.
    return node.children
def run(self):
    """Render the altair mini-gallery template and parse it as reST."""
    opts = self.options
    size = opts.get('size', 4)
    shuffle = 'shuffle' in opts
    seed = opts.get('seed', 42)
    titles = opts.get('titles', False)
    width = opts.get('width', None)

    env = self.state.document.settings.env
    builder_config = env.app.builder.config
    gallery_dir = builder_config.altair_gallery_dir
    gallery_ref = builder_config.altair_gallery_ref

    examples = populate_examples(
        shuffle=shuffle,
        shuffle_seed=seed,
        num_examples=size,
        gallery_dir=gallery_dir,
        gallery_ref=gallery_ref,
        code_below=True,
    )
    include = MINIGALLERY_TEMPLATE.render(
        image_dir='/_images',
        gallery_dir=gallery_dir,
        examples=examples,
        titles=titles,
        width=width,
    )

    # Feed the rendered template line-by-line into a ViewList and parse it.
    result = ViewList()
    for template_line in include.split('\n'):
        result.append(template_line, "<altair-minigallery>")
    holder = nodes.paragraph()
    holder.document = self.state.document
    nested_parse_with_titles(self.state, result, holder)
    return holder.children
def run(self):
    """Generate an N2 model-view HTML file and embed it via ``raw:: html``."""
    path_to_model = self.arguments[0]
    model_path = os.path.normpath(os.path.join(os.getcwd(), path_to_model))

    # Fail early if the model file is missing.
    if not os.path.isfile(model_path):
        raise IOError('File does not exist({0})'.format(model_path))

    html_name = os.path.join(
        os.getcwd(),
        os.path.basename(path_to_model).split('.')[0] + "_n2.html")

    proc = subprocess.Popen(
        ['openmdao', 'view_model', model_path, '--no_browser',
         '--embed', '-o' + html_name])
    proc.communicate()  # wait for the viewer to finish writing the HTML

    env = self.state.document.settings.env
    docname = env.doc2path(env.docname)

    # Emit a raw-html include pointing at the generated file.  The source
    # filename / line number are reported in any warnings or errors.
    rst = ViewList()
    rst.append(".. raw:: html", docname, self.lineno)
    rst.append(" :file: %s" % html_name, docname, self.lineno)

    section = nodes.section()
    nested_parse_with_titles(self.state, rst, section)
    return section.children
def run(self):
    """Document the stevedore plugins found in *namespace* as parsed reST."""
    env = self.state.document.settings.env
    app = env.app

    namespace = " ".join(self.content).strip()
    app.info("documenting plugins from %r" % namespace)

    over = self.options.get("overline-style", "")
    under = self.options.get("underline-style", "=")

    def report_load_failure(mgr, ep, err):
        # Surface plugin-load problems as build warnings instead of failing.
        app.warn("Failed to load %s: %s" % (ep.module_name, err))

    mgr = extension.ExtensionManager(
        namespace, on_load_failure_callback=report_load_failure)

    if "detailed" in self.options:
        data = _detailed_list(mgr, over=over, under=under)
    else:
        data = _simple_list(mgr)

    result = ViewList()
    for text, source in data:
        for text_line in text.splitlines():
            result.append(text_line, source)

    # Parse what we have into a new section.
    section = nodes.section()
    section.document = self.state.document
    nested_parse_with_titles(self.state, result, section)
    return section.children
def run(self):
    """Document an Altair (traitlets) class, excluding trait machinery.

    The exclude list is built from the class's own ``skip`` attribute, its
    declared traits, and the public HasTraits API; the rendered template is
    then parsed as reST.

    :raises ValueError: if the target is not a ``traitlets.HasTraits``
        subclass.
    """
    obj = import_obj_from_args(self.arguments)
    if not issubclass(obj, traitlets.HasTraits):
        raise ValueError('altair-class directive should only be used '
                         'on altair classes; not {0}'.format(obj))

    # figure out what attributes to exclude:
    exclude = ['skip']
    exclude.extend(getattr(obj, 'skip', []))
    # Iterate directly instead of copying via identity comprehensions.
    exclude.extend(obj.class_traits())
    exclude.extend(attr for attr in dir(traitlets.HasTraits)
                   if not attr.startswith('_'))

    # generate the documentation string
    rst_text = ALTAIR_CLASS_TEMPLATE.render(
        classname=self.arguments[0],
        exclude_members=','.join(exclude)
    )

    # parse and return documentation
    result = ViewList()
    for line in rst_text.split("\n"):
        result.append(line, "<altair-class>")
    node = nodes.paragraph()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children
def run(self):
    """Build and parse an RST table describing an Altair object.

    The class name is taken from the first directive argument (anything
    after an opening parenthesis is ignored).

    :raises ImportError: re-raised if the class cannot be imported.
    """
    env = self.state.document.settings.env
    app = env.app
    classname = self.arguments[0].split('(')[0].strip()

    try:
        obj = import_obj(classname, default_module='altair')
    except ImportError:
        # BUG FIX: the message formatted the builtin ``object`` instead of
        # the class name, and ``warnings.warn`` was placed after ``raise``
        # (unreachable / invalid).  Warn first, then re-raise the original
        # ImportError for the caller.
        warnings.warn('Could not make table for {0}. Unable to import'
                      ''.format(classname))
        raise

    # create the table from the object
    include_vl_link = ('include-vegalite-link' in self.options)
    table = altair_rst_table(obj, include_description=include_vl_link)

    # parse and return documentation
    result = ViewList()
    for line in table:
        result.append(line, "<altair-class>")
    node = nodes.paragraph()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children
def run(self):
    """Document one trakt.tv service interface as parsed reST."""
    settings_env = self.state.document.settings.env
    app = settings_env.app

    iface_type = ' '.join(self.content).strip()
    app.info('documenting service interface %r' % iface_type)

    source = '<' + __name__ + '>'
    api_map = interfaces.construct_map(trakt.Trakt.client)
    iface_map = {iface_type: api_map.get(iface_type)}

    result = ViewList()

    def emit(text):
        # Append one line followed by a blank separator line.
        result.append(text, source)
        result.append('', source)

    for api_path, api_ref, api_methods in _format_apis(iface_map):
        emit(api_path)
        emit(api_ref)
        for method in api_methods:
            emit(method)

    # Parse what we have into a new section.
    section = nodes.section()
    section.document = self.state.document
    nested_parse_with_titles(self.state, result, section)
    return section.children
def run(self):
    """Show help for oslo.config options from one or more namespaces."""
    env = self.state.document.settings.env
    app = env.app

    split_namespaces = 'split-namespaces' in self.options
    config_file = self.options.get('config-file')

    if config_file:
        # Let the generator's own option machinery resolve the namespaces
        # declared in the config file.
        app.info('loading config file %s' % config_file)
        conf = cfg.ConfigOpts()
        conf.register_opts(generator._generator_opts)
        conf(
            args=['--config-file', config_file],
            project='oslo.config.sphinxext',
        )
        namespaces = conf.namespace[:]
    else:
        # Namespaces come straight from the directive body.
        namespaces = [c.strip() for c in self.content if c.strip()]

    source_name = '<' + __name__ + '>'
    result = ViewList()
    for help_line in _format_option_help(app, namespaces, split_namespaces):
        result.append(help_line, source_name)

    section = nodes.section()
    section.document = self.state.document
    nested_parse_with_titles(self.state, result, section)
    return section.children
def run(self):
    """On Read the Docs builds, emit a paragraph linking to the PDF docs.

    Outside RTD (or on non-RTD builds) nothing is produced.
    """
    is_rtd = False
    rst = []
    if "READTHEDOCS" in os.environ:
        project = os.environ["READTHEDOCS_PROJECT"]
        version = os.environ["READTHEDOCS_VERSION"]
        is_rtd = os.environ["READTHEDOCS"] == "True"
        if is_rtd:
            link = "https://readthedocs.org/projects/" \
                + "{}/downloads/pdf/{}/".format(project, version)
            rst = ["This documentation is also available as a "
                   + "`PDF <{}>`_.".format(link)]

    vl = ViewList(rst, "fakefile.rst")
    section = nodes.section()
    section.document = self.state.document
    nested_parse_with_titles(self.state, vl, section)
    return section.children
def run(self):
    """List plugins from a namespace, optionally with detailed sections."""
    namespace = ' '.join(self.content).strip()
    LOG.info('documenting plugins from %r' % namespace)

    over = self.options.get('overline-style', '')
    under = self.options.get('underline-style', '=')

    def report_load_failure(mgr, ep, err):
        # Log load failures instead of aborting the build.
        LOG.warning(u'Failed to load %s: %s' % (ep.module_name, err))

    mgr = extension.ExtensionManager(
        namespace,
        on_load_failure_callback=report_load_failure,
    )

    if 'detailed' in self.options:
        data = _detailed_list(
            mgr, over=over, under=under,
            titlecase='titlecase' in self.options)
    else:
        data = _simple_list(mgr)

    result = ViewList()
    for text, source in data:
        for text_line in text.splitlines():
            result.append(text_line, source)

    # Parse what we have into a new section.
    section = nodes.section()
    section.document = self.state.document
    nested_parse_with_titles(self.state, result, section)
    return section.children
def run(self):
    """Parse the reST produced by ``make_rst`` into a new section."""
    section = nodes.section()
    section.document = self.state.document
    view = ViewList()
    for rst_line in self.make_rst():
        view.append(rst_line, '<autoroutr>')
    nested_parse_with_titles(self.state, view, section)
    return section.children
def run(self):
    """Parse ``make_rst`` output, attributing lines to this directive class."""
    section = nodes.section()
    section.document = self.state.document
    source = '<{0}>'.format(self.__class__.__name__)
    view = ViewList()
    for rst_line in self.make_rst():
        view.append(rst_line, source)
    nested_parse_with_titles(self.state, view, section)
    return section.children
def _rest2node(self, rest, container=None):
    """Parse reST markup into a docutils node (a generic container unless
    a node factory is supplied)."""
    target = nodes.container() if container is None else container()
    nested_parse_with_titles(
        self.state, ViewList(prepare_docstring(rest)), target)
    return target
def _parse(self, rst_text, annotation):
    """Parse *rst_text* into nodes, labeling each line with *annotation*."""
    view = ViewList()
    for rst_line in rst_text.split("\n"):
        view.append(rst_line, annotation)
    holder = nodes.paragraph()
    holder.document = self.state.document
    nested_parse_with_titles(self.state, view, holder)
    return holder.children
def run(self):
    """Wrap the directive content in a ``div.form-group``.

    This directive is obsolete, AgreeItems can be placed alone.
    """
    css_classes = [u'form-group']
    if 'class' in self.options:
        css_classes.extend(self.options['class'])
    wrapper = aplus_nodes.html(u'div', {u'class': u" ".join(css_classes)})
    nested_parse_with_titles(self.state, self.content, wrapper)
    return [wrapper]
def run(self):
    """Include the body content only when the tag condition holds."""
    config = self.state.document.settings.env.config
    # Guard clause: a false condition yields no output at all.
    if not config._raw_config['tags'].eval_condition(self.arguments[0]):
        return []
    holder = nodes.Element()
    nested_parse_with_titles(self.state, self.content, holder)
    return holder.children
def run(self):
    """Parse the reST produced by ``self.get_rst()`` into docutils nodes.

    Depending on flags set on the directive class, the generated text is
    either pushed straight back into the state machine (``raw_insert``),
    parsed with title support (``titles_allowed``), or parsed as a plain
    paragraph.  Exceptions from ``get_rst`` become build warnings.
    """
    self.env = self.state.document.settings.env
    # Per-directive language override; falls back to the project language.
    self.language = self.options.get('language', self.env.config.language)
    self.env.temp_data['language'] = self.language
    # catch exceptions and report them together with the name of
    # the guilty file
    try:
        output = self.get_rst()
    except Exception as e:
        traceback.print_exc()
        document = self.state.document
        return [document.reporter.warning(str(e), line=self.lineno)]
    #~ output = output.decode('utf-8')
    if 'debug' in self.options:
        # Dump the generated reST to stdout for troubleshooting.
        print(self.env.docname)
        print('-' * 50)
        print(output)
        print('-' * 50)
    content = statemachine.StringList(
        output.splitlines(), self.state.document.current_source)
    # content = RSTStateMachine(output.splitlines())
    if self.raw_insert:
        # Feed the text straight into the state machine instead of
        # parsing it into nodes here.
        self.state_machine.insert_input(content, output)
        return []
    # print("20180821 {} {}".format(
    #     self.name, self.state.document.current_source))
    if self.titles_allowed:
        node = nodes.section()
        # necessary so that the child nodes get the right source/line set
        # self.state.parent.setup_child(node)
        # node.document = self.state.document
        nested_parse_with_titles(self.state, content, node)
    else:
        node = nodes.paragraph()
        # self.state.parent.setup_child(node)
        # node.document = self.state.document
        self.state.nested_parse(content, self.content_offset, node)
    # following lines originally copied from
    # docutils.parsers.rst.directives.tables.RSTTable
    #~ title, messages = self.make_title()
    # ~ node = nodes.Element()          # anonymous container for parsing
    #~ self.state.nested_parse(content, self.content_offset, node)
    #~ if len(node) != 1 or not isinstance(node[0], nodes.table):
    #~ error = self.state_machine.reporter.error(
    #~ 'Error parsing content block for the "%s" directive: exactly '
    #~ 'one table expected.' % self.name, nodes.literal_block(
    #~ self.block_text, self.block_text), line=self.lineno)
    #~ return [error]
    #~ return [x for x in node]
    return list(node)
def run(self):
    """Scan a repository's release notes and render a formatted report.

    Directive options select the branch, repo root, notes subdirectory and
    version range; the report text produced by ``formatter.format_report``
    is parsed as reST into a new section.
    """
    env = self.state.document.settings.env
    app = env.app

    def info(msg):
        app.info('[reno] %s' % (msg,))

    title = ' '.join(self.content)
    branch = self.options.get('branch')
    reporoot_opt = self.options.get('reporoot', '.')
    reporoot = os.path.abspath(reporoot_opt)
    relnotessubdir = self.options.get('relnotessubdir',
                                      defaults.RELEASE_NOTES_SUBDIR)
    conf = config.Config(reporoot, relnotessubdir)
    opt_overrides = {}
    if 'notesdir' in self.options:
        opt_overrides['notesdir'] = self.options.get('notesdir')
    version_opt = self.options.get('version')
    # FIXME(dhellmann): Force these flags True for now and figure
    # out how Sphinx passes a "false" flag later.
    # 'collapse-pre-releases' in self.options
    opt_overrides['collapse_pre_releases'] = True
    opt_overrides['stop_at_branch_base'] = True
    if 'earliest-version' in self.options:
        opt_overrides['earliest_version'] = self.options.get(
            'earliest-version')
    if branch:
        opt_overrides['branch'] = branch
    conf.override(**opt_overrides)
    notesdir = os.path.join(relnotessubdir, conf.notesdir)
    info('scanning %s for %s release notes' % (
        os.path.join(conf.reporoot, notesdir),
        branch or 'current branch'))
    ldr = loader.Loader(conf)
    # An explicit :version: option is a comma-separated list; otherwise
    # every version the loader discovered is included in the report.
    if version_opt is not None:
        versions = [
            v.strip()
            for v in version_opt.split(',')
        ]
    else:
        versions = ldr.versions
    info('got versions %s' % (versions,))
    text = formatter.format_report(
        ldr,
        versions,
        title=title,
    )
    source_name = '<' + __name__ + '>'
    result = statemachine.ViewList()
    for line in text.splitlines():
        result.append(line, source_name)

    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children
def run(self):
    """Convert a JavaScript source file to reST and parse it."""
    self.result = ViewList()
    source_path = self.arguments[0]
    filename = os.path.basename(source_path)
    section = nodes.section()
    section.document = self.state.document
    # add_lines feeds the generated documentation into self.result.
    self.add_lines(JavaScriptDocument(source_path).to_rst(self.options))
    nested_parse_with_titles(self.state, self.result, section)
    return section.children
def run(self):
    """Run the command from the directive arguments and parse its stdout.

    The command's stdin is connected to ``/dev/null``; its stdout is
    captured and parsed as reST into a fresh section.
    """
    command = sh_split(' '.join(self.arguments[0:]))
    # FIX: the /dev/null handle was never closed; a context manager
    # guarantees it is released once the process has been spawned.
    with open(os.devnull) as devnull:
        stdout = Popen(command, stdout=PIPE, stdin=devnull).communicate()[0]
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, ViewList(stdout.splitlines()), node)
    return node.children
def run(self):
    """Parse ``make_rst`` output for a yaml/json schema document."""
    assert self.document_type in ["yaml", "json"]
    section = nodes.section()
    section.document = self.state.document
    view = ViewList()
    for rst_line in self.make_rst():
        view.append(rst_line, "<rx>")
    nested_parse_with_titles(self.state, view, section)
    return section.children
def _rest2node(self, rest, container=None):
    """Parse reST markup into a node; defaults to a generic container."""
    if container is None:
        target = nodes.container()
    else:
        target = container()
    view = ViewList(prepare_docstring(rest))
    nested_parse_with_titles(self.state, view, target)
    return target
def add_instructions(self, node, data, plain_content):
    """Attach a 'more' instructions div parsed from *plain_content*.

    Does nothing when there is no instruction content.
    """
    if not plain_content:
        return
    more = aplus_nodes.html(u'div', {})
    more.store_html(u'more')
    nested_parse_with_titles(self.state, plain_content, more)
    node.append(more)
    data[u'more'] = (u'#!html', u'more')
def parse(self, text):
    """Parse *text* as reST; fall back to a plain paragraph when empty."""
    holder = nodes.paragraph()
    view = ViewList()
    for raw_line in text.splitlines():
        view.append(raw_line, '<xmlschema>')
    nested_parse_with_titles(self.state, view, holder)
    # EAFP: an empty parse result falls through to a literal paragraph.
    try:
        return holder[0]
    except IndexError:
        return build_paragraph(text)
def run(self):
    """Build the cogbin listing section from generated reST lines."""
    view = ViewList()
    for rst_line in _get_cogbin_data():
        view.append(rst_line, '<cogbin>')
    section = nodes.section()
    section.document = self.state.document
    nested_parse_with_titles(self.state, view, section)
    return section.children
def run(self):
    """Render an example script as a titled page with image, download link
    and highlighted source.

    The target file is resolved against the ``fancy_include_path`` config
    value; its module docstring becomes the page body (first line promoted
    to a title) and the remaining source is shown as a code block.
    """
    path = self.state.document.settings.env.config.fancy_include_path
    full_path = op.join(path, self.arguments[0])
    # NOTE(review): io.open is called without an explicit encoding —
    # assumes the platform default can decode the example file; confirm.
    with io.open(full_path, "r") as myfile:
        text = myfile.read()
    # add reference
    name = op.basename(full_path)[:-3]  # strip the '.py' suffix
    rst = [".. _example_{}:".format(name),
           "",
           ]
    # add docstring
    # Splitting on the docstring delimiter gives: [1] docstring, [2] code.
    source = text.split('"""')
    doc = source[1].split("\n")
    doc.insert(1, "~" * len(doc[0]))  # make title heading
    code = source[2].split("\n")
    for line in doc:
        rst.append(line)
    # image
    # First matching extension wins; otherwise no figure is emitted.
    for ext in [".png", ".jpg"]:
        image_path = full_path[:-3] + ext
        if op.exists(image_path):
            break
    else:
        image_path = ""
    if image_path:
        rst.append(".. figure:: {}".format(image_path))
        rst.append("")
    # download file
    rst.append(":download:`{}<{}>`".format(
        op.basename(full_path), full_path))
    # code
    rst.append("")
    rst.append(".. code-block:: python")
    rst.append(" :linenos:")
    rst.append("")
    for line in code:
        rst.append(" {}".format(line))
    rst.append("")
    vl = ViewList(rst, "fakefile.rst")
    # Create a node.
    node = nodes.section()
    node.document = self.state.document
    # Parse the rst.
    nested_parse_with_titles(self.state, vl, node)
    return node.children
def run(self):
    """Parse content into a paragraph, dropping a leading nested title."""
    holder = nodes.paragraph()
    holder.document = self.state.document
    nested_parse_with_titles(self.state, self.content, holder)
    # If the first child of the first node is a title, remove it; an
    # empty result simply passes through.
    try:
        if isinstance(holder[0][0], nodes.title):
            del holder[0][0]
    except IndexError:
        pass
    return [holder]
def run(self):
    """Register dependencies from roman-numeral arguments on this chunk."""
    chunk = self.get_chunk_for_node(self.state.document, self.state)
    parsed = docutils.nodes.paragraph()
    nested_parse_with_titles(self.state, self.content, parsed)
    for argument in self.check_argument_is_roman(self.arguments):
        chunk.parse_dependencies(parsed, argument)
    return []
def run(self):
    """Render a literate YAML file as alternating annotation/content pairs.

    ``parse_lit`` yields ``(is_doc, lines)`` chunks; documentation chunks
    are parsed as reST and code chunks become YAML literal blocks.  Each
    annotation/content pair is collected into one enumerated-list item
    styled via the ``lit-*`` CSS classes.
    """
    try:
        lines = self.parse_lit(self.parse_file(self.arguments[0]))
    except IOError as exc:
        # A missing/unreadable file becomes a build warning, not a crash.
        document = self.state.document
        return [document.reporter.warning(str(exc), line=self.lineno)]

    node = nodes.container()
    node['classes'] = ['lit-container']
    node.document = self.state.document

    enum = nodes.enumerated_list()
    enum['classes'] = ['lit-docs']
    node.append(enum)

    # make first list item
    list_item = nodes.list_item()
    list_item['classes'] = ['lit-item']

    for is_doc, line in lines:
        # Skip empty documentation chunks so they don't create blank items.
        if is_doc and line == ['']:
            continue
        section = nodes.section()
        if is_doc:
            section['classes'] = ['lit-annotation']
            nested_parse_with_titles(self.state, ViewList(line), section)
        else:
            section['classes'] = ['lit-content']
            code = '\n'.join(line)
            literal = nodes.literal_block(code, code)
            literal['language'] = 'yaml'
            set_source_info(self, literal)
            section.append(literal)
        list_item.append(section)

        # If we have a pair of annotation/content items, append the list
        # item and create a new list item
        if len(list_item.children) == 2:
            enum.append(list_item)
            list_item = nodes.list_item()
            list_item['classes'] = ['lit-item']

    # Non-semantic div for styling
    bg = nodes.container()
    bg['classes'] = ['lit-background']
    node.append(bg)

    return [node]
def _generate_nodes(
    self,
    name: str,
    command: click.Command,
    parent: ty.Optional[click.Context],
    nested: str,
    commands: ty.Optional[ty.List[str]] = None,
    semantic_group: bool = False,
) -> ty.List[nodes.section]:
    """Generate the relevant Sphinx nodes.

    Format a `click.Group` or `click.Command`.

    :param name: Name of command, as used on the command line
    :param command: Instance of `click.Group` or `click.Command`
    :param parent: Instance of `click.Context`, or None
    :param nested: The granularity of subcommand details.
    :param commands: Display only listed commands or skip the section if
        empty
    :param semantic_group: Display command as title and description for
        `click.CommandCollection`.
    :returns: A list of nested docutil nodes
    """
    ctx = click.Context(command, info_name=name, parent=parent)

    # Hidden commands produce no output at all.
    if command.hidden:
        return []

    # Title
    section = nodes.section(
        '',
        nodes.title(text=name),
        ids=[nodes.make_id(ctx.command_path)],
        names=[nodes.fully_normalize_name(ctx.command_path)],
    )

    # Summary
    source_name = ctx.command_path
    result = statemachine.ViewList()

    if semantic_group:
        lines = _format_description(ctx)
    else:
        lines = _format_command(ctx, nested, commands)

    for line in lines:
        LOG.debug(line)
        result.append(line, source_name)

    sphinx_nodes.nested_parse_with_titles(self.state, result, section)

    # Subcommands
    if nested == NESTED_FULL:
        if isinstance(command, click.CommandCollection):
            # Recurse into each source group as a semantic group.
            for source in command.sources:
                section.extend(
                    self._generate_nodes(
                        source.name,
                        source,
                        parent=ctx,
                        nested=nested,
                        semantic_group=True,
                    ))
        else:
            commands = _filter_commands(ctx, commands)
            for command in commands:
                # Inside a semantic group the real parent context is one up.
                parent = ctx if not semantic_group else ctx.parent
                section.extend(
                    self._generate_nodes(command.name,
                                         command,
                                         parent=parent,
                                         nested=nested))

    return [section]
def run(self):
    """Build a questionnaire exercise: an HTML form plus its YAML config.

    Directive options are parsed into CSS classes and feedback flags, the
    nested question content is wrapped in a ``<form>``, and the exercise
    configuration YAML is written at the end.
    """
    self.assert_has_content()
    key, difficulty, points = self.extract_exercise_arguments()

    # Parse options.
    classes = [u'exercise']
    is_feedback = False
    if 'chapter-feedback' in self.options:
        classes.append(u'chapter-feedback')
        is_feedback = True
    if 'weekly-feedback' in self.options:
        classes.append(u'weekly-feedback')
        is_feedback = True
    if 'appendix-feedback' in self.options:
        classes.append(u'appendix-feedback')
        is_feedback = True
    if 'course-feedback' in self.options:
        classes.append(u'course-feedback-questionnaire')
        is_feedback = True
    if 'feedback' in self.options:
        is_feedback = True
    if is_feedback:
        # Feedback questionnaires share a fixed key and category.
        key = u'feedback'
        category = u'feedback'
        classes.append(u'feedback')
    else:
        category = u'questionnaire'
    if difficulty:
        classes.append(u'difficulty-' + difficulty)

    env = self.state.document.settings.env
    name = u"{}_{}".format(env.docname.replace(u'/', u'_'), key)
    override = env.config.override
    env.questionnaire_is_feedback = is_feedback
    # Question directives inside the form increment this counter.
    env.question_count = 0

    # Create document elements.
    node = aplus_nodes.html(u'div', {
        u'class': u' '.join(classes),
        u'data-aplus-exercise': u'yes',
        u'data-aplus-quiz': u'yes',
    })
    form = aplus_nodes.html(u'form', {
        u'action': key,
        u'method': u'post',
    })
    nested_parse_with_titles(self.state, self.content, form)

    submit = aplus_nodes.html(u'input', {
        u'type': u'submit',
        u'value': translations.get(env, u'submit'),
        u'class': u'btn btn-primary',
    }, skip_html=True)
    form.append(submit)
    node.append(form)

    # Write configuration file.
    data = {
        u'key': name,
        u'category': category,
        u'max_points': points,
        u'difficulty': difficulty or '',
        u'max_submissions': self.options.get(
            'submissions',
            0 if is_feedback else env.config.questionnaire_default_submissions),
        u'min_group_size': 1 if is_feedback else env.config.default_min_group_size,
        u'max_group_size': 1 if is_feedback else env.config.default_max_group_size,
        u'points_to_pass': self.options.get('points-to-pass', 0),
        u'feedback': is_feedback,
        u'view_type': u'access.types.stdsync.createForm',
        u'title|i18n': translations.opt('feedback') if is_feedback
            else translations.opt('exercise', postfix=u" {}".format(key)),
        u'fieldgroups': [{
            u'title': '',
            u'fields': (u'#!children', None),
        }],
    }
    # Apply per-category config overrides unless explicitly disabled.
    if not 'no-override' in self.options and category in override:
        data.update(override[category])
        if 'url' in data:
            data['url'] = data['url'].format(key=name)
    if "pick_randomly" in self.options:
        pick_randomly = self.options.get('pick_randomly', 0)
        if pick_randomly < 1:
            raise SphinxError(
                u'Number of fields to sample randomly should greater than zero.'
            )
        data[u'fieldgroups'][0]['pick_randomly'] = pick_randomly
    form.write_yaml(env, name, data, 'exercise')

    return [node]
def run(self):
    """Build a 'submit' exercise div and write its YAML configuration.

    The exercise config is either loaded from a YAML file (``:config:``)
    or assembled from directive options (``:url:``, ``:lti:``, ...), then
    merged with category overrides and attached to the emitted div node.
    """
    key, difficulty, points = self.extract_exercise_arguments()

    env = self.state.document.settings.env
    name = u"{}_{}".format(env.docname.replace(u'/', u'_'), key)
    override = env.config.override

    classes = [u'exercise']
    if 'class' in self.options:
        classes.extend(self.options['class'])
    if difficulty:
        classes.append(u'difficulty-' + difficulty)

    # Add document nodes.
    args = {
        u'class': u' '.join(classes),
        u'data-aplus-exercise': u'yes',
    }
    if 'quiz' in self.options:
        args[u'data-aplus-quiz'] = u'yes'
    if 'ajax' in self.options:
        args[u'data-aplus-ajax'] = u'yes'
    node = aplus_nodes.html(u'div', args)

    key_title = u"{} {}".format(translations.get(env, 'exercise'), key)

    # Load or create exercise configuration.
    if 'config' in self.options:
        path = os.path.join(env.app.srcdir, self.options['config'])
        if not os.path.exists(path):
            raise SphinxError('Missing config path {}'.format(self.options['config']))
        data = yaml_writer.read(path)
        config_title = data.get(u'title', None)
    else:
        data = {u'_external': True}
        if 'url' in self.options:
            data[u'url'] = ensure_unicode(self.options['url'])
        if 'lti' in self.options:
            data.update({
                u'lti': ensure_unicode(self.options['lti']),
                u'lti_context_id': ensure_unicode(self.options.get('lti_context_id', u'')),
                u'lti_resource_link_id': ensure_unicode(self.options.get('lti_resource_link_id', u'')),
            })
        if 'lti_aplus_get_and_post' in self.options:
            data.update({u'lti_aplus_get_and_post': True})
        if 'lti_open_in_iframe' in self.options:
            data.update({u'lti_open_in_iframe': True})
        config_title = None

    # An explicit :title: option beats the title from the config file.
    config_title = self.options.get('title', config_title)

    if "radar_tokenizer" in self.options or "radar_minimum_match_tokens" in self.options:
        data[u'radar_info'] = {
            u'tokenizer': self.options.get("radar_tokenizer"),
            u'minimum_match_tokens': self.options.get("radar_minimum_match_tokens"),
        }

    category = u'submit'
    data.update({
        u'key': name,
        u'title': env.config.submit_title.format(
            key_title=key_title, config_title=config_title
        ),
        u'category': u'submit',
        u'scale_points': points,
        u'difficulty': difficulty or '',
        u'max_submissions': self.options.get('submissions', data.get('max_submissions', env.config.program_default_submissions)),
        u'min_group_size': data.get('min_group_size', env.config.default_min_group_size),
        u'max_group_size': data.get('max_group_size', env.config.default_max_group_size),
        u'points_to_pass': self.options.get('points-to-pass', data.get('points_to_pass', 0)),
    })
    self.set_assistant_permissions(data)

    if self.content:
        self.assert_has_content()
        nested_parse_with_titles(self.state, self.content, node)
        # Sphinx can not compile the nested RST into HTML at this stage, hence
        # the HTML instructions defined in this directive body are added to
        # the exercise YAML file only at the end of the build. Sphinx calls
        # the visit functions of the nodes in the last writing phase.
        # The instructions are added to the YAML file in the depart_html
        # function in aplus_nodes.py.
    else:
        paragraph = aplus_nodes.html(u'p', {})
        paragraph.append(nodes.Text(translations.get(env, 'submit_placeholder')))
        node.append(paragraph)

    data.setdefault('status', self.options.get('status', 'unlisted'))

    # Apply per-category config overrides.
    if category in override:
        data.update(override[category])
        if 'url' in data:
            data['url'] = data['url'].format(key=name)

    if 'category' in self.options:
        data['category'] = str(self.options['category'])

    node.write_yaml(env, name, data, 'exercise')

    return [node]
def run(self):
    """Render a class attribute's options dictionary as an RST grid table.

    Arguments are ``(module_path, class_name, attribute_name)``.  The class
    is imported and instantiated, and each entry of the options dictionary
    becomes one row of a fixed-width table with headers
    Option / Default / Acceptable Values / Acceptable Types / Description.

    :returns: child nodes of the parsed section.
    """
    # FIX: import via importlib/getattr instead of exec() — same effect
    # without executing dynamically-built code in the module globals.
    import importlib

    module_path = class_name = attribute_name = None
    if self.arguments and self.arguments[0]:
        module_path = self.arguments[0]
    if self.arguments and self.arguments[1]:
        class_name = self.arguments[1]
    if self.arguments and self.arguments[2]:
        attribute_name = self.arguments[2]

    mod = importlib.import_module(module_path)
    obj = getattr(mod, class_name)()
    options = getattr(obj, attribute_name)

    # Collect one row per option: name, default, values, types, description.
    outputs = []
    for option_name, option_data in sorted(iteritems(options._dict)):
        name = option_name
        default = option_data['value']
        values = option_data['values']
        types = option_data['types']
        desc = option_data['desc']

        if types is None:
            types = "N/A"
        else:
            if not isinstance(types, (tuple, list)):
                types = (types,)
            types = [type_.__name__ for type_ in types]

        if values is None:
            values = "N/A"
        else:
            if not isinstance(values, (tuple, list)):
                values = (values,)
            values = list(values)

        outputs.append([name, default, values, types, desc])

    lines = ViewList()

    col_heads = ['Option', 'Default', 'Acceptable Values',
                 'Acceptable Types', 'Description']

    # Column widths: the max of the header and every cell in that column.
    max_sizes = {}
    for j, col in enumerate(col_heads):
        max_sizes[j] = len(col)
    for output in outputs:
        for j, item in enumerate(output):
            length = len(str(item))
            if max_sizes[j] < length:
                max_sizes[j] = length

    # Build the '===' rule line and the padded title line.
    header = ""
    titles = ""
    for key, val in iteritems(max_sizes):
        header += '=' * val + ' '
    for j, head in enumerate(col_heads):
        titles += "%s " % head
        space = max_sizes[j] - len(head)
        if space > 0:
            titles += space * ' '

    lines.append(header, "options table", 1)
    lines.append(titles, "options table", 2)
    lines.append(header, "options table", 3)

    # One padded row per option, then the closing rule line.
    n = 3
    for output in outputs:
        line = ""
        for j, item in enumerate(output):
            line += "%s " % str(item)
            space = max_sizes[j] - len(str(item))
            if space > 0:
                line += space * ' '
        lines.append(line, "options table", n)
        n += 1
    lines.append(header, "options table", n)

    # Create a node.
    node = nodes.section()
    node.document = self.state.document

    # Parse the rst.
    nested_parse_with_titles(self.state, lines, node)

    # And return the result.
    return node.children
def run(self):
    """Build a multiple-choice question (radio / checkbox / dropdown).

    The directive body is split on blank lines into optional instruction
    content, answer choices, and an optional feedback paragraph.  Each
    choice line starts with a key carrying prefix flags: ``+`` preselected,
    ``*`` correct, ``?`` neutral.
    """
    self.assert_has_content()

    # Detect paragraphs: any number of plain content, choices and optional feedback.
    empty_lines = list(loc for loc, line in enumerate(self.content) if line == '')
    plain_content = None
    choices = []
    feedback = []
    if len(empty_lines) > 0:
        last = self.content[(empty_lines[-1] + 1):]

        def split_second_last(empty_lines):
            # Split content at the second-to-last blank line (if any).
            if len(empty_lines) > 1:
                return self.content[:empty_lines[-2]], self.content[(empty_lines[-2] + 1):empty_lines[-1]]
            else:
                return None, self.content[:empty_lines[-1]]

        # Backwards compatibility for skipping feedback paragraph.
        if len(last) == 1 and last[0].startswith('I hereby declare that no feedback '):
            plain_content, choices = split_second_last(empty_lines)
        elif all('§' in line for line in last):
            # Last paragraph is per-choice feedback (every line has '§').
            plain_content, choices = split_second_last(empty_lines)
            feedback = last
        else:
            plain_content = self.content[:empty_lines[-1]]
            choices = last
    else:
        choices = self.content

    # Create question.
    env, node, data = self.create_question()
    self.add_instructions(node, data, plain_content)
    data['options'] = ('#!children', 'option')
    if 'partial-points' in self.options:
        data['partial_points'] = True

    dropdown = None
    if self.grader_field_type() == 'dropdown':
        # The HTML select element has a different structure compared
        # to the input elements (radio buttons and checkboxes).
        dropdown = aplus_nodes.html('select', {
            'name': 'field_{:d}'.format(env.question_count - 1),
        })

    correct_count = 0
    # Travel all answer options.
    for i, line in slicer(choices):
        # Split choice key off.
        key, content = line[0].split(' ', 1)
        key = key.strip()
        line[0] = content.strip()

        # Trim the key.
        correct = False
        selected = False
        if key.startswith('+'):
            selected = True
            key = key[1:]
        if key.startswith('*'):
            correct = True
            key = key[1:]
            correct_count += 1
        elif key.startswith('?'):
            correct = "neutral"
            key = key[1:]
        if key.endswith('.'):
            key = key[:-1]

        # Add YAML configuration data.
        optdata = {
            'value': key,
        }
        if correct:
            optdata['correct'] = correct
        if selected:
            optdata['selected'] = True

        # Create document elements.
        if dropdown is None:
            # One answer alternative as a radio button or a checkbox.
            choice = aplus_nodes.html('div', {'class': 'radio'})
            label = aplus_nodes.html('label', {})
            attrs = {
                'type': self.input_type(),
                'name': 'field_{:d}'.format(env.question_count - 1),
                'value': key,
            }
            if selected:
                attrs['checked'] = 'checked'
            label.append(aplus_nodes.html('input', attrs))
            choice.append(label)
            node.append(choice)

            text = aplus_nodes.html('span', {})
            text.store_html('label')
            nested_parse_with_titles(self.state, line, text)
            label.append(text)

            optdata['label'] = ('#!html', 'label')
            choice.set_yaml(optdata, 'option')
        else:
            # Add option elements to the select.
            # Options may only contain plain text, not formatted HTML.
            attrs = {
                'value': key,
            }
            if selected:
                attrs['selected'] = 'selected'
            option = aplus_nodes.html('option', attrs)
            text = line[0]
            option.append(nodes.Text(text))
            dropdown.append(option)
            optdata['label'] = html.escape(text)
            option.set_yaml(optdata, 'option')

    if dropdown is not None:
        node.append(dropdown)

    if 'randomized' in self.options:
        data['randomized'] = self.options.get('randomized', 1)
        if data['randomized'] > len(choices):
            source, line = self.state_machine.get_source_and_line(self.lineno)
            raise SphinxError(source + ": line " + str(line) +
                "\nThe option 'randomized' can not be greater than the number of answer choices!")
        if 'correct-count' in self.options:
            data['correct_count'] = self.options.get('correct-count', 0)
            if data['correct_count'] > correct_count or data['correct_count'] > data['randomized']:
                source, line = self.state_machine.get_source_and_line(self.lineno)
                raise SphinxError(source + ": line " + str(line) +
                    "\nThe option 'correct-count' can not be greater than "
                    "the number of correct choices or the value of 'randomized'!")
        if 'preserve-questions-between-attempts' in self.options:
            data['resample_after_attempt'] = False
        env.aplus_random_question_exists = True

    self.add_feedback(node, data, feedback)
    return [node]
def run(self):
    """Render the deliverables listing for this directive.

    Builds an RST representation of deliverable data (filtered by the
    optional ``series`` and ``team`` directive options), parses it, and
    returns the resulting document nodes.
    """
    env = self.state.document.settings.env
    app = env.app
    # The series value is optional for some directives. If it is
    # present but an empty string, convert to None so the
    # Deliverables class will treat it like a wildcard.
    series = self.options.get('series') or None
    # If the user specifies a team, track only the deliverables
    # for that team.
    self.team_name = self.options.get('team') or None
    result = ViewList()
    # Assemble all of the deliverable data to be displayed and
    # build the RST representation.
    # get_deliverables() -> (team, series, deliverable, info)
    if self.team_name:
        # All deliverables for the team are shown, in alphabetical
        # order. They are organized by series but not type.
        d_source = itertools.groupby(
            sorted(_deliverables.get_deliverables(self.team_name, series)),
            key=operator.attrgetter('series'))
        for s, d in d_source:
            self._add_deliverables(
                None,
                d,
                s,
                app,
                result,
            )
    else:
        # Only the deliverables for the given series are
        # shown. They are categorized by type, which we need to
        # extract from the data.
        raw_deliverables = [
            (_get_category(deliv), deliv)
            for deliv in _deliverables.get_deliverables(
                self.team_name,
                series,
            )
        ]
        grouped = itertools.groupby(
            sorted(raw_deliverables),
            key=operator.itemgetter(0),  # the category
        )
        # Convert the grouping iterators to a dictionary mapping
        # type to the list of tuples with deliverable name and
        # parsed deliverable info that _add_deliverables() needs.
        by_category = {}
        for deliverable_category, deliverables in grouped:
            by_category[deliverable_category] = [
                d[1] for d in deliverables
            ]
        # Emit categories in a fixed, curated order rather than
        # alphabetically.
        for category in self._CATEGORY_ORDER:
            if category not in by_category:
                app.info('No %r for %s' %
                         (category, (self.team_name, series)))
                continue
            self._add_deliverables(
                category,
                by_category[category],
                series,
                app,
                result,
            )
    # NOTE(dhellmann): Useful for debugging.
    # print('\n'.join(result))
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children
def run(self):
    """Render documentation for all options in an oslo.config namespace.

    The directive content names the configuration namespace(s); the
    registered options are grouped by section, formatted as RST
    definition lists (type, default, bounds, choices, deprecations),
    parsed, and returned as document nodes.
    """
    env = self.state.document.settings.env
    app = env.app
    namespace = ' '.join(self.content)
    opts = generator._list_opts([namespace])
    result = ViewList()
    source_name = '<' + __name__ + '>'

    def _add(text):
        "Append some text to the output result view to be parsed."
        result.append(text, source_name)

    def _add_indented(text):
        """Append some text, indented by a couple of spaces.

        Indent everything under the option name, to format it as a
        definition list.
        """
        _add(_indent(text))

    # Merge the option lists from all namespaces, keyed by group name.
    # NOTE: the inner loop variable is deliberately not named 'opts'
    # (the original shadowed the outer generator result).
    by_section = {}
    for ignore, opt_list in opts:
        for group_name, group_opts in opt_list:
            by_section.setdefault(group_name, []).extend(group_opts)

    for group_name, opt_list in sorted(by_section.items()):
        group_name = group_name or 'DEFAULT'
        app.info('[oslo.config] %s %s' % (namespace, group_name))
        _add(group_name)
        _add('=' * len(group_name))
        _add('')
        for opt in opt_list:
            opt_type = self._TYPE_DESCRIPTIONS.get(type(opt),
                                                   'unknown type')
            _add('``%s``' % opt.dest)
            _add('')
            _add_indented(':Type: %s' % opt_type)
            for default in generator._format_defaults(opt):
                if default:
                    default = '``' + default + '``'
                _add_indented(':Default: %s' % default)
            # BUG FIX: compare against None explicitly. The previous
            # truthiness test skipped legitimate boundary values of 0
            # (e.g. an Integer option with min=0 was rendered without
            # its minimum).
            if getattr(opt.type, 'min', None) is not None:
                _add_indented(':Minimum Value: %s' % opt.type.min)
            if getattr(opt.type, 'max', None) is not None:
                _add_indented(':Maximum Value: %s' % opt.type.max)
            if getattr(opt.type, 'choices', None):
                choices_text = ', '.join([
                    self._get_choice_text(choice)
                    for choice in opt.type.choices
                ])
                _add_indented(':Valid Values: %s' % choices_text)
            _add('')
            _add_indented(opt.help)
            _add('')
            if opt.deprecated_opts:
                _list_table(
                    _add_indented,
                    ['Group', 'Name'],
                    ((d.group or 'DEFAULT', d.name or opt.dest or 'UNSET')
                     for d in opt.deprecated_opts),
                    title='Deprecated Variations',
                )
            if opt.deprecated_for_removal:
                _add_indented('.. warning::')
                _add_indented('   This option is deprecated for removal.')
                _add_indented('   Its value may be silently ignored ')
                _add_indented('   in the future.')
                if opt.deprecated_reason:
                    _add_indented('   Reason: ' + opt.deprecated_reason)
                _add('')
    _add('')

    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children
def run(self):
    """Embed an OpenMDAO N2 diagram for the model given as an argument.

    Runs ``openmdao n2`` on the model file to produce a standalone HTML
    page, then returns nodes for a raw-HTML ``<iframe>`` pointing at it.

    Optional extra arguments: width, height (in that order), and the
    literal string ``toolbar`` to show the N2 toolbar.
    """
    path_to_model = self.arguments[0]
    # Default iframe size: [width, height] in pixels.
    n2_dims = [1200, 700]
    show_toolbar = False
    if len(self.arguments) > 1 and self.arguments[1]:
        # Non-"toolbar" arguments are consumed positionally: the first
        # sets the width, every later one overwrites the height.
        n2_dim_idx = 0
        for idx in range(1, len(self.arguments)):
            if self.arguments[idx] == "toolbar":
                show_toolbar = True
            else:
                n2_dims[n2_dim_idx] = self.arguments[idx]
                n2_dim_idx = 1
    # NOTE(review): 'np' here is a filesystem path, not numpy — the name
    # shadows the common numpy alias; keep that in mind when editing.
    np = os.path.normpath(os.path.join(os.getcwd(), path_to_model))

    # check that the file exists
    if not os.path.isfile(np):
        raise IOError("File does not exist({0})".format(np))

    # Generate N2 files into the target_dir. Those files are later copied
    # into the top of the HTML hierarchy, so the HTML doc file needs a
    # relative path to them.
    target_dir = os.path.join(os.getcwd(), "_n2html")

    rel_dir = os.path.relpath(
        os.getcwd(),
        os.path.dirname(self.state.document.settings._source))
    html_base_name = os.path.basename(path_to_model).split(
        ".")[0] + "_n2.html"
    html_name = os.path.join(target_dir, html_base_name)
    html_rel_name = os.path.join(rel_dir, html_base_name)
    if show_toolbar:
        # The generated page reads the fragment to decide whether to
        # display its toolbar — presumably; confirm against the N2 viewer.
        html_rel_name += "#toolbar"

    # Generate the diagram with the openmdao CLI; communicate() blocks
    # until generation finishes so the file exists before we link to it.
    cmd = subprocess.Popen([
        "openmdao", "n2", np, "--no_browser", "--embed", "-o" + html_name
    ])
    cmd_out, cmd_err = cmd.communicate()

    rst = ViewList()

    # Add the content one line at a time.
    # Second argument is the filename to report in any warnings
    # or errors, third argument is the line number.
    env = self.state.document.settings.env
    docname = env.doc2path(env.docname)

    object_tag = ("<iframe width='" + str(n2_dims[0]) + "'"
                  " height='" + str(n2_dims[1]) + "'"
                  " style='border: 1px solid lightgray; resize: both;'"
                  " src='" + html_rel_name + "'></iframe>")

    rst.append(".. raw:: html", docname, self.lineno)
    rst.append("", docname, self.lineno)  # leave an empty line
    rst.append("    %s" % object_tag, docname, self.lineno)

    # Create a node.
    node = nodes.section()

    # Parse the rst.
    nested_parse_with_titles(self.state, rst, node)

    # And return the result.
    return node.children
def _construct_manpage_specific_structure(self, parser_info):
    """
    Construct a typical man page consisting of the following elements:
        NAME (automatically generated, out of our control)
        SYNOPSIS
        DESCRIPTION
        OPTIONS
        FILES
        SEE ALSO
        BUGS

    :param parser_info: dict describing the argparse parser
        (keys used here: ``bare_usage``, ``description``/``help``,
        ``epilog``, ``args``, ``options``, ``action_groups``,
        ``children``).
    :return: list of section nodes to emit, in man-page order.
    """
    # SYNOPSIS section
    synopsis_section = nodes.section(
        '',
        nodes.title(text='Synopsis'),
        nodes.literal_block(text=parser_info["bare_usage"]),
        ids=['synopsis-section'])
    # DESCRIPTION section — falls back to the capitalized 'help' text,
    # then to "undocumented", when no description is present.
    description_section = nodes.section(
        '',
        nodes.title(text='Description'),
        nodes.paragraph(text=parser_info.get(
            'description',
            parser_info.get('help', "undocumented").capitalize())),
        ids=['description-section'])
    # The directive body (self.content) is parsed into the description.
    nested_parse_with_titles(self.state, self.content, description_section)
    if parser_info.get('epilog'):
        # TODO: do whatever sphinx does to understand ReST inside
        # docstrings magically imported from other places. The nested
        # parse method invoked above seem to be able to do this but
        # I haven't found a way to do it for arbitrary text
        description_section += nodes.paragraph(text=parser_info['epilog'])
    # OPTIONS section
    options_section = nodes.section(
        '',
        nodes.title(text='Options'),
        ids=['options-section'])
    if 'args' in parser_info:
        options_section += nodes.paragraph()
        options_section += nodes.subtitle(text='Positional arguments:')
        options_section += self._format_positional_arguments(parser_info)
    for action_group in parser_info['action_groups']:
        if 'options' in parser_info:
            options_section += nodes.paragraph()
            options_section += nodes.subtitle(text=action_group['title'])
            options_section += self._format_optional_arguments(
                action_group)
    items = [
        # NOTE: we cannot generate NAME ourselves. It is generated by
        # docutils.writers.manpage
        synopsis_section,
        description_section,
        # TODO: files
        # TODO: see also
        # TODO: bugs
    ]
    # Only include OPTIONS if something beyond the title was added.
    if len(options_section.children) > 1:
        items.append(options_section)
    if 'nosubcommands' not in self.options:
        # SUBCOMMANDS section (non-standard)
        subcommands_section = nodes.section(
            '',
            nodes.title(text='Sub-Commands'),
            ids=['subcommands-section'])
        if 'children' in parser_info:
            subcommands_section += self._format_subcommands(parser_info)
        if len(subcommands_section) > 1:
            items.append(subcommands_section)
    # Opt-in debugging aid: dump the raw parser description.
    if os.getenv("INCLUDE_DEBUG_SECTION"):
        import json
        # DEBUG section (non-standard)
        debug_section = nodes.section(
            '',
            nodes.title(text="Argparse + Sphinx Debugging"),
            nodes.literal_block(text=json.dumps(parser_info, indent='  ')),
            ids=['debug-section'])
        items.append(debug_section)
    return items
def run(self):
    """
    Extracts the information in a dictionary, runs the script embedded
    in the directive body, and builds the document nodes holding the
    (optionally shown) code and its output.

    @return      a list of nodes
    """
    # settings
    sett = self.state.document.settings
    language_code = sett.language_code
    lineno = self.lineno

    # add the instance to the global settings
    if hasattr(sett, "out_runpythonlist"):
        sett.out_runpythonlist.append(self)

    # env
    if hasattr(self.state.document.settings, "env"):
        env = self.state.document.settings.env
    else:
        env = None

    if env is None:
        docname = "___unknown_docname___"
    else:
        docname = env.docname

    # post
    # Values accepted as "true" for option flags; bool_set_ additionally
    # accepts the empty string (a bare option with no value).
    bool_set = (True, 1, "True", "1", "true")
    bool_set_ = (True, 1, "True", "1", "true", '')
    # All directive options collapsed into one dict of plain values.
    p = {
        'showcode': 'showcode' in self.options,
        'linenos': 'linenos' in self.options,
        'showout': 'showout' in self.options,
        'rst': 'rst' in self.options,
        'sin': self.options.get('sin', TITLES[language_code]["In"]),
        'sout': self.options.get('sout', TITLES[language_code]["Out"]),
        'sout2': self.options.get('sout2', TITLES[language_code]["Out2"]),
        'sphinx': 'sphinx' not in self.options or self.options['sphinx'] in bool_set,
        'setsysvar': self.options.get('setsysvar', None),
        'process': 'process' in self.options and self.options['process'] in bool_set_,
        'exception': 'exception' in self.options and self.options['exception'] in bool_set_,
        'nopep8': 'nopep8' in self.options and self.options['nopep8'] in bool_set_,
        'warningout': self.options.get('warningout', '').strip(),
        'toggle': self.options.get('toggle', '').strip(),
        'current': 'current' in self.options and self.options['current'] in bool_set_,
        'assert': self.options.get('assert', '').strip(),
        'language': self.options.get('language', '').strip(),
        'store_in_file': self.options.get('store_in_file', None),
        'numpy_precision': self.options.get('numpy_precision', '3').strip(),
        'store': 'store' in self.options and self.options['store'] in bool_set_,
        'restore': 'restore' in self.options and self.options['restore'] in bool_set_,
    }

    # A bare ':setsysvar:' falls back to a default variable name.
    if p['setsysvar'] is not None and len(p['setsysvar']) == 0:
        p['setsysvar'] = 'enable_disabled_documented_pieces_of_code'
    dind = 0 if p['rst'] else 4
    p['indent'] = int(self.options.get("indent", dind))

    # run the script
    # Wrap the user code either in a plain `if True:` block (separate
    # process) or in a uniquely named function that is called at the end.
    name = "run_python_script_{0}".format(id(p))
    if p['process']:
        content = ["if True:"]
    else:
        content = ["def {0}():".format(name)]

    # If the snippet mentions numpy, pin the print precision so output
    # is reproducible across builds.
    if "numpy" in "\n".join(self.content) and p['numpy_precision'] not in (
            None, 'None', '-', ''):
        try:
            import numpy  # pylint: disable=W0611
            prec = int(p['numpy_precision'])
            content.append("    import numpy")
            content.append("    numpy.set_printoptions(%d)" % prec)
        except (ImportError, ValueError):
            pass

    # Placeholder replaced below with the working-directory assignment.
    content.append('    ## __WD__ ##')

    if p["restore"]:
        # Re-inject variables stored by a previous `:store:` run.
        context = getattr(env, "runpython_context", None)
        for k in sorted(context):
            content.append(
                "    {0} = globals()['__runpython__{0}']".format(k))
    else:
        context = None

    modified_content = self.modify_script_before_running(
        "\n".join(self.content))

    if p['assert']:
        # Append one `if not(cond): raise AssertionError` pair per
        # condition listed in the :assert: option.
        footer = []
        assert_condition = p['assert'].split('\n')
        for cond in assert_condition:
            footer.append("if not({0}):".format(cond))
            footer.append(
                "    raise AssertionError('''Condition '{0}' failed.''')".format(cond))
        modified_content += "\n\n" + "\n".join(footer)

    for line in modified_content.split("\n"):
        content.append("    " + line)

    if p["store"]:
        # Copy locals into globals under a prefixed name so a later
        # `:restore:` run can retrieve them.
        content.append('    for __k__, __v__ in locals().copy().items():')
        content.append("        globals()['__runpython__' + __k__] = __v__")

    if not p['process']:
        content.append("{0}()".format(name))

    script = "\n".join(content)
    script_disp = "\n".join(self.content)
    if not p["nopep8"]:
        # Pretty-print the displayed (not the executed) code.
        try:
            script_disp = remove_extra_spaces_and_pep8(script_disp,
                                                       is_string=True)
        except Exception as e:  # pragma: no cover
            if '.' in docname:
                comment = '  File "{0}", line {1}'.format(docname, lineno)
            else:
                comment = '  File "{0}.rst", line {1}\n  File "{0}.py", line {1}\n'.format(
                    docname, lineno)
            raise ValueError(
                "Pep8 issue with\n'{0}'\n---SCRIPT---\n{1}".format(
                    docname, script)) from e

    # if an exception is raised, the documentation should report a warning
    # return [document.reporter.warning('messagr', line=self.lineno)]
    current_source = self.state.document.current_source
    docstring = ":docstring of " in current_source
    if docstring:
        current_source = current_source.split(":docstring of ")[0]
    if os.path.exists(current_source):
        comment = '  File "{0}", line {1}'.format(current_source, lineno)
        if docstring:
            new_name = os.path.split(current_source)[0] + ".py"
            comment += '\n  File "{0}", line {1}'.format(new_name, lineno)
        cs_source = current_source
    else:
        if '.' in docname:
            comment = '  File "{0}", line {1}'.format(docname, lineno)
        else:
            comment = '  File "{0}.rst", line {1}\n  File "{0}.py", line {1}\n'.format(
                docname, lineno)
        cs_source = docname

    # Add __WD__.
    cs_source_dir = os.path.dirname(cs_source).replace("\\", "/")
    script = script.replace('## __WD__ ##',
                            "__WD__ = '{0}'".format(cs_source_dir))

    out, err, context = run_python_script(
        script,
        comment=comment,
        setsysvar=p['setsysvar'],
        process=p["process"],
        exception=p['exception'],
        warningout=p['warningout'],
        chdir=cs_source_dir if p['current'] else None,
        context=context,
        store_in_file=p['store_in_file'])

    if p['store']:
        # Stores modified local context.
        setattr(env, "runpython_context", context)
    else:
        # Without :store:, the shared context is reset.
        context = {}
        setattr(env, "runpython_context", context)

    if out is not None:
        out = out.rstrip(" \n\r\t")
    if err is not None:
        err = err.rstrip(" \n\r\t")
    content = out
    if len(err) > 0:
        content += "\n[runpythonerror]\n" + err

    # add member
    self.exe_class = p.copy()
    self.exe_class.update(dict(out=out, err=err, script=script))

    # add indent
    def add_indent(content, nbind):
        "local function"
        lines = content.split("\n")
        if nbind > 0:
            lines = [(" " * nbind + _) for _ in lines]
        content = "\n".join(lines)
        return content

    content = add_indent(content, p['indent'])

    # build node
    node = self.__class__.runpython_class(rawsource=content,
                                          indent=p["indent"],
                                          showcode=p["showcode"],
                                          rst=p["rst"],
                                          sin=p["sin"],
                                          sout=p["sout"])

    if p["showcode"]:
        # Optionally wrap the displayed code in a collapsible section.
        if 'code' in p['toggle'] or 'both' in p['toggle']:
            hide = TITLES[language_code]['hide'] + \
                ' ' + TITLES[language_code]['code']
            unhide = TITLES[language_code]['unhide'] + \
                ' ' + TITLES[language_code]['code']
            secin = collapse_node(hide=hide, unhide=unhide, show=False)
            node += secin
        else:
            secin = node
        pin = nodes.paragraph(text=p["sin"])
        if p['language'] in (None, ''):
            p['language'] = 'python'
        if p['language']:
            pcode = nodes.literal_block(script_disp,
                                        script_disp,
                                        language=p['language'],
                                        linenos=p['linenos'])
        else:
            pcode = nodes.literal_block(script_disp,
                                        script_disp,
                                        linenos=p['linenos'])
        secin += pin
        secin += pcode
    elif len(self.options.get('sout', '')) == 0:
        # Hidden code and no explicit output caption: drop the captions.
        p["sout"] = ''
        p["sout2"] = ''

    # RST output.
    if p["rst"]:
        settings_overrides = {}
        try:
            sett.output_encoding
        except KeyError:
            settings_overrides["output_encoding"] = "unicode"
        # try:
        #     sett.doctitle_xform
        # except KeyError:
        #     settings_overrides["doctitle_xform"] = True
        try:
            sett.warning_stream
        except KeyError:  # pragma: no cover
            settings_overrides["warning_stream"] = StringIO()
        # 'initial_header_level': 2,

        secout = node
        if 'out' in p['toggle'] or 'both' in p['toggle']:
            hide = TITLES[language_code]['hide'] + \
                ' ' + TITLES[language_code]['outl']
            unhide = TITLES[language_code]['unhide'] + \
                ' ' + TITLES[language_code]['outl']
            secout = collapse_node(hide=hide, unhide=unhide, show=False)
            node += secout
        elif len(p["sout"]) > 0:
            secout += nodes.paragraph(text=p["sout"])

        try:
            # Either parse the produced RST into the current document
            # (sphinx mode) or publish it as a standalone doctree.
            if p['sphinx']:
                st = StringList(content.replace("\r", "").split("\n"))
                nested_parse_with_titles(self.state, st, secout)
                dt = None
            else:
                dt = core.publish_doctree(
                    content, settings=sett,
                    settings_overrides=settings_overrides)
        except Exception as e:  # pragma: no cover
            # Parsing failed: render the traceback plus the offending
            # output as a literal block instead of aborting the build.
            tab = content
            content = ["::"]
            st = StringIO()
            traceback.print_exc(file=st)
            content.append("")
            trace = st.getvalue()
            trace += "\n----------------------OPT\n" + str(p)
            trace += "\n----------------------EXC\n" + str(e)
            trace += "\n----------------------SETT\n" + str(sett)
            trace += "\n----------------------ENV\n" + str(env)
            trace += "\n----------------------DOCNAME\n" + str(docname)
            trace += "\n----------------------CODE\n"
            content.extend("    " + _ for _ in trace.split("\n"))
            content.append("")
            content.append("")
            content.extend("    " + _ for _ in tab.split("\n"))
            content = "\n".join(content)
            pout = nodes.literal_block(content, content)
            secout += pout
            dt = None

        if dt is not None:
            for ch in dt.children:
                node += ch

    # Regular output.
    if not p["rst"] or p["showout"]:
        text = p["sout2"] if p["rst"] else p["sout"]
        secout = node
        if 'out' in p['toggle'] or 'both' in p['toggle']:
            hide = TITLES[language_code]['hide'] + \
                ' ' + TITLES[language_code]['outl']
            unhide = TITLES[language_code]['unhide'] + \
                ' ' + TITLES[language_code]['outl']
            secout = collapse_node(hide=hide, unhide=unhide, show=False)
            node += secout
        elif len(text) > 0:
            pout2 = nodes.paragraph(text=text)
            node += pout2
        pout = nodes.literal_block(content, content)
        secout += pout

    p['runpython'] = node

    # classes
    node['classes'] += ["runpython"]
    ns = [node]
    return ns
def run(self):
    # type: () -> nodes.Node
    """
    Run to generate the output from .. ddtrace-release-notes:: directive

    1. Determine the max version cutoff we need to report for

       We determine this by traversing the git log until we find the first
       dev or release branch ref. If we are generating for 1.x branch we
       will use 2.0 as the cutoff. If we are generating for 0.60 branch we
       will use 0.61 as the cutoff.

       We do this to ensure if we are generating notes for older versions
       we do not include all up to date release notes. Think releasing
       0.57.2 when there is 0.58.0, 0.59.0, 1.0.0, etc we only want notes
       for < 0.58.

    2. Iterate through all release branches

       A release branch is one that matches the ``^[0-9]+.[0-9]+`` pattern.
       Skip any that do not meet the max version cutoff.

    3. Determine the earliest version to report for each release branch

       If the release has only RC releases then use ``.0rc1`` as the
       earliest version. If there are non-RC releases then use ``.0``
       version as the earliest.

       We do this because we want reno to only report notes that are for
       that given release branch but it will collapse RC releases if there
       is a non-RC tag on that branch. So there isn't a consistent
       "earliest version" we can use for in-progress/dev branches as well
       as released branches.

    4. Generate a reno config for reporting and generate the notes for
       each branch
    """
    # This is where we will aggregate the generated notes
    title = " ".join(self.content)
    result = statemachine.ViewList()

    # Determine the max version we want to report for
    max_version = self._get_report_max_version()
    LOG.info("capping max report version to %r", max_version)

    # For each release branch, starting with the newest
    for version, ref in self._release_branches:
        # If this version is equal to or greater than the max version we
        # want to report for, skip it.
        if max_version is not None and version >= max_version:
            LOG.info("skipping %s >= %s", version, max_version)
            continue

        # Older versions did not have reno release notes
        # DEV: Reno will fail if we try to run on a branch with no notes
        if (version.major, version.minor) < (0, 44):
            LOG.info("skipping older version %s", version)
            continue

        # Parse the branch name from the ref, we want
        # origin/{major}.{minor}[-dev]
        _, _, branch = ref.partition("refs/remotes/")

        # Determine the earliest release tag for this version
        earliest_version = self._get_earliest_version(version)
        if not earliest_version:
            LOG.info("no release tags found for %s", version)
            continue

        # Setup reno config
        conf = config.Config(self._repo.path, "releasenotes")
        conf.override(
            branch=branch,
            collapse_pre_releases=True,
            stop_at_branch_base=True,
            earliest_version=earliest_version,
        )
        LOG.info(
            "scanning %s for %s release notes, stopping at %s",
            os.path.join(self._repo.path, "releasenotes/notes"),
            branch,
            earliest_version,
        )

        # Generate the formatted RST
        with loader.Loader(conf) as ldr:
            versions = ldr.versions
            LOG.info("got versions %s", versions)
            text = formatter.format_report(
                ldr,
                conf,
                versions,
                title=title,
                branch=branch,
            )
        # Track source name + line numbers so parse warnings point back
        # at the generated report rather than this directive.
        source_name = "<%s %s>" % (__name__, branch or "current branch")
        for line_num, line in enumerate(text.splitlines(), 1):
            LOG.debug("%4d: %s", line_num, line)
            result.append(line, source_name, line_num)

    # Generate the RST nodes to return for rendering
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children
def parse_into(self, doc, content_node):
    """Parse the RST lines in *doc* and attach the result to *content_node*."""
    view = ViewList()
    for line_no, text in enumerate(doc, start=1):
        # Report 'no_file.rst' as the source for any parse warnings.
        view.append(text, 'no_file.rst', line_no)
    nested_parse_with_titles(self.state, view, content_node)
def run(self):
    # type: () -> List[nodes.Node]
    """Render an abbreviated signature (and optional summary/members)
    for the object named in the directive content.

    Options handled: ``nosummary``, ``annotation``, ``nolink``,
    ``members``, ``path`` (one of import/full/name).
    """
    self.filename_set = set()   # type: Set[unicode]
    # a set of dependent filenames
    self.reporter = self.state.document.reporter
    self.env = self.state.document.settings.env
    opt_summary = 'nosummary' not in self.options
    opt_annotation = 'annotation' in self.options
    opt_link = 'nolink' not in self.options
    opt_members = self.options.get('members', None)
    # A bare ':members:' option means "all members".
    if opt_members in (None, '') and 'members' in self.options:
        opt_members = "all"
    opt_path = self.options.get('path', 'import')

    try:
        source, lineno = self.reporter.get_source_and_line(self.lineno)
    except AttributeError:
        source = lineno = None

    # object name
    object_name = " ".join(_.strip("\n\r\t ") for _ in self.content)
    try:
        obj, _, kind = import_any_object(object_name, use_init=False)
    except ImportError as e:
        logger = logging.getLogger("AutoSignature")
        logger.warning(
            "[AutoSignature] unable to import '{0}' due to '{1}'".format(
                object_name, e))
        if lineno is not None:
            logger.warning(
                '   File "{0}", line {1}'.format(source, lineno))
        obj = None
        kind = None

    if opt_members is not None and kind != "class":
        logger = logging.getLogger("autosignature")
        logger.warning(
            "[autosignature] option members is specified but '{0}' "
            "is not a class (kind='{1}').".format(object_name, kind))
        obj = None

    # build node
    node = self.__class__.autosignature_class(rawsource=object_name,
                                              source=source,
                                              lineno=lineno,
                                              objectname=object_name)

    # Resolve the anchor text according to the :path: option.
    if opt_path == 'import':
        if obj is None:
            logger = logging.getLogger("autosignature")
            logger.warning(
                "[autosignature] object '{0}' cannot be imported.".format(
                    object_name))
            anchor = object_name
        elif kind == "staticmethod":
            cl, fu = object_name.split(".")[-2:]
            pimp = import_path(obj, class_name=cl)
            anchor = '{0}.{1}.{2}'.format(pimp, cl, fu)
        else:
            pimp = import_path(obj)
            anchor = '{0}.{1}'.format(pimp, object_name.split(".")[-1])
    elif opt_path == 'full':
        anchor = object_name
    elif opt_path == 'name':
        anchor = object_name.split(".")[-1]
    else:
        logger = logging.getLogger("autosignature")
        logger.warning(
            "[autosignature] options path is '{0}', it should be in "
            "(import, name, full) for object '{1}'.".format(
                opt_path, object_name))
        anchor = object_name

    if obj is None:
        if opt_link:
            text = "\n:py:func:`{0} <{1}>`\n\n".format(anchor, object_name)
        else:
            text = "\n``{0}``\n\n".format(anchor)
    else:
        # For classes, the constructor signature is the useful one.
        obj_sig = obj.__init__ if kind == "class" else obj
        try:
            signature = inspect.signature(obj_sig)
            parameters = signature.parameters
        except TypeError as e:
            logger = logging.getLogger("autosignature")
            # BUG FIX: the format string used '{2}' with only two
            # arguments, raising IndexError whenever this warning path
            # fired; the exception text is argument index 1.
            logger.warning(
                "[autosignature](1) unable to get signature of '{0}' - {1}."
                .format(object_name, str(e).replace("\n", "\\n")))
            signature = None
            parameters = None

        domkind = {
            'meth': 'func',
            'function': 'func',
            'class': 'class',
            'staticmethod': 'meth',
            'property': 'meth'
        }[kind]
        if signature is None:
            if opt_link:
                text = "\n:py:{2}:`{0} <{1}>`\n\n".format(
                    anchor, object_name, domkind)
            else:
                text = "\n``{0} {1}``\n\n".format(kind, object_name)
        else:
            signature = self.build_parameters_list(parameters, opt_annotation)
            text = "\n:py:{3}:`{0} <{1}>` ({2})\n\n".format(
                anchor, object_name, signature, domkind)

    if obj is not None and opt_summary:
        # Documentation.
        doc = obj.__doc__  # if kind != "class" else obj.__class__.__doc__
        if doc is None:
            logger = logging.getLogger("autosignature")
            logger.warning(
                "[autosignature] docstring empty for '{0}'.".format(
                    object_name))
        else:
            if "type(object_or_name, bases, dict)" in doc:
                raise Exception("issue with {0}\n{1}".format(obj, doc))
            docstring = self.build_summary(doc)
            text += docstring + "\n\n"

    if opt_members is not None and kind == "class":
        docstring = self.build_members(obj, opt_members, object_name,
                                       opt_annotation, opt_summary)
        # Indent member documentation under the class entry.
        docstring = "\n".join(
            map(lambda s: "    " + s, docstring.split("\n")))
        text += docstring + "\n\n"

    st = StringList(text.split("\n"))
    nested_parse_with_titles(self.state, st, node)
    return [node]
def run(self):
    """Generate documentation for JS objects matched by the directive
    arguments, using doclet data loaded from a jsdoc structure file.
    """
    self.doclets = []
    self.names = {}
    self.longnames = {}
    structure_json = self.get_opt('structure_json', True)
    parent = docutils.nodes.section()
    parent.document = self.state.document
    objtype = strip_directive(self.name)  # 'js:automodule' => 'module'
    self.content = StringList()

    def obj_factory(d):
        """ Transmogrify the dictionaries read from the json file into objects.

        If the object has a known kind make it into a JS<kind> class,
        else if it has an unknown kind make it into an Obj,
        else if it has no kind (substructures of doclets) make it an obj.
        """
        try:
            kind = d['kind']
            o = self.vtable.get(kind, Obj)(d)
        except KeyError:
            # NOTE(review): lowercase 'obj' here vs 'Obj' above — presumably
            # a distinct lightweight class for kind-less dicts; confirm it
            # is not a typo for 'Obj'.
            o = obj(d)
        return o

    # load and cache structure file
    # (module-level cache keyed by path, shared across directives)
    if structure_json not in loaded_structure_files:
        with open(structure_json, 'r') as fp:
            self.state.document.settings.record_dependencies.add(
                structure_json)
            loaded_structure_files[structure_json] = \
                self.merge_doclets(jsonlib.load(fp, object_hook=obj_factory))

    # get cached structure file
    self.doclets, self.names, self.longnames = loaded_structure_files[
        structure_json]

    try:
        visited = set()  # remember which objects we have already output
        for argument in self.arguments:
            # Each directive argument is a regex matched against longnames.
            rex = re.compile(argument)
            # grep the list of doclets
            doclets = (d for d in self.doclets
                       if d.kind == objtype and
                       rex.search(d.longname) and
                       d.longname not in visited)
            for d in sorted(doclets, key=operator.attrgetter('longname')):
                visited.add(d.longname)
                # Doclets append their RST to self.content.
                d.run(self, 0)

        with switch_source_input(self.state, self.content):
            # logger.info (self.content.pprint ())
            try:
                nested_parse_with_titles(self.state, self.content, parent)
            except:
                # Dump the generated RST to help diagnose parse failures,
                # then re-raise the original error.
                logger.error(self.content.pprint())
                raise
    except AutoJSDocError as exc:
        logger.error('Error in "%s" directive: %s.' % (self.name, str(exc)))
    return parent.children
def run(self):
    """Build a single/multiple-choice question from the directive body.

    The body is split on blank lines into up to three paragraphs:
    optional instructions, the answer choices (one per line, key first,
    '*' marking correct answers), and an optional feedback paragraph.
    """
    self.assert_has_content()

    # Detect paragraphs: any number of plain content, choices and
    # optional feedback.
    empty_lines = list(loc for loc, line in enumerate(self.content)
                       if line == u'')
    plain_content = None
    choices = []
    feedback = []
    if len(empty_lines) > 0:
        # 'last' is the paragraph after the final blank line.
        last = self.content[(empty_lines[-1] + 1):]

        def split_second_last(empty_lines):
            # Split content around the second-to-last blank line into
            # (instructions, choices); instructions may be absent.
            if len(empty_lines) > 1:
                return self.content[:empty_lines[-2]], self.content[(
                    empty_lines[-2] + 1):empty_lines[-1]]
            else:
                return None, self.content[:empty_lines[-1]]

        # Backwards compatibility for skipping feedback paragraph.
        if len(last) == 1 and last[0].startswith(
                u'I hereby declare that no feedback '):
            plain_content, choices = split_second_last(empty_lines)
        elif all(u'§' in line for line in last):
            # Every line containing '§' marks the paragraph as feedback.
            plain_content, choices = split_second_last(empty_lines)
            feedback = last
        else:
            plain_content = self.content[:empty_lines[-1]]
            choices = last
    else:
        # No blank lines: the whole body is the choices paragraph.
        choices = self.content

    # Create question.
    env, node, data = self.create_question()
    self.add_instructions(node, data, plain_content)
    data[u'options'] = (u'#!children', u'option')

    # Travel all answer options.
    for i, line in slicer(choices):

        # Split choice key off.
        key, content = line[0].split(u' ', 1)
        key = key.strip()
        line[0] = content.strip()

        # Trim the key: leading '*' marks a correct answer, a trailing
        # '.' is cosmetic.
        correct = False
        if key.startswith(u'*'):
            correct = True
            key = key[1:]
        if key.endswith(u'.'):
            key = key[:-1]

        # Create document elements.
        choice = aplus_nodes.html(u'div', {u'class': u'radio'})
        label = aplus_nodes.html(u'label', {})
        label.append(
            aplus_nodes.html(
                u'input', {
                    u'type': self.input_type(),
                    u'name': u'field_{:d}'.format(env.question_count - 1),
                    u'value': key,
                }))
        choice.append(label)
        node.append(choice)

        # The choice text itself may contain formatted RST.
        text = aplus_nodes.html(u'span', {})
        text.store_html(u'label')
        nested_parse_with_titles(self.state, line, text)
        label.append(text)

        # Add configuration data.
        optdata = {
            u'value': key,
            u'label': (u'#!html', u'label'),
        }
        if correct:
            optdata[u'correct'] = True
        choice.set_yaml(optdata, u'option')

    self.add_feedback(node, data, feedback)
    return [node]
def run(self):
    """Build a Bootstrap-style panel node from the directive body.

    Parses the body into the panel, then rearranges children: tables
    and list-groups stay flush with the panel edges while other content
    is wrapped in a ``panel-body`` container; an optional heading is
    prepended and any footer found in the body is moved to the end.
    """
    self.assert_has_content()
    panel = panel_node()
    panel.document = self.state.document
    set_source_info(self, panel)
    panel['classes'] = ['panel']
    panel['classes'].append(self.node_class)
    panel['classes'] += self.options.get('class', [])
    self.add_name(panel)

    panel_heading = None
    if self.arguments:
        # The first directive argument is the (inline-parsed) heading.
        heading = self.arguments[0]
        children, msg = self.state.inliner.parse(
            heading, 0, self.state_machine, self.state.parent)
        panel_heading = panel_header_node(heading)
        if 'panel-title' in self.options:
            title_p = nodes.paragraph()
            title_p += children
            title_p['classes'] += ['panel-title']
            panel_heading += title_p
        else:
            panel_heading += children

    nested_parse_with_titles(self.state, self.content, panel)

    # If we have a footer within our panel, remove it for now.
    pfooter = None
    panel_traverse = panel.traverse(panel_footer_node)
    if len(panel_traverse):
        pfooter = panel_traverse[0]
        panel.children.remove(pfooter)

    # pending elements can be a first node, we need the next index
    first_node_pending = isinstance(panel.children[0], nodes.pending)
    fn_index = 1 if first_node_pending else 0

    # If first or last node is not a table or list-group
    # wrap it in a panel-body
    table_is_first_node = isinstance(panel.children[fn_index], nodes.table)
    table_is_last_node = isinstance(panel.children[-1], nodes.table)
    list_is_first_node = isinstance(panel.children[fn_index],
                                    nodes.Sequential)
    list_is_last_node = isinstance(panel.children[-1], nodes.Sequential)

    if table_is_first_node or table_is_last_node:
        self.create_panel_body(panel, table_is_first_node,
                               table_is_last_node)
    elif list_is_first_node or list_is_last_node:
        self.create_panel_body(panel, list_is_first_node,
                               list_is_last_node)
    else:
        # By default contain everything inside a panel-body
        panel_body = nodes.container()
        panel_body['classes'] = ['panel-body']
        panel_body.children = panel.children
        panel.children = [panel_body]

    # Heading goes first, after all body rearranging is done.
    if panel_heading:
        panel.children.insert(0, panel_heading)

    # Add back our footer last
    if pfooter:
        panel.children.append(pfooter)

    return [panel]
def run(self):
    """Build an A+ submit-exercise node and write its YAML configuration.

    Reads the exercise key/difficulty/points from the directive
    arguments, assembles an HTML ``div`` carrying A+ data attributes,
    merges configuration from an optional YAML file with directive
    options, and finally writes the exercise configuration via
    ``node.write_yaml``.

    :returns: a one-element list containing the exercise div node.
    """
    key, difficulty, points = self.extract_exercise_arguments()

    env = self.state.document.settings.env
    # Unique exercise name: document path (slashes flattened) + key.
    name = "{}_{}".format(env.docname.replace('/', '_'), key)
    override = env.config.override

    classes = ['exercise']
    if 'class' in self.options:
        classes.extend(self.options['class'])
    if difficulty:
        classes.append('difficulty-' + difficulty)

    # Add document nodes.
    args = {
        'class': ' '.join(classes),
        'data-aplus-exercise': 'yes',
    }
    if 'quiz' in self.options:
        args['data-aplus-quiz'] = 'yes'
    if 'ajax' in self.options:
        args['data-aplus-ajax'] = 'yes'
    node = aplus_nodes.html('div', args)

    key_title = "{} {}".format(translations.get(env, 'exercise'), key)

    # Load or create exercise configuration.
    if 'config' in self.options:
        path = os.path.join(env.app.srcdir, self.options['config'])
        if not os.path.exists(path):
            raise SphinxError('Missing config path {}'.format(self.options['config']))
        data = yaml_writer.read(path)
        config_title = data.get('title', '')
    else:
        data = {'_external': True}
        if 'url' in self.options:
            data['url'] = self.options['url']
        if 'lti' in self.options:
            data.update({
                'lti': self.options['lti'],
                'lti_context_id': self.options.get('lti_context_id', ''),
                'lti_resource_link_id': self.options.get('lti_resource_link_id', ''),
            })
            # NOTE(review): the extra LTI flags are reconstructed as nested
            # under the 'lti' option, since they are only meaningful for
            # LTI exercises — confirm against upstream a-plus-rst-tools.
            if 'lti_aplus_get_and_post' in self.options:
                data.update({'lti_aplus_get_and_post': True})
            if 'lti_open_in_iframe' in self.options:
                data.update({'lti_open_in_iframe': True})
        config_title = ''

    # A :title: option on the directive wins over the YAML title.
    config_title = self.options.get('title', config_title)

    if "radar_tokenizer" in self.options or "radar_minimum_match_tokens" in self.options:
        data['radar_info'] = {
            'tokenizer': self.options.get("radar_tokenizer"),
            'minimum_match_tokens': self.options.get("radar_minimum_match_tokens"),
        }

    category = 'submit'
    data.update({
        'key': name,
        'category': 'submit',
        'scale_points': points,
        'difficulty': difficulty or '',
        'max_submissions': self.options.get('submissions', data.get('max_submissions', env.config.program_default_submissions)),
        'min_group_size': data.get('min_group_size', env.config.default_min_group_size),
        'max_group_size': data.get('max_group_size', env.config.default_max_group_size),
        'points_to_pass': self.options.get('points-to-pass', data.get('points_to_pass', 0)),
        # The RST source file path is needed for fixing relative URLs
        # in the exercise description.
        # Replace the Windows path separator backslash \ with the Unix forward slash /.
        '_rst_srcpath': env.doc2path(env.docname, None).replace('\\', '/'),
    })
    self.set_assistant_permissions(data)

    if data.get('title|i18n'):
        # Exercise config.yaml defines title|i18n for multiple languages.
        # Do not write the field "title" to data in order to avoid conflicts.
        if config_title:
            # Overwrite the title for one language since the RST directive
            # has defined the title option (or alternatively, the yaml file
            # has "title" in addition to "title|i18n", but that does not make sense).
            # env.config.language may be incorrect if the language can not be detected.
            data['title|i18n'][env.config.language] = env.config.submit_title.format(
                key_title=key_title, config_title=config_title)
    else:
        formatted_title = env.config.submit_title.format(
            key_title=key_title, config_title=config_title)
        # If no title has been defined, use key_title as the default.
        data['title'] = formatted_title if formatted_title else key_title

    if self.content:
        self.assert_has_content()
        # Sphinx can not compile the nested RST into HTML at this stage, hence
        # the HTML instructions defined in this directive body are added to
        # the exercise YAML file only at the end of the build. Sphinx calls
        # the visit functions of the nodes in the last writing phase.
        # The instructions are added to the YAML file in the depart_html
        # function in aplus_nodes.py.
        exercise_description = aplus_nodes.html('div', {})
        exercise_description.store_html('exercise_description')
        nested_parse_with_titles(self.state, self.content, exercise_description)
        node.append(exercise_description)
        data['instructions'] = ('#!html', 'exercise_description')
    else:
        # The placeholder text is only used in the built HTML
        # (not in the YAML configurations).
        paragraph = aplus_nodes.html('p', {})
        paragraph.append(nodes.Text(translations.get(env, 'submit_placeholder')))
        node.append(paragraph)

    data.setdefault('status', self.options.get('status', 'unlisted'))

    if category in override:
        data.update(override[category])
        if 'url' in data:
            # Expand the {key} placeholder in an overridden URL.
            data['url'] = data['url'].format(key=name)
    if 'category' in self.options:
        data['category'] = str(self.options['category'])

    node.write_yaml(env, name, data, 'exercise')

    return [node]
def run(self):
    """Build an A+ questionnaire form node and write its YAML configuration.

    Parses the questionnaire body into an HTML ``form``, tracks question
    counts/points via attributes stashed on the Sphinx build environment,
    validates randomized-quiz options, computes ``max_points``, and writes
    the exercise configuration via ``form.write_yaml``.

    :returns: a one-element list containing the questionnaire div node.
    :raises SphinxError: on an invalid ``pick_randomly`` value or a points
        mismatch between questions and the questionnaire total.
    """
    self.assert_has_content()
    key, difficulty, points = self.extract_exercise_arguments()

    # Parse options.
    classes = ['exercise']
    is_feedback = False
    if 'chapter-feedback' in self.options:
        classes.append('chapter-feedback')
        is_feedback = True
    if 'weekly-feedback' in self.options:
        classes.append('weekly-feedback')
        is_feedback = True
    if 'appendix-feedback' in self.options:
        classes.append('appendix-feedback')
        is_feedback = True
    if 'course-feedback' in self.options:
        classes.append('course-feedback-questionnaire')
        is_feedback = True
    if 'feedback' in self.options:
        is_feedback = True
    if is_feedback:
        # All feedback variants share the fixed key/category 'feedback'.
        key = 'feedback'
        category = 'feedback'
        classes.append('feedback')
    else:
        category = 'questionnaire'
        if difficulty:
            classes.append('difficulty-' + difficulty)

    if 'category' in self.options:
        category = str(self.options.get('category'))

    env = self.state.document.settings.env
    name = "{}_{}".format(env.docname.replace('/', '_'), key)
    override = env.config.override

    # Per-questionnaire state shared with the nested question directives
    # via the build environment; reset before parsing the body.
    env.questionnaire_is_feedback = is_feedback
    env.question_count = 0
    env.aplus_single_question_points = None
    env.aplus_quiz_total_points = 0
    env.aplus_pick_randomly_quiz = 'pick_randomly' in self.options
    env.aplus_random_question_exists = False

    # Create document elements.
    node = aplus_nodes.html('div', {
        'class': ' '.join(classes),
        'data-aplus-exercise': 'yes',
    })
    form = aplus_nodes.html('form', {
        'action': key,
        'method': 'post',
    })
    nested_parse_with_titles(self.state, self.content, form)

    submit = aplus_nodes.html('input', {
        'type': 'submit',
        'value': translations.get(env, 'submit'),
        'class': 'btn btn-primary',
    }, skip_html=True)
    form.append(submit)
    node.append(form)

    # Write configuration file.
    data = {
        'key': name,
        'category': category,
        'difficulty': difficulty or '',
        'max_submissions': self.options.get('submissions', 0 if is_feedback else env.config.questionnaire_default_submissions),
        'min_group_size': 1 if is_feedback else env.config.default_min_group_size,
        'max_group_size': 1 if is_feedback else env.config.default_max_group_size,
        'points_to_pass': self.options.get('points-to-pass', 0),
        'feedback': is_feedback,
        'view_type': 'access.types.stdsync.createForm',
        'status': self.options.get('status', 'unlisted'),
        'fieldgroups': [{
            'title': '',
            'fields': ('#!children', None),
        }],
        # The RST source file path is needed for fixing relative URLs
        # in the exercise description.
        # Replace the Windows path separator backslash \ with the Unix forward slash /.
        '_rst_srcpath': env.doc2path(env.docname, None).replace('\\', '/'),
    }

    meta_data = env.metadata[env.app.config.master_doc]
    # Show the model answer after the last submission.
    if 'reveal-model-at-max-submissions' in self.options:
        data['reveal_model_at_max_submissions'] = str_to_bool(self.options['reveal-model-at-max-submissions'])
    else:
        default_reveal = str_to_bool(meta_data.get(
            'questionnaire-default-reveal-model-at-max-submissions', 'false'),
            error_msg_prefix=env.app.config.master_doc + " questionnaire-default-reveal-model-at-max-submissions: ")
        if default_reveal:
            data['reveal_model_at_max_submissions'] = default_reveal
    # Show the model answer after the module deadline.
    if 'show-model' in self.options:
        data['show_model_answer'] = str_to_bool(self.options['show-model'])
    else:
        show_default = str_to_bool(meta_data.get(
            'questionnaire-default-show-model', 'true'),
            error_msg_prefix=env.app.config.master_doc + " questionnaire-default-show-model: ")
        if not show_default:
            data['show_model_answer'] = show_default

    if env.aplus_pick_randomly_quiz:
        pick_randomly = self.options.get('pick_randomly', 0)
        if pick_randomly < 1:
            source, line = self.state_machine.get_source_and_line(self.lineno)
            raise SphinxError(source + ": line " + str(line) +
                "\nNumber of fields to sample randomly should be greater than zero "
                "(option pick_randomly in the questionnaire directive).")
        data['fieldgroups'][0]['pick_randomly'] = pick_randomly
        if 'preserve-questions-between-attempts' in self.options:
            data['fieldgroups'][0]['resample_after_attempt'] = False
    elif not env.aplus_random_question_exists:
        # The HTML attribute data-aplus-quiz makes the A+ frontend show the
        # questionnaire feedback in place of the exercise description once
        # the student has submitted at least once. In randomized questionnaires,
        # the same form may not be submitted again due to one-time use nonce
        # values, hence the attribute must not be used in randomized
        # questionnaires.
        node.attributes['data-aplus-quiz'] = 'yes'

    self.set_assistant_permissions(data)

    # True when the second directive argument is a points value (and not
    # just the difficulty letter).
    points_set_in_arguments = len(self.arguments) == 2 and difficulty != self.arguments[1]

    if env.aplus_pick_randomly_quiz:
        calculated_max_points = (
            self.options.get('pick_randomly') * env.aplus_single_question_points
            if env.aplus_single_question_points is not None
            else 0
        )
    else:
        calculated_max_points = env.aplus_quiz_total_points

    if calculated_max_points == 0 and is_feedback:
        data['max_points'] = points
    else:
        if points_set_in_arguments and calculated_max_points != points:
            source, line = self.state_machine.get_source_and_line(self.lineno)
            raise SphinxError(source + ": line " + str(line) +
                "\nThe points of the questions in the questionnaire must add up to the total points of the questionnaire!")
        data['max_points'] = calculated_max_points

    if 'title' in self.options:
        data['title'] = self.options.get('title')
    else:
        data['title|i18n'] = translations.opt('feedback') if is_feedback else translations.opt('exercise', postfix=" {}".format(key))

    if not 'no-override' in self.options and category in override:
        data.update(override[category])
        if 'url' in data:
            # Expand the {key} placeholder in an overridden URL.
            data['url'] = data['url'].format(key=name)

    form.write_yaml(env, name, data, 'exercise')

    return [node]
def run(self) -> List:
    """Generate API reference RST from gathered route/schema metadata.

    Walks the application's route registry, emits an RST fragment per
    route pattern matching the ``prefix`` option (version, method,
    docstring, schemas/serializers), then parses the whole fragment into
    docutils nodes.

    http://www.sphinx-doc.org/en/stable/extdev/markupapi.html#docutils.parsers.rst.Directive.run
    """
    env = self.state.document.settings.env
    app = env.app
    config: Configurator = app.config.FRAMEAPP_CONFIG
    prefix = self.options.get('prefix', '/')
    app.info('Generating API documentation from gathered project metadata')

    # RST text is accumulated in a StringIO and parsed once at the end.
    doc_text = StringIO()
    api_doc = []
    doc_append = api_doc.append
    for namespace, route in config.routes.registry.items():
        # doc_text.write(namespace_caption(f'Namespace: {namespace}'))
        # doc_text.write(line_separator())
        for route_name, route_struct in route.items():
            # Only document routes under the requested URL prefix.
            if not route_struct.pattern.startswith(prefix):
                continue
            pattern = route_struct.pattern[len(prefix):]
            doc_text.write(url_pattern_caption(pattern))
            doc_text.write(line_separator())
            for schema_sig, schema_data in route_struct.schemas.items():
                doc_text.write(api_version(schema_sig.api_version))
                doc_text.write(line_separator())
                doc_text.write(request_method(schema_sig.request_method))
                doc_text.write(line_separator())
                docstring = '\n'.join(prepare_docstring(schema_data.doc))
                doc_text.write(docstring)
                doc_text.write(line_separator())
                if schema_data.input_schema:
                    # NOTE(review): this branch tests input_schema but writes
                    # output_schema(schema_data.output_schema) — which is also
                    # written again in the output_schema branch below. This
                    # looks like a copy-paste bug (the input schema is never
                    # rendered); confirm intended behavior before changing.
                    doc_text.write(output_schema(
                        schema_data.output_schema))
                    doc_text.write(line_separator())
                if schema_data.input_serializer:
                    # Let's create a temp schema generator.
                    # We don't have to care about initial arguments too much
                    version = '1.10'
                    temp = OpenApiSchemaGenerator(version, '<Title>', '/url')
                    response_schema, error_status_codes = temp.get_response_object(
                        schema_data.input_serializer.get(version), '<empty doc>')
                    doc_text.write(input_serializer(response_schema))
                    doc_text.write(line_separator())
                if schema_data.output_serializer:
                    # Let's create a temp schema generator.
                    # We don't have to care about initial arguments too much
                    version = '1.10'
                    temp = OpenApiSchemaGenerator(version, '<Title>', '/url')
                    response_schema, error_status_codes = temp.get_response_object(
                        schema_data.output_serializer.get(version), '<empty doc>')
                    doc_text.write(output_serializer(response_schema))
                    doc_text.write(line_separator())
                if schema_data.output_schema:
                    doc_text.write(output_schema(
                        schema_data.output_schema))
                    doc_text.write(line_separator())
            doc_text.write(horizontal_separator())
            doc_text.write(line_separator())

    # Since there's no extensive guides on how to use nested_parse(),
    # the current implementation just repeats the steps from standard extensions, such as
    # https://github.com/sphinx-doc/sphinx/blob/228fdb892af25f4b93f2760f2cd6497f2aabc0be/sphinx/ext/autosummary/__init__.py#L377-L384
    # See also http://www.sphinx-doc.org/en/stable/extdev/markupapi.html#viewlists
    doc_text.seek(0)
    vl = ViewList(doc_text.readlines())
    output = nodes.paragraph()
    nested_parse_with_titles(self.state, vl, output)
    doc_append(output)
    return api_doc
def run(self):
    """Render a class attribute's OptionsDictionary as an RST simple table.

    The three directive arguments are a module path, a class name, and
    the name of an ``OptionsDictionary`` attribute on an instance of that
    class. The option metadata is laid out as a fixed-width RST "simple
    table" followed by an explanatory note.

    :returns: the child nodes produced by parsing the generated RST.
    :raises TypeError: if the attribute is not an OptionsDictionary.
    """
    module_path, class_name, attribute_name = self.arguments

    mod = importlib.import_module(module_path)
    klass = getattr(mod, class_name)
    options = getattr(klass(), attribute_name)

    if not isinstance(options, OptionsDictionary):
        raise TypeError("Object '%s' is not an OptionsDictionary." % attribute_name)

    # Collect one row per option: [name, default, values, types, desc].
    outputs = []
    for option_name, option_data in sorted(iteritems(options._dict)):
        name = option_name
        # The _undefined sentinel means the option has no default and
        # must be supplied by the user.
        default = option_data['value'] if option_data['value'] is not _undefined \
            else '**Required**'
        values = option_data['values']
        types = option_data['types']
        desc = option_data['desc']

        if types is None:
            types = "N/A"
        else:
            # Normalize a single type to a tuple, then show class names.
            if not isinstance(types, (tuple, list)):
                types = (types,)
            types = [type_.__name__ for type_ in types]

        if values is None:
            values = "N/A"
        else:
            # Normalize a single value to a tuple, then take a list copy.
            if not isinstance(values, (tuple, list)):
                values = (values,)
            values = list(values)

        outputs.append([name, default, values, types, desc])

    lines = ViewList()

    col_heads = ['Option', 'Default', 'Acceptable Values',
                 'Acceptable Types', 'Description']

    # Each column is as wide as its widest cell (headers included).
    max_sizes = {}
    for j, col in enumerate(col_heads):
        max_sizes[j] = len(col)

    for output in outputs:
        for j, item in enumerate(output):
            length = len(str(item))
            if max_sizes[j] < length:
                max_sizes[j] = length

    # RST simple-table border: runs of '=' separated by two spaces.
    header = ""
    for key, val in iteritems(max_sizes):
        header += '=' * val + '  '

    # Title row: each cell padded to (column width + 2-space separator).
    titles = ""
    for j, head in enumerate(col_heads):
        titles += ("%s  " % head).ljust(max_sizes[j] + 2)

    lines.append(header, "options table", 1)
    lines.append(titles, "options table", 2)
    lines.append(header, "options table", 3)

    n = 3
    for output in outputs:
        line = ""
        for j, item in enumerate(output):
            line += ("%s  " % str(item)).ljust(max_sizes[j] + 2)
        lines.append(line, "options table", n)
        n += 1

    lines.append(header, "options table", n)
    lines.append("", "options table", n + 1)  # Blank line required after table.
    lines.append(
        "Note: Options can be passed as keyword arguments at initialization.",
        "options table", n + 2)

    # Create a node.
    node = nodes.section()
    node.document = self.state.document

    # Parse the rst.
    nested_parse_with_titles(self.state, lines, node)

    # And return the result.
    return node.children
def run(self):
    """Render the signature (and optional summary/members) of an object.

    Imports the object named in the directive content, resolves a link
    anchor according to the ``path`` option (import/full/name), extracts
    its signature with several fallbacks, and parses the generated RST
    into an ``autosignature`` node.

    :returns: a one-element list containing the autosignature node.
    """
    self.filename_set = set()  # a set of dependent filenames
    self.reporter = self.state.document.reporter
    self.env = self.state.document.settings.env
    # Directive options (flags and values).
    opt_summary = 'nosummary' not in self.options
    opt_annotation = 'annotation' in self.options
    opt_link = 'nolink' not in self.options
    opt_members = self.options.get('members', None)
    opt_debug = 'debug' in self.options
    if opt_members in (None, '') and 'members' in self.options:
        # A bare :members: flag means "all members".
        opt_members = "all"
    opt_path = self.options.get('path', 'import')
    opt_syspath = self.options.get('syspath', None)

    if opt_debug:
        # In debug mode, capture helper log messages so they can be
        # appended to the generated output below.
        keep_logged = []

        def keep_logging(*els):
            keep_logged.append(" ".join(str(_) for _ in els))

        logging_function = keep_logging
    else:
        logging_function = None

    try:
        source, lineno = self.reporter.get_source_and_line(self.lineno)
    except AttributeError:  # pragma: no cover
        source = lineno = None

    # object name
    object_name = " ".join(_.strip("\n\r\t ") for _ in self.content)
    if opt_syspath:
        # Temporarily extend sys.path; restored after the import below.
        syslength = len(sys.path)
        sys.path.extend(opt_syspath.split(';'))
    try:
        obj, _, kind = import_any_object(
            object_name, use_init=False, fLOG=logging_function)
    except ImportError as e:
        mes = "[autosignature] unable to import '{0}' due to '{1}'".format(
            object_name, e)
        logger = logging.getLogger("autosignature")
        logger.warning(mes)
        if logging_function:
            logging_function(mes)  # pragma: no cover
        if lineno is not None:
            logger.warning(' File "{0}", line {1}'.format(source, lineno))
        obj = None
        kind = None
    if opt_syspath:
        del sys.path[syslength:]

    if opt_members is not None and kind != "class":  # pragma: no cover
        logger = logging.getLogger("autosignature")
        logger.warning(
            "[autosignature] option members is specified but '{0}' "
            "is not a class (kind='{1}').".format(object_name, kind))
        obj = None

    # build node
    node = self.__class__.autosignature_class(
        rawsource=object_name, source=source, lineno=lineno,
        objectname=object_name)

    # Resolve the cross-reference anchor according to the path option.
    if opt_path == 'import':
        if obj is None:
            logger = logging.getLogger("autosignature")
            logger.warning(
                "[autosignature] object '{0}' cannot be imported.".format(
                    object_name))
            anchor = object_name
        elif kind == "staticmethod":
            cl, fu = object_name.split(".")[-2:]
            pimp = import_path(obj, class_name=cl, fLOG=logging_function)
            anchor = '{0}.{1}.{2}'.format(pimp, cl, fu)
        else:
            pimp = import_path(
                obj, err_msg="object name: '{0}'".format(object_name))
            anchor = '{0}.{1}'.format(pimp, object_name.split(".")[-1])
    elif opt_path == 'full':
        anchor = object_name
    elif opt_path == 'name':
        anchor = object_name.split(".")[-1]
    else:  # pragma: no cover
        logger = logging.getLogger("autosignature")
        logger.warning(
            "[autosignature] options path is '{0}', it should be in "
            "(import, name, full) for object '{1}'.".format(
                opt_path, object_name))
        anchor = object_name

    if obj is None:
        # Import failed: emit just a link (or literal text).
        if opt_link:
            text = "\n:py:func:`{0} <{1}>`\n\n".format(anchor, object_name)
        else:
            text = "\n``{0}``\n\n".format(anchor)
    else:
        # For classes, the signature of interest is __init__'s.
        obj_sig = obj.__init__ if kind == "class" else obj
        try:
            signature = inspect.signature(obj_sig)
            parameters = signature.parameters
        except TypeError as e:  # pragma: no cover
            mes = "[autosignature](1) unable to get signature of '{0}' - {1}.".format(
                object_name, str(e).replace("\n", "\\n"))
            logger = logging.getLogger("autosignature")
            logger.warning(mes)
            if logging_function:
                logging_function(mes)
            signature = None
            parameters = None
        except ValueError as e:  # pragma: no cover
            # Backup plan, no __text_signature__, this happen
            # when a function was created with pybind11.
            doc = obj_sig.__doc__
            sigs = set(enumerate_cleaned_signature(doc))
            if len(sigs) == 0:
                mes = "[autosignature](2) unable to get signature of '{0}' - {1}.".format(
                    object_name, str(e).replace("\n", "\\n"))
                logger = logging.getLogger("autosignature")
                logger.warning(mes)
                if logging_function:
                    logging_function(mes)
                signature = None
                parameters = None
            elif len(sigs) > 1:
                mes = "[autosignature](2) too many signatures for '{0}' - {1} - {2}.".format(
                    object_name, str(e).replace("\n", "\\n"), " *** ".join(sigs))
                logger = logging.getLogger("autosignature")
                logger.warning(mes)
                if logging_function:
                    logging_function(mes)
                signature = None
                parameters = None
            else:
                try:
                    # NOTE(review): inspect._signature_fromstr is a private
                    # CPython API and may change between versions.
                    signature = inspect._signature_fromstr(
                        inspect.Signature, obj_sig, list(sigs)[0])
                    parameters = signature.parameters
                except TypeError as e:
                    mes = "[autosignature](3) unable to get signature of '{0}' - {1}.".format(
                        object_name, str(e).replace("\n", "\\n"))
                    logger = logging.getLogger("autosignature")
                    logger.warning(mes)
                    if logging_function:
                        logging_function(mes)
                    signature = None
                    parameters = None

        # Map the object kind to the Python domain role used for the link.
        domkind = {
            'meth': 'func', 'function': 'func', 'method': 'meth',
            'class': 'class', 'staticmethod': 'meth',
            'property': 'meth'}[kind]
        if signature is None:
            if opt_link:
                text = "\n:py:{2}:`{0} <{1}>`\n\n".format(
                    anchor, object_name, domkind)
            else:
                text = "\n``{0} {1}``\n\n".format(kind, object_name)
        else:
            signature = self.build_parameters_list(parameters, opt_annotation)
            text = "\n:py:{3}:`{0} <{1}>` ({2})\n\n".format(
                anchor, object_name, signature, domkind)

    if obj is not None and opt_summary:
        # Documentation.
        doc = obj.__doc__  # if kind != "class" else obj.__class__.__doc__
        if doc is None:  # pragma: no cover
            mes = "[autosignature] docstring empty for '{0}'.".format(
                object_name)
            logger = logging.getLogger("autosignature")
            logger.warning(mes)
            if logging_function:
                logging_function(mes)
        else:
            if "type(object_or_name, bases, dict)" in doc:
                raise TypeError(  # pragma: no cover
                    "issue with {0}\n{1}".format(obj, doc))
            docstring = self.build_summary(doc)
            text += docstring + "\n\n"

    if opt_members is not None and kind == "class":
        # Indent the members documentation one level under the class.
        docstring = self.build_members(
            obj, opt_members, object_name, opt_annotation, opt_summary)
        docstring = "\n".join(
            map(lambda s: " " + s, docstring.split("\n")))
        text += docstring + "\n\n"

    text_lines = text.split("\n")
    if logging_function:
        # Append the captured debug log as a literal block.
        text_lines.extend([' ::', '', ' [debug]', ''])
        text_lines.extend(' ' + li for li in keep_logged)
        text_lines.append('')
    st = StringList(text_lines)
    nested_parse_with_titles(self.state, st, node)
    return [node]
def run(self):
    """Build documentation nodes for every registered indicator class.

    For each indicator registered in ``self.RefCls._indcol``, emits a
    subtitle node followed by a container with the parsed docstring,
    augmented with Alias, Lines, Params and (when ``self.RefPlot`` is
    set) PlotInfo/PlotLines sections.

    :returns: the flat list of docutils nodes (subtitle + container per
        indicator).
    """
    indnode = []
    for indname in sorted(self.RefCls._indcol.keys()):
        # indnode.append(nodes.section())
        indcls = self.RefCls._indcol[indname]

        # Title section (indicator name)
        indname = indcls.__name__
        indtitle = nodes.subtitle(indname, indname)
        indnode.append(indtitle)

        # Get the docstring and prepare it for parsing
        # a list is returned
        indclsdoc = indcls.__doc__ or ''  # __doc__ could be None
        inddoc = prepare_docstring(indclsdoc)

        # Alias section
        indaliases = getattr(indcls, 'alias', [])
        if indaliases:
            inddoc.insert(0, u'Alias:')
            purgedaliases = []
            for indalias in indaliases:
                if not isinstance(indalias, string_types):
                    # Non-string alias entries carry the name first.
                    indalias = indalias[0]
                purgedaliases.append(indalias)
            aliastxt = u' - %s' % ', '.join(purgedaliases)
            inddoc.insert(1, aliastxt)
            inddoc.insert(2, u'')

        # Lines section
        indlines = indcls.lines._getlines()
        if indlines:
            inddoc.append(u'Lines:')
            for i, indline in enumerate(indlines):
                inddoc.append(u' - %s' % indcls.lines._getlinealias(i))

        # Params section
        indparams = indcls.params._getpairs()
        if indparams:
            inddoc.append(u'Params:')
            for pkey, pvalue in indcls.params._getitems():
                try:
                    if issubclass(pvalue, self.RefCls):
                        pvalue = pvalue.__name__
                except TypeError:
                    # FIX: was a bare ``except:``; issubclass raises
                    # TypeError when pvalue is not a class, which is the
                    # only case this guard is meant to ignore.
                    pass
                inddoc.append(u' - %s (%s)' % (pkey, str(pvalue)))

        if self.RefPlot:
            # Plotinfo section
            # indplotinfo = indcls.plotinfo._getpairs()
            if len(indcls.plotinfo._getpairs()):
                inddoc.append(u'PlotInfo:')
                for pkey, pvalue in indcls.plotinfo._getitems():
                    inddoc.append(u' - %s (%s)' % (pkey, str(pvalue)))

            # PlotLines Section
            if len(indcls.plotlines._getpairs()):
                inddoc.append(u'PlotLines:')
                for pkey, pvalue in indcls.plotlines._getitems():
                    if isinstance(pvalue, AutoInfoClass):
                        inddoc.append(u' - %s:' % pkey)
                        for plkey, plvalue in pvalue._getitems():
                            inddoc.append(u' - %s (%s)' % (plkey, plvalue))
                    elif isinstance(pvalue, (dict, OrderedDict)):
                        inddoc.append(u' - %s:' % pkey)
                        for plkey, plvalue in pvalue.items():
                            inddoc.append(u' - %s (%s)' % (plkey, plvalue))
                    else:
                        # FIX: the original called
                        #   inddoc.append(u' - %s (%s):' % pkey, str(pvalue))
                        # which passes two arguments to list.append and
                        # applies % to pkey alone -- a guaranteed TypeError
                        # at runtime. Format both values into one string.
                        inddoc.append(u' - %s (%s):' % (pkey, str(pvalue)))

        # create the indicator node, add it to a viewlist and parse
        indsubnode = nodes.container()
        inddocview = ViewList(inddoc, indname)
        nested_parse_with_titles(self.state, inddocview, indsubnode)

        # Add the indicator subnode to the list of nodes to be returned
        indnode.append(indsubnode)

    return indnode
def run(self):
    """
    Builds a quote node from the directive options and content.

    Generates an RST preamble (optional label, author/book/pages/date/
    source line and an index entry), parses it together with the body
    into a ``quote_node``, and stores all options as node attributes.

    :returns: a one-element list containing the quote node (or a
        system_message node if the base directive produced one).
    """
    env = self.state.document.settings.env if hasattr(
        self.state.document.settings, "env") else None
    docname = None if env is None else env.docname
    if docname is not None:
        # Keep only the basename of the document path.
        docname = docname.replace("\\", "/").split("/")[-1]

    if not self.options.get('class'):
        self.options['class'] = ['admonition-quote']

    # body
    (quote, ) = super(QuoteNode, self).run()
    if isinstance(quote, nodes.system_message):
        return [quote]  # pragma: no cover

    # mid
    tag = self.options.get('tag', 'quotetag').strip()
    if len(tag) == 0:
        raise ValueError("tag is empty")  # pragma: no cover

    def __(text):
        # Translate non-empty option values; empty stays "".
        if text:
            return _(text)
        return ""

    # book
    author = __(self.options.get('author', "").strip())
    book = __(self.options.get('book', "").strip())
    pages = __(self.options.get('pages', "").strip())
    year = __(self.options.get('year', "").strip())
    source = __(self.options.get('source', "").strip())
    index = __(self.options.get('index', "").strip())
    date = __(self.options.get('date', "").strip())

    indexes = []
    if index:
        indexes.append(index)  # pragma: no cover

    # add a label
    lid = self.options.get('lid', self.options.get('label', None))
    if lid:
        tnl = ['', ".. _{0}:".format(lid), ""]
    else:
        tnl = []  # pragma: no cover

    # Compose the citation line: **author**, *book*, pages (date), source.
    if author:
        tnl.append("**{0}**, ".format(author))
        indexes.append(author)
    if book:
        tnl.append("*{0}*".format(book))
        indexes.append(book)
    if pages:
        tnl.append(", {0}".format(pages))
    if date:
        tnl.append(" ({0})".format(date))
    if source:
        # HTTP sources become hyperlinks, everything else plain text.
        if source.startswith("http"):
            tnl.append(", `source <{0}>`_".format(source))
        else:
            tnl.append(", {0}".format(source))
    tnl.append('')
    tnl.append(".. index:: " + ", ".join(indexes))
    tnl.append('')

    content = StringList(tnl)
    content = content + self.content

    node = quote_node()

    try:
        nested_parse_with_titles(self.state, content, node)
    except Exception as e:  # pragma: no cover
        from sphinx.util import logging
        logger = logging.getLogger("blogpost")
        logger.warning(
            "[blogpost] unable to parse '{0}' - '{1}' - {2}".format(
                author, book, e))
        raise e

    # Expose every option on the node for the HTML visitors.
    node['tag'] = tag
    node['author'] = author
    node['pages'] = pages
    node['year'] = year
    node['label'] = lid
    node['source'] = source
    node['book'] = book
    node['index'] = index
    node['content'] = '\n'.join(self.content)
    node['classes'] += ["quote"]
    return [node]
def run(self):
    """Generate a release-notes report and parse it into the document.

    Reads the reno configuration for the repository, optionally
    overridden by directive options (branch, versions, notes dir, …),
    formats the report text and parses it with titles into a section.

    :returns: the child nodes produced by parsing the generated report.
    """
    title = ' '.join(self.content)
    branch = self.options.get('branch')
    relnotessubdir = self.options.get(
        'relnotessubdir', defaults.RELEASE_NOTES_SUBDIR,
    )
    reporoot = self._find_reporoot(
        self.options.get('reporoot', '.'), relnotessubdir,
    )
    # FIX: filter out empty entries. Previously ''.split(',') produced
    # [''], which is truthy, so an absent/blank ignore-notes option
    # always injected a bogus [''] override into the configuration.
    ignore_notes = [
        name.strip()
        for name in self.options.get('ignore-notes', '').split(',')
        if name.strip()
    ]
    conf = config.Config(reporoot, relnotessubdir)
    opt_overrides = {}
    if 'notesdir' in self.options:
        opt_overrides['notesdir'] = self.options.get('notesdir')
    version_opt = self.options.get('version')
    # FIXME(dhellmann): Force these flags True for now and figure
    # out how Sphinx passes a "false" flag later.
    # 'collapse-pre-releases' in self.options
    opt_overrides['collapse_pre_releases'] = True
    # Only stop at the branch base if we have not been told
    # explicitly which versions to include.
    opt_overrides['stop_at_branch_base'] = (version_opt is None)
    if 'earliest-version' in self.options:
        opt_overrides['earliest_version'] = self.options.get(
            'earliest-version')
    if 'unreleased-version-title' in self.options:
        opt_overrides['unreleased_version_title'] = self.options.get(
            'unreleased-version-title')
    if branch:
        opt_overrides['branch'] = branch
    if ignore_notes:
        opt_overrides['ignore_notes'] = ignore_notes
    conf.override(**opt_overrides)

    notesdir = os.path.join(relnotessubdir, conf.notesdir)
    # Use lazy %-style logging arguments instead of eager formatting.
    LOG.info('scanning %s for %s release notes',
             os.path.join(conf.reporoot, notesdir),
             branch or 'current branch')

    ldr = loader.Loader(conf)
    if version_opt is not None:
        versions = [v.strip() for v in version_opt.split(',')]
    else:
        versions = ldr.versions
    LOG.info('got versions %s', versions)
    text = formatter.format_report(
        ldr,
        conf,
        versions,
        title=title,
        branch=branch,
    )
    source_name = '<%s %s>' % (__name__, branch or 'current branch')
    result = statemachine.ViewList()
    for line_num, line in enumerate(text.splitlines(), 1):
        LOG.debug('%4d: %s', line_num, line)
        result.append(line, source_name, line_num)

    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children