def get_supported_format(suffix):
    """Return the input formats supported by the parser for *suffix*.

    Falls back to reStructuredText when no parser is configured.
    """
    cls = app.config.source_parsers.get(suffix)
    if cls is None:
        return ('restructuredtext',)
    # A parser may be registered as a dotted path; resolve it to the class.
    if isinstance(cls, string_types):
        cls = import_object(cls, 'source parser')
    return cls.supported
def get_supported_format(suffix: str) -> Tuple[str, ...]:
    """Report the formats the parser registered for *suffix* supports."""
    cls = app.registry.get_source_parsers().get(suffix)
    if cls is None:
        # No parser registered: reST is the default source format.
        return ('restructuredtext',)
    if isinstance(cls, str):
        # Registered by dotted path; import the actual parser class.
        cls = import_object(cls, 'source parser')
    return cls.supported
def get_supported_format(suffix):
    """Look up the source parser for *suffix* and report its formats."""
    parser = app.config.source_parsers.get(suffix)
    if parser is None:
        # Unknown suffix -> default to reStructuredText.
        return ('restructuredtext', )
    if isinstance(parser, string_types):
        # Registered by dotted name; import the class itself.
        parser = import_object(parser, 'source parser')
    return parser.supported
def __init__(self, module_path, base_module=None):
    """Import *module_path* and prepare the tree state.

    :param module_path: dotted path of the module to inspect
    :param base_module: name prefix to use; defaults to the imported
        module's own ``__name__``
    """
    self.module = import_object(module_path)
    self.base_module = base_module or self.module.__name__
    self.module_classes = set()  # filled in by _populate_tree()
    self.inheritances = []       # filled in by _populate_tree()
    self._populate_tree()
def init(self, options: Dict) -> None:
    """Instantiate the splitter class configured under ``options['type']``.

    :raises ExtensionError: if the configured splitter cannot be imported.
    """
    path = options.get('type', 'sphinx.search.ja.DefaultSplitter')
    try:
        # Import and instantiate in one step, as before; either may raise.
        self.splitter = import_object(path)(options)
    except ExtensionError as exc:
        raise ExtensionError("Splitter module %r can't be imported" % path) from exc
def __init__(self, parsers=None, *args, **kwargs):
    """Set up the reader and build the suffix -> parser-instance map.

    :param parsers: optional mapping of file suffix to a parser class,
        or to a dotted path naming one.
    """
    standalone.Reader.__init__(self, *args, **kwargs)
    self.parser_map = {}
    # The default used to be a shared mutable dict (``parsers={}``);
    # ``None`` treated as an empty mapping is behaviourally identical
    # and avoids the mutable-default-argument pitfall.
    for suffix, parser_class in (parsers or {}).items():
        if isinstance(parser_class, string_types):
            parser_class = import_object(parser_class, 'source parser')
        self.parser_map[suffix] = parser_class()
def get_nb_converter( path: str, env: BuildEnvironment, source_iter: Optional[Iterable[str]] = None, ) -> Optional[NbConverter]: """Get function, to convert a source string to a Notebook.""" # Standard notebooks take priority if path.endswith(".ipynb"): return NbConverter( lambda text: nbf.reads(text, as_version=NOTEBOOK_VERSION), env.myst_config) # we check suffixes ordered by longest first, to ensure we get the "closest" match for source_suffix in sorted(env.config.nb_custom_formats.keys(), key=len, reverse=True): if path.endswith(source_suffix): ( converter, converter_kwargs, commonmark_only, ) = env.config.nb_custom_formats[source_suffix] converter = import_object(converter) a = NbConverter( lambda text: converter(text, **(converter_kwargs or {})), env.myst_config if commonmark_only is None else attr.evolve( env.myst_config, commonmark_only=commonmark_only), ) return a # If there is no source text then we assume a MyST Notebook if source_iter is None: # Check if docname exists return NbConverter( lambda text: myst_to_notebook( text, config=env.myst_config, add_source_map=True, path=path, ), env.myst_config, ) # Given the source lines, we check it can be recognised as a MyST Notebook if is_myst_notebook(source_iter): # Check if docname exists return NbConverter( lambda text: myst_to_notebook( text, config=env.myst_config, add_source_map=True, path=path, ), env.myst_config, ) # Otherwise, we return None, # to imply that it should be parsed as as standard Markdown file return None
def create_template_bridge(self) -> None:
    """Return the template bridge configured."""
    if not self.config.template_bridge:
        # No bridge configured: fall back to the builtin Jinja2 loader.
        from sphinx.jinja2glue import BuiltinTemplateLoader
        self.templates = BuiltinTemplateLoader()
        return
    # A dotted path was configured; import the class and instantiate it.
    self.templates = import_object(self.config.template_bridge,
                                   'template_bridge setting')()
def get_supported_format(suffix):
    # type: (str) -> Tuple[str, ...]
    """Map a file suffix to the formats its registered parser supports."""
    parser = app.registry.get_source_parsers().get(suffix)
    if parser is None:
        return ('restructuredtext',)
    if isinstance(parser, str):
        # dotted-path registration -> import the real class
        parser = import_object(parser, 'source parser')
    return parser.supported
def get_parser_type(source_path):
    # type: (unicode) -> Tuple[unicode]
    """Return the supported formats of the parser matching *source_path*."""
    for suffix, parser_class in iteritems(self.app.registry.get_source_parsers()):
        if not source_path.endswith(suffix):
            continue
        if isinstance(parser_class, string_types):
            parser_class = import_object(parser_class, 'source parser')  # type: ignore  # NOQA
        return parser_class.supported
    # nothing matched: assume reStructuredText
    return ('restructuredtext',)
def get_supported_format(suffix):
    # type: (unicode) -> Tuple[unicode]
    """Look up which formats the parser registered for *suffix* handles."""
    cls = app.registry.get_source_parsers().get(suffix)
    if cls is None:
        # Unregistered suffix: default to reStructuredText.
        return ('restructuredtext',)
    if isinstance(cls, string_types):
        cls = import_object(cls, 'source parser')  # type: ignore
    return cls.supported
def get_parser_type(source_path):
    """Resolve the configured parser for *source_path* and report its formats."""
    for suffix in self.env.config.source_parsers:
        if source_path.endswith(suffix):
            cls = self.env.config.source_parsers[suffix]
            if isinstance(cls, string_types):
                cls = import_object(cls, 'source parser')
            return cls.supported
    # No configured suffix matched: assume reStructuredText.
    return ('restructuredtext',)
def deprecate_source_parsers(app: "Sphinx", config: Config) -> None:
    """Warn about the deprecated ``source_parsers`` config value and
    re-register each entry through ``app.add_source_parser()``.

    :param app: the Sphinx application to register parsers on
    :param config: the configuration holding ``source_parsers``
    """
    if config.source_parsers:
        # stacklevel=2 attributes the warning to the caller, matching the
        # other RemovedInSphinx30Warning usages in this code base.
        warnings.warn('The config variable "source_parsers" is deprecated. '
                      'Please update your extension for the parser and remove the setting.',
                      RemovedInSphinx30Warning, stacklevel=2)
        for suffix, parser in config.source_parsers.items():
            if isinstance(parser, str):
                # a dotted path was configured; import the parser class
                parser = import_object(parser, 'source parser')
            app.add_source_parser(suffix, parser)
def create_template_bridge(self):
    # type: () -> None
    """Return the template bridge configured."""
    bridge = self.config.template_bridge
    if bridge:
        # Configured as a dotted path: import, then instantiate.
        self.templates = import_object(bridge, 'template_bridge setting')()
    else:
        # Default: the Jinja2-based builtin loader.
        from sphinx.jinja2glue import BuiltinTemplateLoader
        self.templates = BuiltinTemplateLoader()
def __init__(self, module_path, base_module=None):
    """Import *module_path* and initialise the collection containers.

    :param module_path: dotted path of the module to analyse
    :param base_module: name prefix to use; defaults to the imported
        module's ``__name__``
    """
    self.module = import_object(module_path)
    self.base_module = base_module or self.module.__name__
    # All of the containers below are populated by _populate_tree().
    self.module_classes = set()
    self.inheritances = []
    self.associations = []
    self.class_members = {}
    self.namedtuples = []
    self._populate_tree()
def __init__(self, app, parsers=None, *args, **kwargs):
    """Set up the reader and instantiate one parser per registered suffix.

    :param app: the Sphinx application, handed to parsers exposing
        ``set_application``
    :param parsers: optional mapping of file suffix to a parser class,
        or to a dotted path naming one
    """
    standalone.Reader.__init__(self, *args, **kwargs)
    self.parser_map = {}
    # ``parsers=None`` replaces the old mutable default ``{}``; an empty
    # mapping behaves identically and avoids shared-default-object bugs.
    for suffix, parser_class in (parsers or {}).items():
        if isinstance(parser_class, string_types):
            parser_class = import_object(parser_class, 'source parser')
        parser = parser_class()
        if hasattr(parser, 'set_application'):
            parser.set_application(app)
        self.parser_map[suffix] = parser
def deprecate_source_parsers(app, config):
    # type: (Sphinx, Config) -> None
    """Warn that ``source_parsers`` is deprecated and re-register each
    configured parser via ``app.add_source_parser()``.
    """
    if config.source_parsers:
        # stacklevel=2 points the warning at the caller, consistent with
        # the other deprecation warnings in this code base.
        warnings.warn('The config variable "source_parsers" is deprecated. '
                      'Please use app.add_source_parser() API instead.',
                      RemovedInSphinx30Warning, stacklevel=2)
        for suffix, parser in config.source_parsers.items():
            if isinstance(parser, str):
                parser = import_object(parser, 'source parser')
            app.add_source_parser(suffix, parser)
def get_parser_type(source_path):
    # type: (unicode) -> Tuple[unicode]
    """Return the supported formats of the parser configured for *source_path*."""
    # first configured suffix that matches the path, or None
    suffix = next(
        (s for s in self.env.config.source_parsers if source_path.endswith(s)),
        None)
    if suffix is None:
        return ('restructuredtext',)
    parser_class = self.env.config.source_parsers[suffix]
    if isinstance(parser_class, string_types):
        parser_class = import_object(parser_class, 'source parser')  # type: ignore  # NOQA
    return parser_class.supported
def create_nb_reader( path: str, md_config: MdParserConfig, nb_config: NbParserConfig, content: None | str | Iterator[str], ) -> NbReader | None: """Create a notebook reader, given a string, source path and configuration. Note, we do not directly parse to a notebook, since jupyter-cache functionality requires the reader. :param path: Path to the input source being processed. :param nb_config: The configuration for parsing Notebooks. :param md_config: The default configuration for parsing Markown. :param content: The input string (optionally used to check for text-based notebooks) :returns: the notebook reader, and the (potentially modified) MdParserConfig, or None if the input cannot be read as a notebook. """ # the import is here so this module can be loaded without sphinx from sphinx.util import import_object # get all possible readers readers = nb_config.custom_formats.copy() # add the default reader readers.setdefault(".ipynb", (standard_nb_read, {}, False)) # type: ignore # we check suffixes ordered by longest first, to ensure we get the "closest" match iterator = sorted(readers.items(), key=lambda x: len(x[0]), reverse=True) for suffix, (reader, reader_kwargs, commonmark_only) in iterator: if path.endswith(suffix): if isinstance(reader, str): # attempt to load the reader as an object path reader = import_object(reader) if commonmark_only: # Markdown cells should be read as Markdown only md_config = dc.replace(md_config, commonmark_only=True) return NbReader(partial(reader, **(reader_kwargs or {})), md_config) # type: ignore # a Markdown file is a special case, since we only treat it as a notebook, # if it starts with certain "top-matter" if content is not None and is_myst_markdown_notebook(content): return NbReader( partial( read_myst_markdown_notebook, config=md_config, add_source_map=True, path=path, ), md_config, {"type": "plugin", "name": "myst_nb_md"}, ) # if we get here, we did not find a reader return None
def init(self, options):
    """Create the splitter selected by ``options['type']``.

    Known short names in ``self.splitters`` are mapped to dotted paths;
    any other value is treated as a dotted path itself.

    :raises ExtensionError: if the splitter module cannot be imported.
    """
    # renamed from ``type`` to avoid shadowing the builtin
    splitter_type = options.get('type', 'default')
    if splitter_type in self.splitters:
        dotted_path = self.splitters[splitter_type]
    else:
        dotted_path = splitter_type
    try:
        self.splitter = import_object(dotted_path)(options)
    except ExtensionError as exc:
        # chain the original error so the import failure is not lost
        raise ExtensionError("Splitter module %r can't be imported" %
                             dotted_path) from exc
def import_object(self, objname, source=None):
    # type: (str, unicode) -> Any
    """Import an object from a ``module.name`` string.

    .. deprecated:: 1.8
       Use ``sphinx.util.import_object()`` instead.
    """
    # The message now recommends the replacement the docstring names;
    # it previously pointed at the unrelated add_object_type() API.
    warnings.warn('app.import_object() is deprecated. '
                  'Use sphinx.util.import_object() instead.',
                  RemovedInSphinx30Warning)
    # Forward *source* instead of hard-coding None, so failure messages
    # still mention what needed the import.
    return import_object(objname, source=source)
def __init__(self, app, parsers=None, *args, **kwargs):
    # type: (Sphinx, Dict[unicode, Parser], Any, Any) -> None
    """Set up the reader and instantiate one parser per suffix.

    :param app: the Sphinx application, handed to parsers exposing
        ``set_application``
    :param parsers: optional suffix -> parser class (or dotted path) map
    """
    standalone.Reader.__init__(self, *args, **kwargs)
    self.parser_map = {}  # type: Dict[unicode, Parser]
    # ``parsers=None`` replaces the old mutable default ``{}``;
    # an empty mapping behaves identically.
    for suffix, parser_class in (parsers or {}).items():
        if isinstance(parser_class, string_types):
            parser_class = import_object(parser_class, 'source parser')  # type: ignore
        parser = parser_class()
        if hasattr(parser, 'set_application'):
            parser.set_application(app)
        self.parser_map[suffix] = parser
def import_object(self, objname, source=None):
    # type: (str, str) -> Any
    """Import an object from a ``module.name`` string.

    .. deprecated:: 1.8
       Use ``sphinx.util.import_object()`` instead.
    """
    # The message now matches the docstring's recommendation; it used to
    # point at the unrelated add_object_type() API.
    warnings.warn('app.import_object() is deprecated. '
                  'Use sphinx.util.import_object() instead.',
                  RemovedInSphinx30Warning, stacklevel=2)
    # Pass the caller-supplied *source* through (was hard-coded to None,
    # silently dropping it).
    return import_object(objname, source=source)
def get_parser_type(source_path):
    # type: (unicode) -> Tuple[unicode]
    """Report the formats supported by the parser matching *source_path*."""
    registered = iteritems(self.app.registry.get_source_parsers())
    for suffix, parser_class in registered:
        if not source_path.endswith(suffix):
            continue
        if isinstance(parser_class, string_types):
            parser_class = import_object(
                parser_class, 'source parser')  # type: ignore  # NOQA
        return parser_class.supported
    # no registered suffix matched
    return ('restructuredtext', )
def run(self):
    """Render ``autoclass`` markup for a class hierarchy rooted at the
    directive's argument and parse it into docutils nodes."""
    def walk(cls):
        """Render the given class, then recursively render its
        descendants depth first.

        Appends to the outer ``lines`` variable.

        :param cls: The Jinja ``Node`` class to render.
        """
        lines.append(".. autoclass:: {}({})".format(
            cls.__name__, ", ".join(cls.fields)))

        # render member methods for nodes marked abstract
        if cls.abstract:
            members = []
            for key, value in cls.__dict__.items():
                # public callables defined on this class (not inherited)
                if (not key.startswith("_")
                        and not hasattr(cls.__base__, key)
                        and callable(value)):
                    members.append(key)
            if members:
                members.sort()
                lines.append(" :members: " + ", ".join(members))

        # reference the parent node, except for the base node
        if cls.__base__ is not object:
            lines.append("")
            lines.append(" :Node type: :class:`{}`".format(
                cls.__base__.__name__))

        lines.append("")
        children = cls.__subclasses__()
        children.sort(key=lambda x: x.__name__.lower())

        # render each child
        for child in children:
            walk(child)

    # generate the markup starting at the base class
    lines = []
    target = import_object(self.arguments[0])
    walk(target)

    # parse the generated markup into nodes
    doc = StringList(lines, "<jinja>")
    node = nodes.Element()
    self.state.nested_parse(doc, self.content_offset, node)
    return node.children
def init(self, options):
    # type: (Dict) -> None
    """Create the Japanese splitter configured in ``options['type']``.

    Deprecated short names from ``self.splitters`` are still honoured
    (with a warning); any other value is used as a dotted import path.

    :raises ExtensionError: if the splitter module cannot be imported.
    """
    type = options.get('type', 'sphinx.search.ja.DefaultSplitter')
    if type in self.splitters:
        dotted_path = self.splitters[type]
        warnings.warn('html_search_options["type"]: %s is deprecated. '
                      'Please give "%s" instead.' % (type, dotted_path),
                      RemovedInSphinx30Warning, stacklevel=2)
    else:
        dotted_path = type
    try:
        self.splitter = import_object(dotted_path)(options)
    except ExtensionError as exc:
        # keep the original import failure in the exception chain
        raise ExtensionError("Splitter module %r can't be imported" %
                             dotted_path) from exc
def get_source_parser(self, filename):
    # type: (unicode) -> Type[Parser]
    """Return the parser class registered for *filename*'s suffix.

    Falls back to the catch-all ``'*'`` entry when no suffix matches,
    and raises :exc:`SphinxError` when no parser can be found at all.
    """
    for suffix, parser_class in iteritems(self.source_parsers):
        if filename.endswith(suffix):
            break
    else:
        # use special parser for unknown file-extension '*' (if exists)
        parser_class = self.source_parsers.get('*')

    if parser_class is None:
        raise SphinxError(__('source_parser for %s not registered') % filename)
    else:
        if isinstance(parser_class, string_types):
            # registered as a dotted path; import the real class
            parser_class = import_object(parser_class, 'source parser')  # type: ignore
        return parser_class
def init(self, options):
    # type: (Dict) -> None
    """Create the Japanese splitter configured in ``options['type']``.

    Deprecated short names in ``self.splitters`` are mapped to dotted
    paths (with a warning); any other value is imported directly.

    :raises ExtensionError: if the splitter module cannot be imported.
    """
    type = options.get('type', 'sphinx.search.ja.DefaultSplitter')
    if type in self.splitters:
        dotted_path = self.splitters[type]
        # stacklevel=2 attributes the warning to the caller, matching
        # the other deprecation warnings in this code base.
        warnings.warn('html_search_options["type"]: %s is deprecated. '
                      'Please give "%s" instead.' % (type, dotted_path),
                      RemovedInSphinx30Warning, stacklevel=2)
    else:
        dotted_path = type
    try:
        self.splitter = import_object(dotted_path)(options)
    except ExtensionError as exc:
        # chain the original error so the import failure is not lost
        raise ExtensionError("Splitter module %r can't be imported" %
                             dotted_path) from exc
def _build_functions(self):
    """Imports the dict and builds the output for the functions.

    This is what determines aliases and performs sorting. Calls
    :func:`build_function_directive` for each function, then renders
    the list of reStructuredText to nodes. The list of sorted names
    is stored for use by :meth:`_build_table`.

    :return: A list of rendered nodes.
    """
    map_name = self.arguments[0]
    mapping = import_object(map_name)
    grouped = {}

    # reverse the mapping to get a list of aliases for each function
    for key, value in mapping.items():
        grouped.setdefault(value, []).append(key)

    # store the function names for use by _build_table
    self.funcs = funcs = []
    compare_ops = {"eq", "ge", "gt", "le", "lt", "ne"}

    for func, names in grouped.items():
        # use the longest alias as the canonical name
        names.sort(key=len)
        # adjust for special cases: the stable second sort moves
        # comparison-operator names last, so pop() below prefers them
        names.sort(key=lambda x: x in compare_ops)
        name = names.pop()
        funcs.append((name, names, func))

    funcs.sort()
    result = StringList()

    # generate and collect markup
    for name, aliases, func in funcs:
        for item in build_function_directive(name, aliases, func):
            result.append(item, "<jinja>")

    # parse the generated markup into nodes
    node = nodes.Element()
    self.state.nested_parse(result, self.content_offset, node)
    return node.children
def test_import_object():
    """Exercise import_object() on modules, attributes and failures."""
    # a top-level module name resolves to the module object
    module = import_object('sphinx')
    assert module.__name__ == 'sphinx'

    # dotted module paths work too
    module = import_object('sphinx.application')
    assert module.__name__ == 'sphinx.application'

    # an attribute of a module can be imported
    obj = import_object('sphinx.application.Sphinx')
    assert obj.__name__ == 'Sphinx'

    # unknown names raise ExtensionError with a plain message ...
    with pytest.raises(ExtensionError) as exc:
        import_object('sphinx.unknown_module')
    assert exc.value.args[0] == 'Could not import sphinx.unknown_module'

    # ... and the *source* argument is appended to the message
    with pytest.raises(ExtensionError) as exc:
        import_object('sphinx.unknown_module', 'my extension')
    assert exc.value.args[0] == ('Could not import sphinx.unknown_module '
                                 '(needed for my extension)')
def import_object(self, objname, source=None):
    """Import an object from a 'module.name' string.

    :param objname: dotted ``module.name`` path of the object
    :param source: optional description of what needs the import,
        included in the failure message
    """
    # Forward *source* to the utility function; it was previously
    # hard-coded to None, silently discarding the caller's value.
    return import_object(objname, source=source)
def validate_config_values(app: Sphinx, config):
    """Validate configuration values.

    Checks the execution/cache settings, normalises ``nb_custom_formats``
    entries to ``[converter, kwargs, commonmark_only]`` lists, and verifies
    the remaining MyST-NB options, raising :exc:`MystNbConfigError` on any
    invalid value.
    """
    execute_mode = app.config["jupyter_execute_notebooks"]
    if execute_mode not in ["force", "auto", "cache", "off"]:
        raise MystNbConfigError(
            "'jupyter_execute_notebooks' can be: "
            f"`force`, `auto`, `cache` or `off`, but got: {execute_mode}",
        )
    if app.config["jupyter_cache"] and execute_mode != "cache":
        raise MystNbConfigError(
            "'jupyter_cache' is set, "
            f"but 'jupyter_execute_notebooks' is not `cache`: {execute_mode}")
    if app.config["jupyter_cache"] and not os.path.isdir(
            app.config["jupyter_cache"]):
        raise MystNbConfigError(
            f"'jupyter_cache' is not a directory: {app.config['jupyter_cache']}",
        )

    if not isinstance(app.config["nb_custom_formats"], dict):
        raise MystNbConfigError("'nb_custom_formats' should be a dictionary: "
                                f"{app.config['nb_custom_formats']}")
    for name, converter in app.config["nb_custom_formats"].items():
        if not isinstance(name, str):
            # fixed typo: "should br" -> "should be"
            raise MystNbConfigError(
                f"'nb_custom_formats' keys should be a string: {name}")
        if isinstance(converter, str):
            app.config["nb_custom_formats"][name] = [converter, {}]
        elif not (isinstance(converter, Sequence) and len(converter) in [2, 3]):
            raise MystNbConfigError(
                "'nb_custom_formats' values must be "
                f"either strings or 2/3-element sequences, got: {converter}")
        else:
            # Normalise to a mutable list: the previous code stored a
            # tuple, and the .append(None) below would then raise
            # AttributeError for string- or tuple-valued entries.
            app.config["nb_custom_formats"][name] = list(converter)
        converter_str = app.config["nb_custom_formats"][name][0]
        caller = import_object(
            converter_str,
            f"MyST-NB nb_custom_formats: {name}",
        )
        if not callable(caller):
            raise MystNbConfigError(
                f"`nb_custom_formats.{name}` converter is not callable: {caller}"
            )
        if len(app.config["nb_custom_formats"][name]) == 2:
            # default commonmark_only to None (defer to the global setting)
            app.config["nb_custom_formats"][name].append(None)
        elif not isinstance(app.config["nb_custom_formats"][name][2], bool):
            raise MystNbConfigError(
                f"`nb_custom_formats.{name}.commonmark_only` arg is not boolean"
            )

    if not isinstance(app.config["nb_render_key"], str):
        raise MystNbConfigError("`nb_render_key` is not a string")

    if app.config["nb_output_stderr"] not in [
        "show",
        "remove",
        "remove-warn",
        "warn",
        "error",
        "severe",
    ]:
        raise MystNbConfigError(
            "`nb_output_stderr` not one of: "
            "'show', 'remove', 'remove-warn', 'warn', 'error', 'severe'")

    # try loading notebook output renderer
    load_renderer(app.config["nb_render_plugin"])
def import_object(self, objname, source=None):
    # type: (str, unicode) -> Any
    """Import an object from a 'module.name' string.

    :param objname: dotted ``module.name`` path of the object
    :param source: optional label of what needs the import; included in
        the error message when the import fails
    """
    # Was ``source=None``, which dropped the caller's *source* argument.
    return import_object(objname, source=source)