def recurse(cls):
    # Walk the inheritance graph rooted at *cls*, recording each class in
    # the enclosing ``all_classes`` mapping (closure state).  Builtin and
    # private classes are filtered out according to the enclosing options.
    if not show_builtins and cls in py_builtins:
        return
    if not private_bases and cls.__name__.startswith('_'):
        return

    nodename = self.class_name(cls, parts)
    fullname = self.class_name(cls, 0)

    # Use first line of docstring as tooltip, if available
    tooltip = None
    try:
        if cls.__doc__:
            enc = ModuleAnalyzer.for_module(cls.__module__).encoding
            doc = cls.__doc__.strip().split("\n")[0]
            if not isinstance(doc, text_type):
                doc = force_decode(doc, enc)
            if doc:
                tooltip = '"%s"' % doc.replace('"', '\\"')
    except Exception:  # might raise AttributeError for strange classes
        pass

    baselist = []
    all_classes[cls] = (nodename, fullname, baselist, tooltip)
    for base in cls.__bases__:
        if not show_builtins and base in py_builtins:
            continue
        if not private_bases and base.__name__.startswith('_'):
            continue
        baselist.append(self.class_name(base, parts))
        # recurse only into classes not seen yet (graph may be a DAG)
        if base not in all_classes:
            recurse(base)
def has_tag(modname, fullname, docname, refname): entry = env._viewcode_modules.get(modname, None) # type: ignore if entry is False: return code_tags = app.emit_firstresult('viewcode-find-source', modname) if code_tags is None: try: analyzer = ModuleAnalyzer.for_module(modname) except Exception: env._viewcode_modules[modname] = False # type: ignore return if not isinstance(analyzer.code, text_type): code = analyzer.code.decode(analyzer.encoding) else: code = analyzer.code analyzer.find_tags() tags = analyzer.tags else: code, tags = code_tags if entry is None or entry[0] != code: entry = code, tags, {}, refname env._viewcode_modules[modname] = entry # type: ignore _, tags, used, _ = entry if fullname in tags: used[fullname] = docname return True
def test_ModuleAnalyzer_find_tags():
    # Source with nested classes/methods and a decorated function; the
    # inline comments mark the 1-based line numbers the tags should report.
    code = ('class Foo(object):\n'  # line: 1
            '    """class Foo!"""\n'
            '    def __init__(self):\n'
            '        pass\n'
            '\n'
            '    def bar(self, arg1, arg2=True, *args, **kwargs):\n'
            '        """method Foo.bar"""\n'
            '        pass\n'
            '\n'
            '    class Baz(object):\n'
            '        def __init__(self):\n'  # line: 11
            '            pass\n'
            '\n'
            'def qux():\n'
            '    """function baz"""\n'
            '    pass\n'
            '\n'
            '@decorator\n'
            'def quux():\n'
            '    pass\n')
    analyzer = ModuleAnalyzer.for_string(code, 'module')
    tags = analyzer.find_tags()
    expected = {
        'Foo': ('class', 1, 13),             # type, start, end
        'Foo.__init__': ('def', 3, 5),
        'Foo.bar': ('def', 6, 9),
        'Foo.Baz': ('class', 10, 13),
        'Foo.Baz.__init__': ('def', 11, 13),
        'qux': ('def', 14, 17),
        'quux': ('def', 18, 21),             # decorator included in span
    }
    assert set(tags.keys()) == set(expected)
    for tagname, tag in expected.items():
        assert tags[tagname] == tag
def make_rst(self):
    # Emit reST lines documenting every route of the Flask app named by the
    # directive's first argument (Python 2 era: uses the `unicode` builtin).
    app = import_object(self.arguments[0])
    for method, path, endpoint in get_routes(app):
        try:
            blueprint, endpoint_internal = endpoint.split('.')
            if blueprint in self.undoc_blueprints:
                continue
        except ValueError:
            pass  # endpoint is not within a blueprint

        if self.endpoints and endpoint not in self.endpoints:
            continue
        if endpoint in self.undoc_endpoints:
            continue

        try:
            static_url_path = app.static_url_path  # Flask 0.7 or higher
        except AttributeError:
            static_url_path = app.static_path  # Flask 0.6 or under
        # skip the implicit static-file route unless explicitly requested
        if ('undoc-static' in self.options and endpoint == 'static' and
                path == static_url_path + '/(path:filename)'):
            continue

        view = app.view_functions[endpoint]
        docstring = view.__doc__ or ''
        if hasattr(view, 'view_class'):
            # class-based view: prefer the docstring of the handler for
            # this HTTP verb (get/post/...) over the class docstring
            meth_func = getattr(view.view_class, method.lower(), None)
            if meth_func and meth_func.__doc__:
                docstring = meth_func.__doc__
        if not isinstance(docstring, unicode):
            # byte docstring (Python 2): decode with the module's encoding
            analyzer = ModuleAnalyzer.for_module(view.__module__)
            docstring = force_decode(docstring, analyzer.encoding)

        if not docstring and 'include-empty-docstring' not in self.options:
            continue
        docstring = prepare_docstring(docstring)
        for line in http_directive(method, path, docstring):
            yield line
def run(self):
    """Insert the contents of a file as a literal block.

    Resolves an absolute-or-relative filename argument and honors the
    ``pyobject``, ``lines`` and ``encoding`` options.  Errors produce
    reporter warnings rather than exceptions.
    """
    document = self.state.document
    filename = self.arguments[0]
    if not document.settings.file_insertion_enabled:
        return [document.reporter.warning('File insertion disabled',
                                          line=self.lineno)]
    env = document.settings.env
    if filename.startswith('/') or filename.startswith(os.sep):
        rel_fn = filename[1:]
    else:
        docdir = path.dirname(env.doc2path(env.docname, base=None))
        rel_fn = path.normpath(path.join(docdir, filename))
    try:
        fn = path.join(env.srcdir, rel_fn)
    except UnicodeDecodeError:
        # the source directory is a bytestring with non-ASCII characters;
        # let's try to encode the rel_fn in the file system encoding
        rel_fn = rel_fn.encode(sys.getfilesystemencoding())
        fn = path.join(env.srcdir, rel_fn)

    if 'pyobject' in self.options and 'lines' in self.options:
        return [document.reporter.warning(
            'Cannot use both "pyobject" and "lines" options',
            line=self.lineno)]

    encoding = self.options.get('encoding', env.config.source_encoding)
    try:
        f = codecs.open(fn, 'rU', encoding)
        lines = f.readlines()
        f.close()
    except (IOError, OSError):
        return [document.reporter.warning(
            'Include file %r not found or reading it failed' % filename,
            line=self.lineno)]
    except UnicodeError:
        return [document.reporter.warning(
            'Encoding %r used for reading included file %r seems to '
            'be wrong, try giving an :encoding: option' %
            (encoding, filename))]

    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(fn, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [document.reporter.warning(
                'Object named %r not found in include file %r' %
                (objectname, filename), line=self.lineno)]
        else:
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]

    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        # FIX: "except ValueError, err" is Python-2-only syntax; the "as"
        # form works on Python 2.6+ and Python 3.
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        lines = [lines[i] for i in linelist]
def test_ModuleAnalyzer_for_file():
    # NOTE(review): despite the test name this builds the analyzer with
    # for_string(), so the "source" is the path text itself and srcname is
    # '<string>' -- presumably intentional; confirm against for_file().
    analyzer = ModuleAnalyzer.for_string(SPHINX_MODULE_PATH, 'sphinx')
    assert (analyzer.modname, analyzer.srcname) == ('sphinx', '<string>')
    expected_encoding = 'ascii' if PY2 else None
    assert analyzer.encoding == expected_encoding
def test_ModuleAnalyzer_for_string():
    # Analyzer built from a literal source string.
    analyzer = ModuleAnalyzer.for_string('print("Hello world")', 'module_name')
    assert (analyzer.modname, analyzer.srcname) == ('module_name', '<string>')
    expected_encoding = 'ascii' if PY2 else None
    assert analyzer.encoding == expected_encoding
def inspect_routes(self, app):
    """Inspects the views of Flask.

    :param app: The Flask application.
    :returns: 4-tuple like ``(method, paths, view_func, view_doc)``
    """
    if self.endpoints:
        routes = itertools.chain(*[get_routes(app, endpoint, self.order)
                                   for endpoint in self.endpoints])
    else:
        routes = get_routes(app, order=self.order)
    for method, paths, endpoint in routes:
        try:
            # NOTE: rpartition never raises ValueError; the except clause
            # is kept for backward compatibility with older code paths.
            blueprint, _, endpoint_internal = endpoint.rpartition('.')
            if self.blueprints and blueprint not in self.blueprints:
                continue
            if blueprint in self.undoc_blueprints:
                continue
        except ValueError:
            pass  # endpoint is not within a blueprint

        if endpoint in self.undoc_endpoints:
            continue

        try:
            static_url_path = app.static_url_path  # Flask 0.7 or higher
        except AttributeError:
            static_url_path = app.static_path  # Flask 0.6 or under
        if ('undoc-static' in self.options and endpoint == 'static' and
                static_url_path + '/(path:filename)' in paths):
            continue

        view = app.view_functions[endpoint]

        if self.modules and view.__module__ not in self.modules:
            continue
        # BUG FIX: the old code tested membership in self.modules here,
        # which made the undoc-modules option a no-op; it must test
        # membership in self.undoc_modules.
        if self.undoc_modules and view.__module__ in self.undoc_modules:
            continue

        view_class = getattr(view, 'view_class', None)
        if view_class is None:
            view_func = view
        else:
            # class-based view: document the handler for this HTTP verb
            view_func = getattr(view_class, method.lower(), None)

        view_doc = view.__doc__ or ''
        if view_func and view_func.__doc__:
            view_doc = view_func.__doc__
        if not isinstance(view_doc, six.text_type):
            # byte docstring (Python 2): decode with the module's encoding
            analyzer = ModuleAnalyzer.for_module(view.__module__)
            view_doc = force_decode(view_doc, analyzer.encoding)

        if not view_doc and 'include-empty-docstring' not in self.options:
            continue

        yield (method, paths, view_func, view_doc)
def run(self):
    """Insert a file as a literal block, honoring the :pyobject:, :lines:
    and :encoding: options; problems are reported as warnings.
    """
    document = self.state.document
    if not document.settings.file_insertion_enabled:
        return [document.reporter.warning('File insertion disabled',
                                          line=self.lineno)]
    env = document.settings.env
    rel_filename, filename = env.relfn2path(self.arguments[0])

    if 'pyobject' in self.options and 'lines' in self.options:
        return [document.reporter.warning(
            'Cannot use both "pyobject" and "lines" options',
            line=self.lineno)]

    encoding = self.options.get('encoding', env.config.source_encoding)
    codec_info = codecs.lookup(encoding)
    f = None
    try:
        f = codecs.StreamReaderWriter(open(filename, 'rb'),
                                      codec_info[2], codec_info[3], 'strict')
        lines = f.readlines()
    except (IOError, OSError):
        return [document.reporter.warning(
            'Include file %r not found or reading it failed' % filename,
            line=self.lineno)]
    except UnicodeError:
        return [document.reporter.warning(
            'Encoding %r used for reading included file %r seems to '
            'be wrong, try giving an :encoding: option' %
            (encoding, filename))]
    finally:
        # close the stream on every path
        if f is not None:
            f.close()

    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(filename, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [document.reporter.warning(
                'Object named %r not found in include file %r' %
                (objectname, filename), line=self.lineno)]
        else:
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]

    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        # FIX: "except ValueError, err" is Python-2-only syntax; use the
        # "as" form, valid on Python 2.6+ and Python 3.
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        # just ignore nonexisting lines
        nlines = len(lines)
        lines = [lines[i] for i in linelist if i < nlines]
        if not lines:
            return [document.reporter.warning(
                'Line spec %r: no lines pulled from include file %r' %
                (linespec, filename), line=self.lineno)]
def create_node(self, filename, rel_filename, lang):
    """Read *filename* and produce the literal node content (lines),
    honoring the :pyobject: and :lines: options.
    """
    document = self.state.document
    env = document.settings.env

    # Read the contents of the file to include
    encoding = self.options.get('encoding', env.config.source_encoding)
    codec_info = codecs.lookup(encoding)
    f = None
    try:
        f = codecs.StreamReaderWriter(open(filename, 'rb'),
                                      codec_info[2], codec_info[3], 'strict')
        lines = f.readlines()
    except (IOError, OSError):
        print_err('Failed to read %r' % filename)
        return [document.reporter.warning(
            'Include file %r not found or reading it failed' % filename,
            line=self.lineno)]
    except UnicodeError:
        print_err('Encoding %r used for reading included file %r seems to '
                  'be wrong, try giving an :encoding: option' %
                  (encoding, filename))
        return [document.reporter.warning(
            'Encoding %r used for reading included file %r seems to '
            'be wrong, try giving an :encoding: option' %
            (encoding, filename))]
    finally:
        # FIX: close the stream on all paths -- it previously leaked when
        # readlines() raised.
        if f is not None:
            f.close()

    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(filename, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [document.reporter.warning(
                'Object named %r not found in include file %r' %
                (objectname, filename), line=self.lineno)]
        else:
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]

    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        # FIX: "except ValueError, err" is Python-2-only syntax; use the
        # "as" form, valid on Python 2.6+ and Python 3.
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        # just ignore nonexisting lines
        nlines = len(lines)
        lines = [lines[i] for i in linelist if i < nlines]
        if not lines:
            return [document.reporter.warning(
                'Line spec %r: no lines pulled from include file %r' %
                (linespec, filename), line=self.lineno)]
def test_ModuleAnalyzer_for_module_in_egg(rootdir):
    # Modules that live inside an egg on sys.path must be analyzable too.
    try:
        egg_path = rootdir / 'test-pycode-egg' / 'sample-0.0.0-py3.7.egg'
        sys.path.insert(0, egg_path)
        attr_docs = ModuleAnalyzer.for_module('sample').find_attr_docs()
        assert attr_docs == {('', 'CONSTANT'): ['constant on sample.py', '']}
    finally:
        # undo the sys.path manipulation
        sys.path.pop(0)
def get_attr_docs(self, ty):
    """Return a mapping of attribute name -> attribute docstring for the
    module that defines *ty*.

    This reaches into some undocumented stuff in sphinx to extract the
    attribute documentation.
    """
    analyzer = ModuleAnalyzer.for_module(ty.__module__)
    module_attrs = analyzer.find_attr_docs()  # (scope is broken!)
    # find_attr_docs keys are (scope, attr_name) pairs; flatten to the
    # bare attribute name and join the doc lines.
    # FIX: .items() instead of the Python-2-only .iteritems() so this
    # works on both Python 2 and 3.
    attrs_docs = {}
    for key, doc_lines in module_attrs.items():
        attrs_docs[key[1]] = "\n".join(doc_lines).strip()
    return attrs_docs
def _str_member_list(self):
    """
    Generate a member listing, autosummary:: table .

    """
    out = []
    for name in ['Attributes', 'Methods']:
        if not self[name]:
            continue
        out += ['.. rubric:: %s' % name, '']
        prefix = getattr(self, '_name', '')

        if prefix:
            prefix = '%s.' % prefix
        elif hasattr(self, 'name') and self.name:
            # This is a class: Use its name to make sure Sphinx can find
            # the methods and attributes
            prefix = '%s.' % (self.name)
        else:
            prefix = ''

        autosum = []
        for param, _, desc in self[name]:
            param = param.strip()
            if self._obj:
                # Fake the attribute as a class property, but do not touch
                # methods
                if (hasattr(self._obj, '__module__') and not
                        (hasattr(self._obj, param) and
                         callable(getattr(self._obj, param)))):

                    # Do not override directly provided docstrings
                    if not len(''.join(desc).strip()):
                        # pull the attribute doc from the source via Sphinx
                        analyzer = ModuleAnalyzer.for_module(self._obj.__module__)
                        desc = analyzer.find_attr_docs().get(
                            (self._obj.__name__, param), '')

                    # Only fake a property if we got a docstring
                    if len(''.join(desc).strip()):
                        setattr(self._obj, param,
                                property(lambda self: None,
                                         doc='\n'.join(desc)))

            # entries must be indented under the autosummary directive
            if len(prefix):
                autosum += ["   ~%s%s" % (prefix, param)]
            else:
                autosum += ["   %s" % param]

        if autosum:
            out += ['.. autosummary::', '']
            out += autosum
            out += ['']
    return out
def parse_override_docs(namespace, version):
    """Collect attribute docs from the pgi override module for *namespace*.

    :returns: mapping of fully qualified attribute name -> docstring, or an
        empty dict when there is no analyzable override module.
    """
    import_namespace(namespace, version)
    try:
        ma = ModuleAnalyzer.for_module("pgi.overrides.%s" % namespace)
    except PycodeError:
        return {}
    docs = {}
    # FIX: .items() instead of the Python-2-only .iteritems() so this works
    # on both Python 2 and 3.
    for key, value in ma.find_attr_docs().items():
        # key is a (scope, attr) pair; drop empty parts when joining
        docs[namespace + "." + ".".join(filter(None, key))] = "\n".join(value)
    return docs
def make_rst(self, qref=False):
    """Generate reST lines for the routes of the target app.

    :param qref: when True, yield quick-reference table rows instead of
                 full ``http::`` directive blocks.
    """
    app = import_object(self.arguments[0])
    if self.endpoints:
        routes = itertools.chain(*[get_routes(app, endpoint, self.order)
                                   for endpoint in self.endpoints])
    else:
        routes = get_routes(app, order=self.order)
    for method, paths, endpoint in routes:
        try:
            blueprint, _, endpoint_internal = endpoint.rpartition('.')
            if self.blueprints and blueprint not in self.blueprints:
                continue
            if blueprint in self.undoc_blueprints:
                continue
        except ValueError:
            pass  # endpoint is not within a blueprint

        if endpoint in self.undoc_endpoints:
            continue
        try:
            static_url_path = app.static_url_path  # Flask 0.7 or higher
        except AttributeError:
            static_url_path = app.static_path  # Flask 0.6 or under
        if ('undoc-static' in self.options and endpoint == 'static' and
                static_url_path + '/(path:filename)' in paths):
            continue
        view = app.view_functions[endpoint]

        if self.modules and view.__module__ not in self.modules:
            continue
        # BUG FIX: the old code tested membership in self.modules here,
        # making the undoc-modules option a no-op; test undoc_modules.
        if self.undoc_modules and view.__module__ in self.undoc_modules:
            continue

        docstring = view.__doc__ or ''
        if hasattr(view, 'view_class'):
            # class-based view: prefer the per-verb handler's docstring
            meth_func = getattr(view.view_class, method.lower(), None)
            if meth_func and meth_func.__doc__:
                docstring = meth_func.__doc__
        if not isinstance(docstring, six.text_type):
            analyzer = ModuleAnalyzer.for_module(view.__module__)
            docstring = force_decode(docstring, analyzer.encoding)

        if not docstring and 'include-empty-docstring' not in self.options:
            continue
        docstring = prepare_docstring(docstring)
        if qref:  # idiom fix: truth test instead of "== True"
            for path in paths:
                yield quickref_directive(method, path, docstring)
        else:
            for line in http_directive(method, paths, docstring):
                yield line
def properties(self):
    """Return the documented property/attribute names of the wrapped class.

    Combines instance attributes discovered through Sphinx's attribute-doc
    analysis with public data-descriptor (or None-valued) class attributes.
    """
    if self._cls is None:
        return []
    analyzer = ModuleAnalyzer.for_module(self._cls.__module__)
    # attributes with ``#:`` docs declared in this class's scope
    instance_members = set(attr_name
                           for (class_name, attr_name)
                           in analyzer.find_attr_docs().keys()
                           if class_name == self._cls.__name__)
    # FIX: .items() instead of the Python-2-only .iteritems(); also drop
    # the redundant getattr(self._cls, '__dict__').
    class_members = set(name for name, func in self._cls.__dict__.items()
                        if not name.startswith('_') and
                        (func is None or inspect.isdatadescriptor(func)))
    return instance_members | class_members
def get_iattributes(obj):
    """Collect documented *instance* attributes of class *obj*.

    Attribute docs come from Sphinx's ModuleAnalyzer; names already present
    in dir(obj) (class-level attributes) are skipped, leaving only
    attributes assigned on instances (e.g. in ``__init__``).
    """
    items = []
    name = obj.__name__
    obj_attr = dir(obj)
    analyzer = ModuleAnalyzer.for_module(obj.__module__)
    attr_docs = analyzer.find_attr_docs()
    # FIX: .items() instead of the Python-2-only .iteritems() so this works
    # on both Python 2 and 3.
    for pair, doc in attr_docs.items():
        if name != pair[0]:
            continue  # doc belongs to a different class scope
        if not pair[1] in obj_attr:
            items.append({"name": pair[1], "doc": '\n '.join(doc)})
    items.sort(key=lambda d: d["name"])
    return items
def literalinclude_directive(name, arguments, options, content, lineno,
                             content_offset, block_text, state, state_machine):
    """Like .. include:: :literal:, but only warns if the include file is
    not found."""
    if not state.document.settings.file_insertion_enabled:
        return [state.document.reporter.warning('File insertion disabled',
                                                line=lineno)]
    env = state.document.settings.env
    rel_fn = arguments[0]
    # resolve relative to the document currently being parsed
    source_dir = path.dirname(path.abspath(state_machine.input_lines.source(
        lineno - state_machine.input_offset - 1)))
    fn = path.normpath(path.join(source_dir, rel_fn))

    if 'pyobject' in options and 'lines' in options:
        return [state.document.reporter.warning(
            'Cannot use both "pyobject" and "lines" options',
            line=lineno)]

    encoding = options.get('encoding', env.config.source_encoding)
    try:
        f = codecs.open(fn, 'r', encoding)
        lines = f.readlines()
        f.close()
    except (IOError, OSError):
        return [state.document.reporter.warning(
            'Include file %r not found or reading it failed' % arguments[0],
            line=lineno)]
    except UnicodeError:
        return [state.document.reporter.warning(
            'Encoding %r used for reading included file %r seems to '
            'be wrong, try giving an :encoding: option' %
            (encoding, arguments[0]))]

    objectname = options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(fn, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [state.document.reporter.warning(
                'Object named %r not found in include file %r' %
                (objectname, arguments[0]), line=lineno)]
        else:
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]

    linespec = options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        # FIX: "except ValueError, err" is Python-2-only syntax; the "as"
        # form works on Python 2.6+ and Python 3.
        except ValueError as err:
            return [state.document.reporter.warning(str(err), line=lineno)]
        lines = [lines[i] for i in linelist]
def has_tag(modname, fullname, docname):
    # Check whether *fullname* is a known tag of *modname*, caching the
    # analysis result (or False on failure) in env._viewcode_modules.
    entry = env._viewcode_modules.get(modname, None)
    if entry is None:
        try:
            analyzer = ModuleAnalyzer.for_module(modname)
        except Exception:
            # remember the failure so we do not retry for every reference
            env._viewcode_modules[modname] = False
            return
        analyzer.find_tags()
        entry = analyzer.code.decode(analyzer.encoding), analyzer.tags, {}
        env._viewcode_modules[modname] = entry
    elif entry is False:
        return
    code, tags, used = entry
    if fullname in tags:
        used[fullname] = docname
        return True
def make_rst(self):
    """Yield reST lines documenting each route of the target app."""
    app = import_object(self.arguments[0])
    for method, path, target in get_routes(app):
        endpoint = target.name or target.callback.__name__
        # honor endpoint whitelist/blacklist options
        if self.endpoints and endpoint not in self.endpoints:
            continue
        if endpoint in self.undoc_endpoints:
            continue
        view = target.callback
        docstring = view.__doc__ or ''
        if not isinstance(docstring, six.text_type):
            # byte docstring (Python 2): decode using the module's encoding
            encoding = ModuleAnalyzer.for_module(view.__module__).encoding
            docstring = force_decode(docstring, encoding)
        if not docstring and 'include-empty-docstring' not in self.options:
            continue
        for line in http_directive(method, path, prepare_docstring(docstring)):
            yield line
def pyobject_filter(self, lines, location=None):
    # type: (List[unicode], Any) -> List[unicode]
    """Reduce *lines* to the span of the object named by :pyobject:."""
    pyobject = self.options.get('pyobject')
    if not pyobject:
        return lines
    from sphinx.pycode import ModuleAnalyzer
    tags = ModuleAnalyzer.for_file(self.filename, '').find_tags()
    if pyobject not in tags:
        raise ValueError(__('Object named %r not found in include file %r') %
                         (pyobject, self.filename))
    _, start, end = tags[pyobject]
    lines = lines[start - 1:end]
    if 'lineno-match' in self.options:
        # report line numbers relative to the original file
        self.lineno_start = start
    return lines
def generate(self, more_content=None, real_modname=None,
             check_module=False, all_members=False):
    """Generate reST for the object given by *self.name*, and possibly for
    its members.

    If *more_content* is given, include that content. If *real_modname* is
    given, use that module name to find attribute docs. If *check_module* is
    True, only generate if the object is defined in the module name it is
    imported from. If *all_members* is True, document all members.
    """
    if not self.parse_name():
        # need a module to import
        self.directive.warn(
            'don\'t know which module to import for autodocumenting '
            '%r (try placing a "module" or "currentmodule" directive '
            'in the document, or giving an explicit module name)' % self.name)
        return

    # now, import the module and get object to document
    if not self.import_object():
        return

    # If there is no real module defined, figure out which to use.
    # The real module is used in the module analyzer to look up the module
    # where the attribute documentation would actually be found in.
    # This is used for situations where you have a module that collects the
    # functions and classes of internal submodules.
    self.real_modname = real_modname or self.get_real_modname()

    # try to also get a source code analyzer for attribute docs
    try:
        self.analyzer = ModuleAnalyzer.for_module(self.real_modname)
        # parse right now, to get PycodeErrors on parsing (results will
        # be cached anyway)
        self.analyzer.find_attr_docs()
    # FIX: "except PycodeError, err" is Python-2-only syntax; the "as"
    # form works on Python 2.6+ and Python 3.
    except PycodeError as err:
        self.env.app.debug('[autodoc] module analyzer failed: %s', err)
        # no source file -- e.g. for builtin and C modules
        self.analyzer = None
        # at least add the module.__file__ as a dependency
        if hasattr(self.module, '__file__') and self.module.__file__:
            self.directive.filename_set.add(self.module.__file__)
def test_ModuleAnalyzer_find_attr_docs():
    # Exercises the various forms of "#:" attribute documentation.
    code = ('class Foo(object):\n'
            '    """class Foo!"""\n'
            '    #: comment before attr1\n'
            '    attr1 = None\n'
            '    attr2 = None  # attribute comment for attr2 (without colon)\n'
            '    attr3 = None  #: attribute comment for attr3\n'
            '    attr4 = None  #: long attribute comment\n'
            '                  #: for attr4\n'
            '    #: comment before attr5\n'
            '    attr5 = None  #: attribute comment for attr5\n'
            '    attr6, attr7 = 1, 2  #: this comment is ignored\n'
            '\n'
            '    def __init__(self):\n'
            '        self.attr8 = None  #: first attribute comment (ignored)\n'
            '        self.attr8 = None  #: attribute comment for attr8\n'
            '        #: comment before attr9\n'
            '        self.attr9 = None  #: comment after attr9\n'
            '        "string after attr9"\n'
            '\n'
            '    def bar(self, arg1, arg2=True, *args, **kwargs):\n'
            '        """method Foo.bar"""\n'
            '        pass\n'
            '\n'
            'def baz():\n'
            '    """function baz"""\n'
            '    pass\n')
    analyzer = ModuleAnalyzer.for_string(code, 'module')
    docs = analyzer.find_attr_docs()
    assert set(docs) == {('Foo', 'attr1'),
                         ('Foo', 'attr3'),
                         ('Foo', 'attr4'),
                         ('Foo', 'attr5'),
                         ('Foo', 'attr8'),
                         ('Foo', 'attr9')}
    assert docs[('Foo', 'attr1')] == ['comment before attr1', '']
    assert docs[('Foo', 'attr3')] == ['attribute comment for attr3', '']
    # FIX: this assertion was duplicated verbatim; one copy removed.
    assert docs[('Foo', 'attr4')] == ['long attribute comment', '']
    assert docs[('Foo', 'attr5')] == ['attribute comment for attr5', '']
    assert docs[('Foo', 'attr8')] == ['attribute comment for attr8', '']
    assert docs[('Foo', 'attr9')] == ['string after attr9', '']
def register_source(app, env, modname):
    """
    Registers source code.

    :param app: application
    :param env: environment of the plugin
    :param modname: name of the module to load
    :return: True if the code is registered successfully, False otherwise
    """
    entry = env._viewcode_modules.get(modname, None)
    if entry is False:
        # FIX: replaced a leftover debug print() (whose message was also
        # truncated: "Entry is false for ") with a logger call.
        logger.debug("Entry is false for module %s", modname)
        return False

    # Let extensions supply the source first; fall back to analyzing the
    # real module ourselves.
    code_tags = app.emit_firstresult("viewcode-find-source", modname)
    if code_tags is None:
        try:
            analyzer = ModuleAnalyzer.for_module(modname)
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(
                "Module \"%s\" could not be loaded. Full source will not be available. \"%s\"",
                modname, ex)
            # remember the failure so we do not retry on every call
            env._viewcode_modules[modname] = False
            return False

        if not isinstance(analyzer.code, str):
            code = analyzer.code.decode(analyzer.encoding)
        else:
            code = analyzer.code

        analyzer.find_tags()
        tags = analyzer.tags
    else:
        code, tags = code_tags

    if entry is None or entry[0] != code:
        # (re)build the cache entry when the source changed
        entry = code, tags, {}, ""
        env._viewcode_modules[modname] = entry

    return True
def has_tag(modname, fullname, docname):
    """Return True if *fullname* is a tag in *modname*, recording the
    referencing *docname* in the cached entry's "used" mapping.
    """
    entry = env._viewcode_modules.get(modname, None)
    # BUG FIX: the False check must run before entry[0] is touched -- the
    # old code evaluated entry[0] on a cached False and raised TypeError
    # ('bool' object is not subscriptable).
    if entry is False:
        return
    try:
        analyzer = ModuleAnalyzer.for_module(modname)
    except Exception:
        # remember the failure so we do not retry on every reference
        env._viewcode_modules[modname] = False
        return
    if not isinstance(analyzer.code, str):
        code = analyzer.code.decode(analyzer.encoding)
    else:
        code = analyzer.code
    if entry is None or entry[0] != code:
        # (re)build the cache entry when the source changed
        analyzer.find_tags()
        entry = code, analyzer.tags, {}
        env._viewcode_modules[modname] = entry
    code, tags, used = entry
    if fullname in tags:
        used[fullname] = docname
        return True
def has_tag(modname, fullname, docname):
    """Return True if *modname* defines tag *fullname*; caches analysis."""
    cache = env._viewcode_modules
    entry = cache.get(modname, None)
    if entry is False:
        # previous analysis failed; do not retry
        return
    if entry is None:
        try:
            analyzer = ModuleAnalyzer.for_module(modname)
        except Exception:
            cache[modname] = False
            return
        analyzer.find_tags()
        if isinstance(analyzer.code, unicode):
            code = analyzer.code
        else:
            code = analyzer.code.decode(analyzer.encoding)
        entry = code, analyzer.tags, {}
        cache[modname] = entry
    code, tags, used = entry
    if fullname in tags:
        used[fullname] = docname
        return True
def pyobject_filter(
        self, lines: List[str],
        location: Optional[Tuple[str, int]] = None) -> List[str]:
    """Trim *lines* down to the object named by the :pyobject: option."""
    pyobject = self.options.get('pyobject')
    if not pyobject:
        return lines
    from sphinx.pycode import ModuleAnalyzer
    tags = ModuleAnalyzer.for_file(self.filename, '').find_tags()
    if pyobject not in tags:
        raise ValueError(
            __('Object named %r not found in include file %r') %
            (pyobject, self.filename))
    _, start, end = tags[pyobject]
    trimmed = lines[start - 1:end]
    if 'lineno-match' in self.options:
        # report line numbers relative to the original file
        self.lineno_start = start
    return trimmed
def has_tag(modname, fullname, docname, refname):
    """Return True if *fullname* is a tag in *modname*; record the use."""
    entry = env._viewcode_modules.get(modname, None)  # type: ignore
    # FIX: bail out on a cached failure *before* paying for analysis --
    # the old code ran ModuleAnalyzer.for_module() first and only then
    # returned on a cached False, redoing the work on every call.
    if entry is False:
        return
    try:
        analyzer = ModuleAnalyzer.for_module(modname)
    except Exception:
        env._viewcode_modules[modname] = False  # type: ignore
        return
    if not isinstance(analyzer.code, text_type):
        code = analyzer.code.decode(analyzer.encoding)
    else:
        code = analyzer.code
    if entry is None or entry[0] != code:
        # (re)build the cache entry when the source changed
        analyzer.find_tags()
        entry = code, analyzer.tags, {}, refname
        env._viewcode_modules[modname] = entry  # type: ignore
    _, tags, used, _ = entry
    if fullname in tags:
        used[fullname] = docname
        return True
def has_tag(modname, fullname, docname, refname):
    """True if *fullname* is a known tag of *modname*; records the usage."""
    cache = env._viewcode_modules
    entry = cache.get(modname, None)
    if entry is False:
        return  # analysis failed previously; do not retry
    if entry is None:
        try:
            analyzer = ModuleAnalyzer.for_module(modname)
        except Exception:
            cache[modname] = False
            return
        analyzer.find_tags()
        raw = analyzer.code
        code = raw if isinstance(raw, text_type) else raw.decode(analyzer.encoding)
        entry = code, analyzer.tags, {}, refname
        cache[modname] = entry
    _, tags, used, _ = entry
    if fullname in tags:
        used[fullname] = docname
        return True
def import_ivar_by_name(name: str, prefixes: List[str] = None
                        ) -> Tuple[str, Any, Any, str]:
    """Import an instance variable that has the given *name*, under one of
    the *prefixes*.  The first name that succeeds is used.

    :raises ImportError: if no candidate resolves to a documented
        instance variable.
    """
    # FIX: the default used to be the mutable list [None]; use None as the
    # sentinel and substitute the old default inside the function.
    if prefixes is None:
        prefixes = [None]
    try:
        name, attr = name.rsplit(".", 1)
        real_name, obj, parent, modname = import_by_name(name, prefixes)
        qualname = real_name.replace(modname + ".", "")
        analyzer = ModuleAnalyzer.for_module(getattr(obj, '__module__', modname))
        analyzer.analyze()
        # check for presence in `annotations` to include dataclass attributes
        if (qualname, attr) in analyzer.attr_docs or \
                (qualname, attr) in analyzer.annotations:
            return real_name + "." + attr, INSTANCEATTR, obj, modname
    except (ImportError, ValueError, PycodeError):
        pass

    raise ImportError
def make_rst(self):
    # Build http:: directive lines for every route of the Flask app named
    # by the directive's first argument.
    app = import_object(self.arguments[0])
    for method, path, endpoint in get_routes(app):
        try:
            blueprint, endpoint_internal = endpoint.split('.')
            if self.blueprints and blueprint not in self.blueprints:
                continue
            if blueprint in self.undoc_blueprints:
                continue
        except ValueError:
            pass  # endpoint is not within a blueprint

        if self.endpoints and endpoint not in self.endpoints:
            continue
        if endpoint in self.undoc_endpoints:
            continue

        try:
            static_url_path = app.static_url_path  # Flask 0.7 or higher
        except AttributeError:
            static_url_path = app.static_path  # Flask 0.6 or under
        # skip the implicit static-file route unless explicitly requested
        if ('undoc-static' in self.options and endpoint == 'static' and
                path == static_url_path + '/(path:filename)'):
            continue

        view = app.view_functions[endpoint]
        docstring = view.__doc__ or ''
        if hasattr(view, 'view_class'):
            # class-based view: prefer the docstring of the handler for
            # this HTTP verb over the class docstring
            meth_func = getattr(view.view_class, method.lower(), None)
            if meth_func and meth_func.__doc__:
                docstring = meth_func.__doc__
        if not isinstance(docstring, six.text_type):
            # byte docstring (Python 2): decode with the module's encoding
            analyzer = ModuleAnalyzer.for_module(view.__module__)
            docstring = force_decode(docstring, analyzer.encoding)

        if not docstring and 'include-empty-docstring' not in self.options:
            continue
        docstring = prepare_docstring(docstring)
        for line in http_directive(method, path, docstring):
            yield line
def scan(self, imported_members: bool) -> List[str]:
    """Return the names of the module's members that should be documented.

    Skips members vetoed by is_skipped(); whether imported members are
    listed depends on *imported_members*, the module's ``__all__``, and
    the ``autosummary_ignore_module_all`` config value.
    """
    members = []
    analyzer = ModuleAnalyzer.for_module(self.object.__name__)
    attr_docs = analyzer.find_attr_docs()
    for name in members_of(self.object, self.app.config):
        try:
            value = safe_getattr(self.object, name)
        except AttributeError:
            value = None

        objtype = self.get_object_type(name, value)
        if self.is_skipped(name, value, objtype):
            continue

        try:
            if ('', name) in attr_docs:
                # documented module-level attribute: counts as local
                imported = False
            elif inspect.ismodule(value):
                imported = True
            elif safe_getattr(value, '__module__') != self.object.__name__:
                imported = True
            else:
                imported = False
        except AttributeError:
            imported = False

        respect_module_all = not self.app.config.autosummary_ignore_module_all
        if imported_members:
            # list all members up
            members.append(name)
        elif imported is False:
            # list not-imported members
            members.append(name)
        elif '__all__' in dir(self.object) and respect_module_all:
            # list members that have __all__ set
            members.append(name)

    return members
def _update_tags(env, modname, fullname=None):
    """Analyze *modname* and return True when *fullname* is among its tags.

    Analysis results are cached in env._viewcode_modules; a cached False
    marks a module that could not be analyzed.
    """
    cache = env._viewcode_modules
    entry = cache.get(modname, None)
    if entry is False:
        return
    if entry is None:
        try:
            analyzer = ModuleAnalyzer.for_module(modname)
        except Exception:
            cache[modname] = False
            return
        analyzer.find_tags()
        raw = analyzer.code
        code = raw.decode(analyzer.encoding) if not isinstance(raw, str) else raw
        entry = code, analyzer.tags, {}
        cache[modname] = entry
    code, tags, used = entry
    if fullname is not None and fullname in tags:
        used[fullname] = env.docname
        return True
def _update_tags(env, modname, fullname=None):
    """Analyze *modname*'s code and return True if *fullname* is present.

    Caches the analysis results in env._viewcode_modules (False marks a
    module whose analysis failed).
    """
    cache = env._viewcode_modules
    entry = cache.get(modname, None)
    if entry is False:
        return
    if entry is None:
        try:
            analyzer = ModuleAnalyzer.for_module(modname)
        except Exception:
            cache[modname] = False
            return
        analyzer.find_tags()
        if isinstance(analyzer.code, unicode):
            code = analyzer.code
        else:
            code = analyzer.code.decode(analyzer.encoding)
        entry = code, analyzer.tags, {}
        cache[modname] = entry
    code, tags, used = entry
    if fullname is not None and fullname in tags:
        used[fullname] = env.docname
        return True
def recurse(cls):
    # type: (Any) -> None
    # Record *cls* (and, transitively, its bases) in the enclosing
    # ``all_classes`` mapping, honoring the builtin/private filters and
    # stopping at classes listed in ``top_classes``.
    if not show_builtins and cls in py_builtins:
        return
    if not private_bases and cls.__name__.startswith('_'):
        return

    nodename = self.class_name(cls, parts, aliases)
    fullname = self.class_name(cls, 0, aliases)

    # Use first line of docstring as tooltip, if available
    tooltip = None
    try:
        if cls.__doc__:
            enc = ModuleAnalyzer.for_module(cls.__module__).encoding
            doc = cls.__doc__.strip().split("\n")[0]
            if not isinstance(doc, text_type):
                doc = force_decode(doc, enc)
            if doc:
                tooltip = '"%s"' % doc.replace('"', '\\"')
    except Exception:  # might raise AttributeError for strange classes
        pass

    baselist = []  # type: List[unicode]
    all_classes[cls] = (nodename, fullname, baselist, tooltip)

    # do not descend below "top" classes
    if fullname in top_classes:
        return

    for base in cls.__bases__:
        if not show_builtins and base in py_builtins:
            continue
        if not private_bases and base.__name__.startswith('_'):
            continue
        baselist.append(self.class_name(base, parts, aliases))
        if base not in all_classes:
            recurse(base)
def run(self):
    """Insert the referenced file as a literal block.

    Supports the mutually exclusive ``pyobject`` (extract one Python object
    via ModuleAnalyzer) and ``lines`` (explicit line selection) options.
    NOTE(review): uses Python 2 ``except ValueError, err`` syntax — this
    block predates Python 3 support.
    """
    document = self.state.document
    if not document.settings.file_insertion_enabled:
        return [
            document.reporter.warning('File insertion disabled',
                                      line=self.lineno)
        ]
    env = document.settings.env
    rel_filename, filename = env.relfn2path(self.arguments[0])
    if 'pyobject' in self.options and 'lines' in self.options:
        return [
            document.reporter.warning(
                'Cannot use both "pyobject" and "lines" options',
                line=self.lineno)
        ]
    # Read the file with the requested (or configured default) encoding.
    encoding = self.options.get('encoding', env.config.source_encoding)
    codec_info = codecs.lookup(encoding)
    try:
        f = codecs.StreamReaderWriter(open(filename, 'rb'), codec_info[2],
                                      codec_info[3], 'strict')
        lines = f.readlines()
        f.close()
    except (IOError, OSError):
        return [
            document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)
        ]
    except UnicodeError:
        return [
            document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option' %
                (encoding, filename))
        ]
    # :pyobject: — keep only the source lines of the named object.
    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(filename, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [
                document.reporter.warning(
                    'Object named %r not found in include file %r' %
                    (objectname, filename), line=self.lineno)
            ]
        else:
            # tags[name] is (type, start, end) with 1-based line numbers.
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]
    # :lines: — keep only an explicit selection of line numbers.
    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        except ValueError, err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        lines = [lines[i] for i in linelist]
def visit_module(self, module) -> bool:
    """Visit *module*, collect its documentable members, and write a
    generated ``.rst`` file for it; recurses into submodules when the
    module is a package.

    :param module: The (already imported) module object.
    :return: ``True`` if the module was documented; ``False`` if it was
        skipped by ``skip_module_regex``/``skip_on_docstring_regex``.
    """
    mod=module
    modulename = mod.__name__
    # A module is skipped when its name matches skip_module_regex or its
    # docstring matches skip_on_docstring_regex.
    skipped = self.config['skip_module_regex'] and re.match(self.config['skip_module_regex'], modulename)
    skip_on_docstring_regex = self.config['skip_on_docstring_regex']
    if skip_on_docstring_regex:
        if mod.__doc__ and re.search(skip_on_docstring_regex, mod.__doc__):
            skipped=True
    logger.info(f'{self} Visiting module: {modulename} {"(skipped)" if skipped else ""}')
    if skipped:
        return False
    # TODO: call skip here too
    # use a flat structure for the global modules list if we're not documenting submodules
    #TODO: if 'module' not in doc_module_member_types:
    allmodules.append(modulename) # TODO: do something with this
    # can't use getmembers for getting submodules of packages
    membersByType = {t:[] for t in doc_module_member_types}
    if hasattr(mod, '__path__'): # if this is a package (i.e. contains other modules)
        for _, submodulename, _ in pkgutil.iter_modules(mod.__path__, prefix=modulename+ '.'):
            submodule = importlib.import_module(submodulename.lstrip('.'))
            if not self.visit_module(submodule):
                continue
            if 'module' in membersByType:
                membersByType['module'].append( (submodulename, submodule, submodule.__doc__))
    # NOTE(review): moduleall is never read in this method — confirm whether
    # explicit __all__ handling was intended here (autodoc does it internally).
    moduleall = set(getattr(mod, '__all__', []))
    # It'd be possible to iterate over mod.__dict__.items() but there are some subtlies around how to decide
    # what to document (including the __all__ handling in get_object_members and the fact that we have to use the
    # Sphinx ModuleAnalyzer if we want to get docstrings for attributes which filter_members does),
    # .... which we can avoid reimplementing by just using the existing code.
    # This was implemented against Sphinx 2.2.0
    # unfortunately the FakeDirective from autosummary is a bit too fake to actually work, so make it slightly less so
    class FakeBuildEnvironment(object):
        # Minimal stand-in exposing only .app and .config, which the
        # documenter machinery reads off directive.env.
        def __init__(self, app):
            self.app, self.config = app, app.config
    directive = sphinx.ext.autosummary.FakeDirective()
    directive.env = FakeBuildEnvironment(self.app)
    documenterclass = sphinx.ext.autosummary.get_documenter(app=self.app, obj=module, parent=None)
    documenter = documenterclass(directive, modulename)
    if not documenter.parse_name() or not documenter.import_object():
        assert False, 'documenter failed to import module %s'%module
    # must call this before filter_members will work correctly
    documenter.analyzer = ModuleAnalyzer.for_module(modulename)
    attr_docs = documenter.analyzer.find_attr_docs()
    # find out which members are documentable; use __dict__.items() to retain the ordering info, but delegate to
    # autodoc get_object_members for its __all__ handling logic
    permittedmembers = set(memberinfo[0] for memberinfo in documenter.get_object_members(want_all=True)[1])
    members = [(mname,m) for mname, m in mod.__dict__.items() if mname in permittedmembers]
    # TODO: ordering c.f. autodoc_member_order
    for (mname, m, isattr) in documenter.filter_members(members, want_all=True):
        logger.debug(' visiting member: %s'%mname)
        if not isattr and not self.app.config['autodoc_default_options'].get('imported-members',False) and getattr(m, '__module__', modulename) != modulename:
            # need to immediately rule out the majority of items which aren't really defined in this module;
            # data attributes don't have module set on them so don't do the check for those else we'd miss stuff that
            # should be included
            continue
        # Classify the member into one of the membersByType buckets.
        if isattr:
            mtype = 'data'
        elif inspect.isclass(m):
            if isinstance(m, BaseException):
                # NOTE(review): isinstance() on a *class* object is normally
                # False — issubclass(m, BaseException) looks intended; confirm.
                mtype = 'exception'
            else:
                mtype = 'class'
        elif inspect.ismodule(m):
            continue # submodules are handled above, so anything here will be an imported module that we don't want
        elif inspect.isfunction(m):
            mtype = 'function'
        else:
            logger.debug(f'Ignoring unknown member type: {mname} {repr(m)}')
            continue
        # Prefer #: attribute doc-comments found by the analyzer over __doc__.
        if ('', mname) in attr_docs:
            docstring = '\n'.join(attr_docs[('', mname)])
        else:
            docstring = getattr(m, '__doc__', None)
        if skip_on_docstring_regex:
            if docstring and re.search(skip_on_docstring_regex, docstring):
                logger.info(f'{self} Skipping {modulename}.{mname} due to its docstring matching the skip_on_docstring_regex')
                continue
        membersByType[mtype].append((modulename+'.'+mname, m, docstring))
    logger.debug('%s Visiting module %s with members: %s', self, modulename, membersByType)
    rst = self.generate_module_rst(mod, membersByType)
    if not rst:
        logger.info(f'{self} No .rst generated for {modulename}')
        # NOTE(review): bare return (None) is falsy, so package callers treat
        # "no rst generated" like a skipped module — confirm this is intended.
        return
    rstfile = self.config['generated_source_dir']+f'/{mod.__name__}.rst'
    self.rst_files_generated.add(os.path.basename(rstfile))
    if os.path.exists(rstfile):
        # Avoid rewriting an identical file (keeps mtimes stable for Sphinx).
        with open(rstfile, 'r', encoding='utf-8') as f:
            if f.read() == rst:
                return True # nothing to do
        if not self.config['overwrite_generated_source_rsts']:
            logger.info(f'{self} Skipping overwrite of {mod.__name__}.rst due to overwrite_generated_source_rsts=True')
            return True
    with open(rstfile, 'w', encoding='utf-8') as f:
        f.write(rst)
    return True # indicates this module isn't skipped
def test_ModuleAnalyzer_find_attr_docs():
    """find_attr_docs collects ``#:`` comments (before and after the
    assignment), trailing attribute docstrings, and definition order.

    Fix: removed an accidentally duplicated assertion on ``('Foo', 'attr4')``.
    """
    code = ('class Foo(object):\n'
            '    """class Foo!"""\n'
            '    #: comment before attr1\n'
            '    attr1 = None\n'
            '    attr2 = None  # attribute comment for attr2 (without colon)\n'
            '    attr3 = None  #: attribute comment for attr3\n'
            '    attr4 = None  #: long attribute comment\n'
            '                  #: for attr4\n'
            '    #: comment before attr5\n'
            '    attr5 = None  #: attribute comment for attr5\n'
            '    attr6, attr7 = 1, 2  #: this comment is ignored\n'
            '\n'
            '    def __init__(self):\n'
            '        self.attr8 = None  #: first attribute comment (ignored)\n'
            '        self.attr8 = None  #: attribute comment for attr8\n'
            '        #: comment before attr9\n'
            '        self.attr9 = None  #: comment after attr9\n'
            '        "string after attr9"\n'
            '\n'
            '    def bar(self, arg1, arg2=True, *args, **kwargs):\n'
            '        """method Foo.bar"""\n'
            '        pass\n'
            '\n'
            'def baz():\n'
            '    """function baz"""\n'
            '    pass\n'
            '\n'
            'class Qux: attr1 = 1; attr2 = 2')
    analyzer = ModuleAnalyzer.for_string(code, 'module')
    docs = analyzer.find_attr_docs()
    # attr2 is absent on purpose: a plain '#' comment is not a doc-comment.
    assert set(docs) == {('Foo', 'attr1'),
                         ('Foo', 'attr3'),
                         ('Foo', 'attr4'),
                         ('Foo', 'attr5'),
                         ('Foo', 'attr6'),
                         ('Foo', 'attr7'),
                         ('Foo', 'attr8'),
                         ('Foo', 'attr9')}
    assert docs[('Foo', 'attr1')] == ['comment before attr1', '']
    assert docs[('Foo', 'attr3')] == ['attribute comment for attr3', '']
    assert docs[('Foo', 'attr4')] == ['long attribute comment', '']
    assert docs[('Foo', 'attr5')] == ['attribute comment for attr5', '']
    assert docs[('Foo', 'attr6')] == ['this comment is ignored', '']
    assert docs[('Foo', 'attr7')] == ['this comment is ignored', '']
    assert docs[('Foo', 'attr8')] == ['attribute comment for attr8', '']
    assert docs[('Foo', 'attr9')] == ['string after attr9', '']
    assert analyzer.tagorder == {'Foo': 0,
                                 'Foo.__init__': 8,
                                 'Foo.attr1': 1,
                                 'Foo.attr2': 2,
                                 'Foo.attr3': 3,
                                 'Foo.attr4': 4,
                                 'Foo.attr5': 5,
                                 'Foo.attr6': 6,
                                 'Foo.attr7': 7,
                                 'Foo.attr8': 10,
                                 'Foo.attr9': 12,
                                 'Foo.bar': 13,
                                 'baz': 14,
                                 'Qux': 15,
                                 'Qux.attr1': 16,
                                 'Qux.attr2': 17}
def test_ModuleAnalyzer_for_module():
    """for_module resolves an importable module to its on-disk source."""
    analyzer = ModuleAnalyzer.for_module('sphinx')
    observed = (analyzer.modname, analyzer.srcname, analyzer.encoding)
    assert observed == ('sphinx', SPHINX_MODULE_PATH, 'utf-8')
def run(self):
    """Build a literal block from an included file.

    Handles option validation (``pyobject``/``lines``/``lineno-match``/
    ``lineno-start``/``append``/``prepend`` interactions), ``diff``,
    object extraction, line selection, emphasis, start/end markers,
    prepend/append, and caption wrapping.
    """
    document = self.state.document
    if not document.settings.file_insertion_enabled:
        return [document.reporter.warning('File insertion disabled',
                                          line=self.lineno)]
    env = document.settings.env
    rel_filename, filename = env.relfn2path(self.arguments[0])
    # Reject option combinations that cannot be honored together.
    if 'pyobject' in self.options and 'lines' in self.options:
        return [document.reporter.warning(
            'Cannot use both "pyobject" and "lines" options',
            line=self.lineno)]
    if 'lineno-match' in self.options and 'lineno-start' in self.options:
        return [document.reporter.warning(
            'Cannot use both "lineno-match" and "lineno-start"',
            line=self.lineno)]
    if 'lineno-match' in self.options and \
       (set(['append', 'prepend']) & set(self.options.keys())):
        return [document.reporter.warning(
            'Cannot use "lineno-match" and "append" or "prepend"',
            line=self.lineno)]
    encoding = self.options.get('encoding', env.config.source_encoding)
    codec_info = codecs.lookup(encoding)
    lines = self.read_with_encoding(filename, document, codec_info, encoding)
    # read_with_encoding returns warning nodes (not strings) on failure.
    if lines and not isinstance(lines[0], string_types):
        return lines
    # :diff: — show a unified diff against another file instead.
    diffsource = self.options.get('diff')
    if diffsource is not None:
        tmp, fulldiffsource = env.relfn2path(diffsource)
        difflines = self.read_with_encoding(fulldiffsource, document,
                                            codec_info, encoding)
        if not isinstance(difflines[0], string_types):
            return difflines
        diff = unified_diff(difflines, lines, diffsource, self.arguments[0])
        lines = list(diff)
    linenostart = self.options.get('lineno-start', 1)
    # :pyobject: — keep only the lines of the named object.
    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(filename, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [document.reporter.warning(
                'Object named %r not found in include file %r' %
                (objectname, filename), line=self.lineno)]
        else:
            # tags[name] is (type, start, end), 1-based.
            lines = lines[tags[objectname][1]-1: tags[objectname][2]-1]
            if 'lineno-match' in self.options:
                linenostart = tags[objectname][1]
    # :lines: — explicit line selection.
    linespec = self.options.get('lines')
    if linespec:
        try:
            linelist = parselinenos(linespec, len(lines))
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        if 'lineno-match' in self.options:
            # make sure the line list is not "disjoint".
            previous = linelist[0]
            for line_number in linelist[1:]:
                if line_number == previous + 1:
                    previous = line_number
                    continue
                return [document.reporter.warning(
                    'Cannot use "lineno-match" with a disjoint set of '
                    '"lines"', line=self.lineno)]
            linenostart = linelist[0] + 1
        # just ignore non-existing lines
        lines = [lines[i] for i in linelist if i < len(lines)]
        if not lines:
            return [document.reporter.warning(
                'Line spec %r: no lines pulled from include file %r' %
                (linespec, filename), line=self.lineno)]
    # :emphasize-lines: — highlight a subset of the selected lines.
    linespec = self.options.get('emphasize-lines')
    if linespec:
        try:
            hl_lines = [x+1 for x in parselinenos(linespec, len(lines))]
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
    else:
        hl_lines = None
    # :start-after: / :end-before: — trim by marker text.
    startafter = self.options.get('start-after')
    endbefore = self.options.get('end-before')
    if startafter is not None or endbefore is not None:
        use = not startafter
        res = []
        for line_number, line in enumerate(lines):
            if not use and startafter and startafter in line:
                if 'lineno-match' in self.options:
                    linenostart += line_number + 1
                use = True
            elif use and endbefore and endbefore in line:
                break
            elif use:
                res.append(line)
        lines = res
    prepend = self.options.get('prepend')
    if prepend:
        lines.insert(0, prepend + '\n')
    append = self.options.get('append')
    if append:
        lines.append(append + '\n')
    text = ''.join(lines)
    if self.options.get('tab-width'):
        text = text.expandtabs(self.options['tab-width'])
    retnode = nodes.literal_block(text, text, source=filename)
    set_source_info(self, retnode)
    if diffsource:  # if diff is set, set udiff
        retnode['language'] = 'udiff'
    if 'language' in self.options:
        retnode['language'] = self.options['language']
    retnode['linenos'] = 'linenos' in self.options or \
                         'lineno-start' in self.options or \
                         'lineno-match' in self.options
    retnode['classes'] += self.options.get('class', [])
    extra_args = retnode['highlight_args'] = {}
    if hl_lines is not None:
        extra_args['hl_lines'] = hl_lines
    extra_args['linenostart'] = linenostart
    env.note_dependency(rel_filename)
    caption = self.options.get('caption')
    if caption is not None:
        if not caption:
            # An empty :caption: defaults to the include path itself.
            caption = self.arguments[0]
        try:
            retnode = container_wrapper(self, retnode, caption)
        except ValueError as exc:
            document = self.state.document
            # NOTE(review): exc[0][0].astext() indexes the exception's args as
            # docutils nodes — confirm container_wrapper really raises
            # ValueError wrapping a node list.
            errmsg = _('Invalid caption: %s' % exc[0][0].astext())
            return [document.reporter.warning(errmsg, line=self.lineno)]
    # retnode will be note_implicit_target that is linked from caption and numref.
    # when options['name'] is provided, it should be primary ID.
    self.add_name(retnode)
    return [retnode]
def get_class_members(subject: Any, objpath: List[str], attrgetter: Callable
                      ) -> Dict[str, "ObjectMember"]:
    """Get members and attributes of target class.

    Collects, in order: enum members, ``__slots__`` entries, everything
    reachable through ``dir(subject)``, and finally annotation-only and
    instance attributes discovered by the ModuleAnalyzer for each class in
    the MRO.  Earlier sources win; later passes only add unknown names.
    """
    from sphinx.ext.autodoc import INSTANCEATTR, ObjectMember

    # the members directly defined in the class
    obj_dict = attrgetter(subject, '__dict__', {})

    members = {}  # type: Dict[str, ObjectMember]

    # enum members
    if isenumclass(subject):
        for name, value in subject.__members__.items():
            if name not in members:
                members[name] = ObjectMember(name, value, class_=subject)

        # Also pick up names defined on the enum class but not its parent.
        superclass = subject.__mro__[1]
        for name in obj_dict:
            if name not in superclass.__dict__:
                value = safe_getattr(subject, name)
                members[name] = ObjectMember(name, value, class_=subject)

    # members in __slots__
    try:
        __slots__ = getslots(subject)
        if __slots__:
            from sphinx.ext.autodoc import SLOTSATTR
            for name, docstring in __slots__.items():
                members[name] = ObjectMember(name, SLOTSATTR, class_=subject,
                                             docstring=docstring)
    except (TypeError, ValueError):
        # getslots raises for odd __slots__ declarations; ignore them.
        pass

    # other members
    for name in dir(subject):
        try:
            value = attrgetter(subject, name)
            if ismock(value):
                value = undecorate(value)

            unmangled = unmangle(subject, name)
            if unmangled and unmangled not in members:
                # class_ is only recorded for members defined directly on
                # the class; inherited ones are left without it.
                if name in obj_dict:
                    members[unmangled] = ObjectMember(unmangled, value,
                                                      class_=subject)
                else:
                    members[unmangled] = ObjectMember(unmangled, value)
        except AttributeError:
            continue

    try:
        for cls in getmro(subject):
            try:
                modname = safe_getattr(cls, '__module__')
                qualname = safe_getattr(cls, '__qualname__')
                analyzer = ModuleAnalyzer.for_module(modname)
                analyzer.analyze()
            except AttributeError:
                qualname = None
                analyzer = None
            except PycodeError:
                # Source not available (builtin/C module); no attr docs.
                analyzer = None

            # annotation only member (ex. attr: int)
            for name in getannotations(cls):
                name = unmangle(cls, name)
                if name and name not in members:
                    if analyzer and (qualname, name) in analyzer.attr_docs:
                        docstring = '\n'.join(analyzer.attr_docs[qualname, name])
                    else:
                        docstring = None
                    members[name] = ObjectMember(name, INSTANCEATTR, class_=cls,
                                                 docstring=docstring)

            # append instance attributes (cf. self.attr1) if analyzer knows
            if analyzer:
                for (ns, name), docstring in analyzer.attr_docs.items():
                    if ns == qualname and name not in members:
                        members[name] = ObjectMember(name, INSTANCEATTR,
                                                     class_=cls,
                                                     docstring='\n'.join(docstring))
    except AttributeError:
        pass

    return members
def get_items(self, names):
    """Try to import the given names, and return a list of
    ``[(name, signature, summary_string, real_name), ...]``.

    Names that fail to import (or to parse) still produce an entry with
    empty signature/summary so the summary table keeps one row per name.
    """
    env = self.state.document.settings.env
    prefixes = get_import_prefixes_from_env(env)
    items = []
    # Upper bound on the mangled signature length per row.
    max_item_chars = 50
    for name in names:
        display_name = name
        if name.startswith('~'):
            # '~pkg.mod.obj' displays only the last component.
            name = name[1:]
            display_name = name.split('.')[-1]
        try:
            real_name, obj, parent, modname = import_by_name(name, prefixes=prefixes)
        except ImportError:
            self.warn('failed to import %s' % name)
            items.append((name, '', '', name))
            continue
        self.result = ViewList()  # initialize for each documenter
        full_name = real_name
        if not isinstance(obj, ModuleType):
            # give explicitly separated module name, so that members
            # of inner classes can be documented
            full_name = modname + '::' + full_name[len(modname)+1:]
        # NB. using full_name here is important, since Documenters
        # handle module prefixes slightly differently
        documenter = get_documenter(obj, parent)(self, full_name)
        if not documenter.parse_name():
            self.warn('failed to parse name %s' % real_name)
            items.append((display_name, '', '', real_name))
            continue
        if not documenter.import_object():
            self.warn('failed to import object %s' % real_name)
            items.append((display_name, '', '', real_name))
            continue
        if documenter.options.members and not documenter.check_module():
            continue
        # try to also get a source code analyzer for attribute docs
        try:
            documenter.analyzer = ModuleAnalyzer.for_module(
                documenter.get_real_modname())
            # parse right now, to get PycodeErrors on parsing (results will
            # be cached anyway)
            documenter.analyzer.find_attr_docs()
        except PycodeError as err:
            documenter.env.app.debug(
                '[autodoc] module analyzer failed: %s', err)
            # no source file -- e.g. for builtin and C modules
            documenter.analyzer = None
        # -- Grab the signature
        sig = documenter.format_signature()
        if not sig:
            sig = ''
        else:
            max_chars = max(10, max_item_chars - len(display_name))
            sig = mangle_signature(sig, max_chars=max_chars)
            # Escape '*' so *args/**kwargs are not read as reST emphasis.
            sig = sig.replace('*', r'\*')
        # -- Grab the summary
        documenter.add_content(None)
        doc = list(documenter.process_doc([self.result.data]))
        # Drop leading blank lines before looking for the summary.
        while doc and not doc[0].strip():
            doc.pop(0)
        # If there's a blank line, then we can assume the first sentence /
        # paragraph has ended, so anything after shouldn't be part of the
        # summary
        for i, piece in enumerate(doc):
            if not piece.strip():
                doc = doc[:i]
                break
        # Try to find the "first sentence", which may span multiple lines
        m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(doc).strip())
        if m:
            summary = m.group(1).strip()
        elif doc:
            summary = doc[0].strip()
        else:
            summary = ''
        items.append((display_name, sig, summary, real_name))
    return items
def test_ModuleAnalyzer_for_string():
    """for_string analyzes in-memory source under the given module name."""
    analyzer = ModuleAnalyzer.for_string('print("Hello world")', 'module_name')
    assert (analyzer.modname, analyzer.srcname) == ('module_name', '<string>')
def run(self):
    """Build a literal block from an included file (newer variant).

    Same pipeline as older LiteralInclude implementations — option
    validation, optional diff, object extraction, line selection,
    markers, prepend/append — but captions register an implicit target
    name via ``self.options.setdefault('name', ...)``.
    """
    document = self.state.document
    if not document.settings.file_insertion_enabled:
        return [
            document.reporter.warning('File insertion disabled',
                                      line=self.lineno)
        ]
    env = document.settings.env
    rel_filename, filename = env.relfn2path(self.arguments[0])
    # Reject option combinations that cannot be honored together.
    if 'pyobject' in self.options and 'lines' in self.options:
        return [
            document.reporter.warning(
                'Cannot use both "pyobject" and "lines" options',
                line=self.lineno)
        ]
    if 'lineno-match' in self.options and 'lineno-start' in self.options:
        return [
            document.reporter.warning(
                'Cannot use both "lineno-match" and "lineno-start"',
                line=self.lineno)
        ]
    if 'lineno-match' in self.options and \
       (set(['append', 'prepend']) & set(self.options.keys())):
        return [
            document.reporter.warning(
                'Cannot use "lineno-match" and "append" or "prepend"',
                line=self.lineno)
        ]
    encoding = self.options.get('encoding', env.config.source_encoding)
    codec_info = codecs.lookup(encoding)
    lines = self.read_with_encoding(filename, document, codec_info, encoding)
    # read_with_encoding returns warning nodes (not strings) on failure.
    if lines and not isinstance(lines[0], string_types):
        return lines
    # :diff: — show a unified diff against another file instead.
    diffsource = self.options.get('diff')
    if diffsource is not None:
        tmp, fulldiffsource = env.relfn2path(diffsource)
        difflines = self.read_with_encoding(fulldiffsource, document,
                                            codec_info, encoding)
        if not isinstance(difflines[0], string_types):
            return difflines
        diff = unified_diff(difflines, lines, diffsource, self.arguments[0])
        lines = list(diff)
    linenostart = self.options.get('lineno-start', 1)
    # :pyobject: — keep only the lines of the named object.
    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(filename, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [
                document.reporter.warning(
                    'Object named %r not found in include file %r' %
                    (objectname, filename), line=self.lineno)
            ]
        else:
            # tags[name] is (type, start, end), 1-based.
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]
            if 'lineno-match' in self.options:
                linenostart = tags[objectname][1]
    # :lines: — explicit line selection.
    linespec = self.options.get('lines')
    if linespec:
        try:
            linelist = parselinenos(linespec, len(lines))
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        if 'lineno-match' in self.options:
            # make sure the line list is not "disjoint".
            previous = linelist[0]
            for line_number in linelist[1:]:
                if line_number == previous + 1:
                    previous = line_number
                    continue
                return [
                    document.reporter.warning(
                        'Cannot use "lineno-match" with a disjoint set of '
                        '"lines"', line=self.lineno)
                ]
            linenostart = linelist[0] + 1
        # just ignore non-existing lines
        lines = [lines[i] for i in linelist if i < len(lines)]
        if not lines:
            return [
                document.reporter.warning(
                    'Line spec %r: no lines pulled from include file %r' %
                    (linespec, filename), line=self.lineno)
            ]
    # :emphasize-lines: — highlight a subset of the selected lines.
    linespec = self.options.get('emphasize-lines')
    if linespec:
        try:
            hl_lines = [x + 1 for x in parselinenos(linespec, len(lines))]
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
    else:
        hl_lines = None
    # :start-after: / :end-before: — trim by marker text.
    startafter = self.options.get('start-after')
    endbefore = self.options.get('end-before')
    if startafter is not None or endbefore is not None:
        use = not startafter
        res = []
        for line_number, line in enumerate(lines):
            if not use and startafter and startafter in line:
                if 'lineno-match' in self.options:
                    linenostart += line_number + 1
                use = True
            elif use and endbefore and endbefore in line:
                break
            elif use:
                res.append(line)
        lines = res
    prepend = self.options.get('prepend')
    if prepend:
        lines.insert(0, prepend + '\n')
    append = self.options.get('append')
    if append:
        lines.append(append + '\n')
    text = ''.join(lines)
    if self.options.get('tab-width'):
        text = text.expandtabs(self.options['tab-width'])
    retnode = nodes.literal_block(text, text, source=filename)
    set_source_info(self, retnode)
    if diffsource:  # if diff is set, set udiff
        retnode['language'] = 'udiff'
    if 'language' in self.options:
        retnode['language'] = self.options['language']
    retnode['linenos'] = 'linenos' in self.options or \
                         'lineno-start' in self.options or \
                         'lineno-match' in self.options
    retnode['classes'] += self.options.get('class', [])
    extra_args = retnode['highlight_args'] = {}
    if hl_lines is not None:
        extra_args['hl_lines'] = hl_lines
    extra_args['linenostart'] = linenostart
    env.note_dependency(rel_filename)
    caption = self.options.get('caption')
    if caption is not None:
        if not caption:
            # An empty :caption: defaults to the include path itself.
            caption = self.arguments[0]
        self.options.setdefault('name', nodes.fully_normalize_name(caption))
        retnode = container_wrapper(self, retnode, caption)
    # retnode will be note_implicit_target that is linked from caption and numref.
    # when options['name'] is provided, it should be primary ID.
    self.add_name(retnode)
    return [retnode]
def run(self):
    """Include a file as a literal block, pre-cleaning Python sources.

    Fork of LiteralInclude: for ``.py`` files it strips the coding
    declaration line, the module docstring, and trailing whitespace
    before applying ``pyobject``/``lines`` selection.
    NOTE(review): Python 2 only (``xrange``, ``except ValueError, err``).
    """
    document = self.state.document
    filename = self.arguments[0]
    if not document.settings.file_insertion_enabled:
        return [document.reporter.warning('File insertion disabled',
                                          line=self.lineno)]
    env = document.settings.env
    # Resolve the path relative to the document (or srcdir for '/...').
    if filename.startswith('/') or filename.startswith(os.sep):
        rel_fn = filename[1:]
    else:
        docdir = path.dirname(env.doc2path(env.docname, base=None))
        rel_fn = path.normpath(path.join(docdir, filename))
    fn = path.join(env.srcdir, rel_fn)
    if 'pyobject' in self.options and 'lines' in self.options:
        return [document.reporter.warning(
            'Cannot use both "pyobject" and "lines" options',
            line=self.lineno)]
    encoding = self.options.get('encoding', env.config.source_encoding)
    try:
        f = codecs.open(fn, 'rU', encoding)
        lines = f.readlines()
        f.close()
        # Strip the PEP 263 coding-declaration line.
        if fn.endswith(".py") and lines[0].startswith("#") and "coding" in lines[0]:
            lines = lines[1:]
        # Strip the module docstring.
        if fn.endswith(".py"):
            if lines[0].startswith('"""'):
                for lineno, line in enumerate(lines[1:]):
                    if line.strip().endswith('"""'):
                        lines = lines[lineno+2:]
                        break
        # Strip trailing whitespace on every line.
        for i in xrange(len(lines)):
            lines[i] = lines[i].rstrip() + "\n"
    except (IOError, OSError):
        return [document.reporter.warning(
            'Include file %r not found or reading it failed' % filename,
            line=self.lineno)]
    except UnicodeError:
        return [document.reporter.warning(
            'Encoding %r used for reading included file %r seems to '
            'be wrong, try giving an :encoding: option' %
            (encoding, filename))]
    # :pyobject: — keep only the source lines of the named object.
    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(fn, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [document.reporter.warning(
                'Object named %r not found in include file %r' %
                (objectname, filename), line=self.lineno)]
        else:
            # tags[name] is (type, start, end), 1-based.
            lines = lines[tags[objectname][1]-1 : tags[objectname][2]-1]
    # :lines: — explicit line selection.
    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        except ValueError, err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        lines = [lines[i] for i in linelist]
def run(self):
    """Include a file as a literal block (early LiteralInclude variant).

    Resolves the path manually (with a filesystem-encoding fallback for
    non-ASCII source directories) and supports ``pyobject`` and ``lines``.
    NOTE(review): Python 2 only (``open(fn, 'U')``, ``except ValueError, err``).
    """
    document = self.state.document
    filename = self.arguments[0]
    if not document.settings.file_insertion_enabled:
        return [document.reporter.warning('File insertion disabled',
                                          line=self.lineno)]
    env = document.settings.env
    # Resolve the path relative to the document (or srcdir for '/...').
    if filename.startswith('/') or filename.startswith(os.sep):
        rel_fn = filename[1:]
    else:
        docdir = path.dirname(env.doc2path(env.docname, base=None))
        rel_fn = path.join(docdir, filename)
    try:
        fn = path.join(env.srcdir, rel_fn)
    except UnicodeDecodeError:
        # the source directory is a bytestring with non-ASCII characters;
        # let's try to encode the rel_fn in the file system encoding
        rel_fn = rel_fn.encode(sys.getfilesystemencoding())
        fn = path.join(env.srcdir, rel_fn)
    if 'pyobject' in self.options and 'lines' in self.options:
        return [document.reporter.warning(
            'Cannot use both "pyobject" and "lines" options',
            line=self.lineno)]
    encoding = self.options.get('encoding', env.config.source_encoding)
    codec_info = codecs.lookup(encoding)
    try:
        f = codecs.StreamReaderWriter(open(fn, 'U'),
                                      codec_info[2], codec_info[3], 'strict')
        lines = f.readlines()
        f.close()
    except (IOError, OSError):
        return [document.reporter.warning(
            'Include file %r not found or reading it failed' % filename,
            line=self.lineno)]
    except UnicodeError:
        return [document.reporter.warning(
            'Encoding %r used for reading included file %r seems to '
            'be wrong, try giving an :encoding: option' %
            (encoding, filename))]
    # :pyobject: — keep only the source lines of the named object.
    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(fn, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [document.reporter.warning(
                'Object named %r not found in include file %r' %
                (objectname, filename), line=self.lineno)]
        else:
            # tags[name] is (type, start, end), 1-based.
            lines = lines[tags[objectname][1]-1 : tags[objectname][2]-1]
    # :lines: — explicit line selection.
    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        except ValueError, err:
            return [document.reporter.warning(str(err),
                                              line=self.lineno)]
        lines = [lines[i] for i in linelist]
def test_ModuleAnalyzer_for_file():
    """for_file analyzes an on-disk source file, keeping its real path.

    Fix: the test called ``for_string(SPHINX_MODULE_PATH, 'sphinx')``,
    duplicating test_ModuleAnalyzer_for_string and never exercising
    ``for_file`` at all.
    """
    analyzer = ModuleAnalyzer.for_file(SPHINX_MODULE_PATH, 'sphinx')
    assert analyzer.modname == 'sphinx'
    assert analyzer.srcname == SPHINX_MODULE_PATH
def get_class_members(
    subject: Any, objpath: List[str], attrgetter: Callable,
    inherit_docstrings: bool = True) -> Dict[str, "ObjectMember"]:
    """Get members and attributes of target class.

    Collects, in order: enum members, ``__slots__`` entries, everything
    reachable through ``dir(subject)``, and finally annotation-only and
    instance attributes from the ModuleAnalyzer for each class in the MRO.
    When *inherit_docstrings* is false, attribute docstrings found on base
    classes are not copied onto already-known members.
    """
    from sphinx.ext.autodoc import INSTANCEATTR, ObjectMember

    # the members directly defined in the class
    obj_dict = attrgetter(subject, '__dict__', {})

    members: Dict[str, ObjectMember] = {}

    # enum members
    if isenumclass(subject):
        for name, value in subject.__members__.items():
            if name not in members:
                members[name] = ObjectMember(name, value, class_=subject)

        # Also pick up names defined on the enum class but not its parent.
        superclass = subject.__mro__[1]
        for name in obj_dict:
            if name not in superclass.__dict__:
                value = safe_getattr(subject, name)
                members[name] = ObjectMember(name, value, class_=subject)

    # members in __slots__
    try:
        __slots__ = getslots(subject)
        if __slots__:
            from sphinx.ext.autodoc import SLOTSATTR
            for name, docstring in __slots__.items():
                members[name] = ObjectMember(name, SLOTSATTR, class_=subject,
                                             docstring=docstring)
    except (TypeError, ValueError):
        # getslots raises for odd __slots__ declarations; ignore them.
        pass

    # other members
    for name in dir(subject):
        try:
            value = attrgetter(subject, name)
            if ismock(value):
                value = undecorate(value)

            unmangled = unmangle(subject, name)
            if unmangled and unmangled not in members:
                # class_ is only recorded for members defined directly on
                # the class; inherited ones are left without it.
                if name in obj_dict:
                    members[unmangled] = ObjectMember(unmangled, value,
                                                      class_=subject)
                else:
                    members[unmangled] = ObjectMember(unmangled, value)
        except AttributeError:
            continue

    try:
        for cls in getmro(subject):
            try:
                modname = safe_getattr(cls, '__module__')
                qualname = safe_getattr(cls, '__qualname__')
                analyzer = ModuleAnalyzer.for_module(modname)
                analyzer.analyze()
            except AttributeError:
                qualname = None
                analyzer = None
            except PycodeError:
                # Source not available (builtin/C module); no attr docs.
                analyzer = None

            # annotation only member (ex. attr: int)
            for name in getannotations(cls):
                name = unmangle(cls, name)
                if name and name not in members:
                    if analyzer and (qualname, name) in analyzer.attr_docs:
                        docstring = '\n'.join(analyzer.attr_docs[qualname, name])
                    else:
                        docstring = None
                    members[name] = ObjectMember(name, INSTANCEATTR, class_=cls,
                                                 docstring=docstring)

            # append or complete instance attributes (cf. self.attr1) if analyzer knows
            if analyzer:
                for (ns, name), docstring in analyzer.attr_docs.items():
                    if ns == qualname and name not in members:
                        # otherwise unknown instance attribute
                        members[name] = ObjectMember(name, INSTANCEATTR,
                                                     class_=cls,
                                                     docstring='\n'.join(docstring))
                    elif (ns == qualname and docstring and
                          isinstance(members[name], ObjectMember) and
                          not members[name].docstring):
                        if cls != subject and not inherit_docstrings:
                            # If we are in the MRO of the class and not the class itself,
                            # and we do not want to inherit docstrings, then skip setting
                            # the docstring below
                            continue
                        # attribute is already known, because dir(subject) enumerates it.
                        # But it has no docstring yet
                        members[name].docstring = '\n'.join(docstring)
    except AttributeError:
        pass

    return members
def run(self):
    """Directive entry point: read the target file and apply the
    ``pyobject`` / ``fragment`` / ``lines`` filtering options.

    Returns a list of warning nodes on any failure.

    NOTE(review): as captured here the method ends after the ``lines``
    filtering without building/returning a literal block node — presumably
    the node construction follows elsewhere; verify against the full source.
    """
    document = self.state.document
    if not document.settings.file_insertion_enabled:
        return [
            document.reporter.warning('File insertion disabled',
                                      line=self.lineno)
        ]
    env = document.settings.env
    # Resolve the argument relative to the configured base directory.
    # NOTE(review): prefixing '/' forces relfn2path to treat the path as
    # srcdir-relative — confirm this matches includefrags_base_dir semantics.
    rel_filename, filename = env.relfn2path(
        os.path.join('/' + env.config.includefrags_base_dir,
                     self.arguments[0]))
    # "pyobject" and "lines" are mutually exclusive selection mechanisms.
    if 'pyobject' in self.options and 'lines' in self.options:
        return [
            document.reporter.warning(
                'Cannot use both "pyobject" and "lines" options',
                line=self.lineno)
        ]
    encoding = self.options.get('encoding', env.config.source_encoding)
    codec_info = codecs.lookup(encoding)
    f = None
    try:
        # StreamReaderWriter decodes the raw bytes with the requested codec,
        # raising UnicodeError on a mismatch.
        f = codecs.StreamReaderWriter(open(filename, 'rb'), codec_info[2],
                                      codec_info[3], 'strict')
        lines = f.readlines()
    except (IOError, OSError):
        return [
            document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)
        ]
    except UnicodeError:
        return [
            document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option' %
                (encoding, filename))
        ]
    finally:
        # Close the handle on every path, including the error returns above.
        if f is not None:
            f.close()
    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(filename, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [
                document.reporter.warning(
                    'Object named %r not found in include file %r' %
                    (objectname, filename),
                    line=self.lineno)
            ]
        else:
            # Tags are (type, start_line, end_line), 1-based; slice the
            # matching line range out of the file.
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]
    fragment = self.options.get('fragment')
    if fragment is not None:
        # Keep only lines between a matching pair of '//![name]' markers.
        result = []
        key = None
        active = False
        for line in lines:
            line = line.rstrip()  # Strip line ending and trailing whitespace.
            line += '\n'  # add back line ending
            if line.strip().startswith('//![') and line.strip().endswith(']'):
                key = line.strip()[4:-1].strip()
                if key == fragment:
                    # Marker toggles inclusion on first hit, off on second.
                    active = not active
                continue
            if active:
                result.append(line)
        # Drop trailing blank lines from the collected fragment.
        while result and not result[-1].strip():
            result.pop()
        lines = result
    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        except ValueError, err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        # just ignore nonexisting lines
        nlines = len(lines)
        lines = [lines[i] for i in linelist if i < nlines]
        if not lines:
            return [
                document.reporter.warning(
                    'Line spec %r: no lines pulled from include file %r' %
                    (linespec, filename),
                    line=self.lineno)
            ]
def get_attr_docs(self, ty): # this reaches into some undocumented stuff in sphinx to # extract the attribute documentation. analyzer = ModuleAnalyzer.for_module(ty.__module__) module_attrs = analyzer.find_attr_docs() # (scope is broken!) return {k[1]: v[0] for k, v in module_attrs.iteritems()}
def create_node(self, filename, rel_filename, lang): document = self.state.document env = document.settings.env # Read the contents of the file to include encoding = self.options.get('encoding', env.config.source_encoding) codec_info = codecs.lookup(encoding) try: f = codecs.StreamReaderWriter(open(filename, 'rb'), codec_info[2], codec_info[3], 'strict') lines = f.readlines() f.close() except (IOError, OSError): print_err('Failed to read %r' % filename) return [ document.reporter.warning( 'Include file %r not found or reading it failed' % filename, line=self.lineno) ] except UnicodeError: print_err('Encoding %r used for reading included file %r seems to ' 'be wrong, try giving an :encoding: option' % (encoding, filename)) return [ document.reporter.warning( 'Encoding %r used for reading included file %r seems to ' 'be wrong, try giving an :encoding: option' % (encoding, filename)) ] objectname = self.options.get('pyobject') if objectname is not None: from sphinx.pycode import ModuleAnalyzer analyzer = ModuleAnalyzer.for_file(filename, '') tags = analyzer.find_tags() if objectname not in tags: return [ document.reporter.warning( 'Object named %r not found in include file %r' % (objectname, filename), line=self.lineno) ] else: lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1] linespec = self.options.get('lines') if linespec is not None: try: linelist = parselinenos(linespec, len(lines)) except ValueError, err: return [document.reporter.warning(str(err), line=self.lineno)] # just ignore nonexisting lines nlines = len(lines) lines = [lines[i] for i in linelist if i < nlines] if not lines: return [ document.reporter.warning( 'Line spec %r: no lines pulled from include file %r' % (linespec, filename), line=self.lineno) ]
def begin_generate(
        documenter: Documenter,
        real_modname: Optional[str] = None,
        check_module: bool = False,
        ) -> Optional[str]:
    """
    Boilerplate for the top of ``generate`` in
    :class:`sphinx.ext.autodoc.Documenter` subclasses.

    Parses the target name, imports the object, attaches a
    :class:`ModuleAnalyzer` (for attribute docs) and records file
    dependencies on ``documenter`` — in that order, since each step's
    side effects feed the next.

    .. versionadded:: 0.2.0

    :param documenter: the documenter instance to set up (mutated in place).
    :param real_modname: module name to use for source analysis; defaults to
        the documenter's own guess.
    :param check_module: when True, bail out if the object is not defined in
        the module it was imported from.

    :return: The ``sourcename``, or :py:obj:`None` if certain conditions are met,
        to indicate that the Documenter class should exit early.
    """

    # Do not pass real_modname and use the name from the __module__
    # attribute of the class.
    # If a class gets imported into the module real_modname
    # the analyzer won't find the source of the class, if
    # it looks in real_modname.

    if not documenter.parse_name():
        # need a module to import
        unknown_module_warning(documenter)
        return None

    # now, import the module and get object to document
    if not documenter.import_object():
        return None

    # If there is no real module defined, figure out which to use.
    # The real module is used in the module analyzer to look up the module
    # where the attribute documentation would actually be found in.
    # This is used for situations where you have a module that collects the
    # functions and classes of internal submodules.
    guess_modname = documenter.get_real_modname()
    documenter.real_modname = real_modname or guess_modname

    # try to also get a source code analyzer for attribute docs
    try:
        documenter.analyzer = ModuleAnalyzer.for_module(
            documenter.real_modname)
        # parse right now, to get PycodeErrors on parsing (results will
        # be cached anyway)
        documenter.analyzer.find_attr_docs()
    except PycodeError as err:
        logger.debug("[autodoc] module analyzer failed: %s", err)
        # no source file -- e.g. for builtin and C modules
        documenter.analyzer = None  # type: ignore

        # at least add the module.__file__ as a dependency
        if hasattr(documenter.module, "__file__") and documenter.module.__file__:
            documenter.directive.filename_set.add(documenter.module.__file__)
    else:
        documenter.directive.filename_set.add(documenter.analyzer.srcname)

    if documenter.real_modname != guess_modname:
        # Add module to dependency list if target object is defined in other module.
        try:
            analyzer = ModuleAnalyzer.for_module(guess_modname)
            documenter.directive.filename_set.add(analyzer.srcname)
        except PycodeError:
            # best-effort: the guessed module has no retrievable source
            pass

    # check __module__ of object (for members not given explicitly)
    if check_module:
        if not documenter.check_module():
            return None

    sourcename = documenter.get_sourcename()

    # make sure that the result starts with an empty line. This is
    # necessary for some situations where another directive preprocesses
    # reST and no starting newline is present
    documenter.add_line('', sourcename)

    return sourcename
def get_object_members( subject: Any, objpath: List[str], attrgetter: Callable, analyzer: ModuleAnalyzer = None) -> Dict[str, Attribute]: """Get members and attributes of target object.""" from sphinx.ext.autodoc import INSTANCEATTR # the members directly defined in the class obj_dict = attrgetter(subject, '__dict__', {}) members = {} # type: Dict[str, Attribute] # enum members if isenumclass(subject): for name, value in subject.__members__.items(): if name not in members: members[name] = Attribute(name, True, value) superclass = subject.__mro__[1] for name in obj_dict: if name not in superclass.__dict__: value = safe_getattr(subject, name) members[name] = Attribute(name, True, value) # members in __slots__ try: __slots__ = getslots(subject) if __slots__: from sphinx.ext.autodoc import SLOTSATTR for name in __slots__: members[name] = Attribute(name, True, SLOTSATTR) except (TypeError, ValueError): pass # other members for name in dir(subject): try: value = attrgetter(subject, name) directly_defined = name in obj_dict name = unmangle(subject, name) if name and name not in members: members[name] = Attribute(name, directly_defined, value) except AttributeError: continue # annotation only member (ex. attr: int) for i, cls in enumerate(getmro(subject)): for name in getannotations(cls): name = unmangle(cls, name) if name and name not in members: members[name] = Attribute(name, i == 0, INSTANCEATTR) if analyzer: # append instance attributes (cf. self.attr1) if analyzer knows namespace = '.'.join(objpath) for (ns, name) in analyzer.find_attr_docs(): if namespace == ns and name not in members: members[name] = Attribute(name, True, INSTANCEATTR) return members
def run(self):
    """Directive entry point: include a file as a literal block.

    Reads the target file with the requested encoding, then applies the
    selection/decoration options in order: ``pyobject``, ``lines``,
    ``emphasize-lines``, ``start-after``/``end-before``, ``prepend``/
    ``append``, ``tab-width``, ``language``, ``linenos``.

    :return: a one-element list containing either the literal block node or
        a warning node describing the failure.
    """
    document = self.state.document
    if not document.settings.file_insertion_enabled:
        return [
            document.reporter.warning('File insertion disabled',
                                      line=self.lineno)
        ]
    env = document.settings.env
    rel_filename, filename = env.relfn2path(self.arguments[0])

    # "pyobject" and "lines" are mutually exclusive selection mechanisms.
    if 'pyobject' in self.options and 'lines' in self.options:
        return [
            document.reporter.warning(
                'Cannot use both "pyobject" and "lines" options',
                line=self.lineno)
        ]

    encoding = self.options.get('encoding', env.config.source_encoding)
    codec_info = codecs.lookup(encoding)
    f = None
    try:
        # StreamReaderWriter decodes the raw bytes with the requested
        # codec, raising UnicodeError on a mismatch.
        f = codecs.StreamReaderWriter(open(filename, 'rb'), codec_info[2],
                                      codec_info[3], 'strict')
        lines = f.readlines()
    except (IOError, OSError):
        return [
            document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)
        ]
    except UnicodeError:
        return [
            document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option' %
                (encoding, filename))
        ]
    finally:
        # Fix: close on every path. Previously f.close() ran only on the
        # success path, leaking the handle when readlines() raised
        # UnicodeError.
        if f is not None:
            f.close()

    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(filename, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [
                document.reporter.warning(
                    'Object named %r not found in include file %r' %
                    (objectname, filename),
                    line=self.lineno)
            ]
        else:
            # Tags are (type, start_line, end_line), 1-based; slice the
            # named object's line range out of the file.
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]

    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        # just ignore nonexisting lines
        nlines = len(lines)
        lines = [lines[i] for i in linelist if i < nlines]
        if not lines:
            return [
                document.reporter.warning(
                    'Line spec %r: no lines pulled from include file %r' %
                    (linespec, filename),
                    line=self.lineno)
            ]

    linespec = self.options.get('emphasize-lines')
    if linespec:
        try:
            # parselinenos is 0-based; highlight_args expects 1-based.
            hl_lines = [x + 1 for x in parselinenos(linespec, len(lines))]
        except ValueError as err:
            return [document.reporter.warning(str(err), line=self.lineno)]
    else:
        hl_lines = None

    startafter = self.options.get('start-after')
    endbefore = self.options.get('end-before')
    prepend = self.options.get('prepend')
    append = self.options.get('append')
    if startafter is not None or endbefore is not None:
        # With start-after, inclusion starts disabled until the marker is
        # seen; without it, inclusion starts enabled.
        use = not startafter
        res = []
        for line in lines:
            if not use and startafter and startafter in line:
                use = True
            elif use and endbefore and endbefore in line:
                use = False
                break
            elif use:
                res.append(line)
        lines = res

    if prepend:
        lines.insert(0, prepend + '\n')
    if append:
        lines.append(append + '\n')

    text = ''.join(lines)
    if self.options.get('tab-width'):
        text = text.expandtabs(self.options['tab-width'])
    retnode = nodes.literal_block(text, text, source=filename)
    set_source_info(self, retnode)
    if self.options.get('language', ''):
        retnode['language'] = self.options['language']
    if 'linenos' in self.options:
        retnode['linenos'] = True
    if hl_lines is not None:
        retnode['highlight_args'] = {'hl_lines': hl_lines}
    # Rebuild the document when the included file changes.
    env.note_dependency(rel_filename)
    return [retnode]
def test_ModuleAnalyzer_for_module(): analyzer = ModuleAnalyzer.for_module('sphinx') assert analyzer.modname == 'sphinx' assert analyzer.srcname in (SPHINX_MODULE_PATH, os.path.abspath(SPHINX_MODULE_PATH)) assert analyzer.encoding == 'utf-8'
def get_items(self, names: List[str]) -> List[Tuple[str, str, str, str]]:
    """Try to import the given names, and return a list of
    ``[(name, signature, summary_string, real_name), ...]``.

    Names failing at parse/import stage are still appended with empty
    signature/summary; names failing at the initial import are skipped
    entirely (with a warning).
    """
    prefixes = get_import_prefixes_from_env(self.env)

    items = []  # type: List[Tuple[str, str, str, str]]

    # budget for the displayed signature, shared with the display name
    max_item_chars = 50

    for name in names:
        display_name = name
        if name.startswith('~'):
            # '~pkg.mod.obj' displays only the last path component
            name = name[1:]
            display_name = name.split('.')[-1]

        try:
            real_name, obj, parent, modname = self.import_by_name(
                name, prefixes=prefixes)
        except ImportError:
            logger.warning(__('autosummary: failed to import %s'), name,
                           location=self.get_source_info())
            continue

        self.bridge.result = StringList()  # initialize for each documenter
        full_name = real_name
        if not isinstance(obj, ModuleType):
            # give explicitly separated module name, so that members
            # of inner classes can be documented
            full_name = modname + '::' + full_name[len(modname) + 1:]
        # NB. using full_name here is important, since Documenters
        # handle module prefixes slightly differently
        documenter = self.create_documenter(self.env.app, obj, parent,
                                            full_name)
        if not documenter.parse_name():
            logger.warning(__('failed to parse name %s'), real_name,
                           location=self.get_source_info())
            items.append((display_name, '', '', real_name))
            continue
        if not documenter.import_object():
            logger.warning(__('failed to import object %s'), real_name,
                           location=self.get_source_info())
            items.append((display_name, '', '', real_name))
            continue
        # skip members living in a different module when :members: is set
        if documenter.options.members and not documenter.check_module():
            continue

        # try to also get a source code analyzer for attribute docs
        try:
            documenter.analyzer = ModuleAnalyzer.for_module(
                documenter.get_real_modname())
            # parse right now, to get PycodeErrors on parsing (results will
            # be cached anyway)
            documenter.analyzer.find_attr_docs()
        except PycodeError as err:
            logger.debug('[autodoc] module analyzer failed: %s', err)
            # no source file -- e.g. for builtin and C modules
            documenter.analyzer = None

        # -- Grab the signature

        try:
            sig = documenter.format_signature(show_annotation=False)
        except TypeError:
            # the documenter does not support ``show_annotation`` option
            sig = documenter.format_signature()

        if not sig:
            sig = ''
        else:
            max_chars = max(10, max_item_chars - len(display_name))
            sig = mangle_signature(sig, max_chars=max_chars)

        # -- Grab the summary

        documenter.add_content(None)
        summary = extract_summary(self.bridge.result.data[:],
                                  self.state.document)

        items.append((display_name, sig, summary, real_name))

    return items
def get_items(self, names):
    """Try to import the given names, and return a list of
    ``[(name, signature, summary_string, real_name), ...]``.

    Names that fail to import are appended with empty signature/summary;
    names that fail at parse/import-object stage likewise.
    """
    env = self.state.document.settings.env

    prefixes = get_import_prefixes_from_env(env)

    items = []

    # budget for the displayed signature, shared with the display name
    max_item_chars = 50

    for name in names:
        display_name = name
        if name.startswith('~'):
            # '~pkg.mod.obj' displays only the last path component
            name = name[1:]
            display_name = name.split('.')[-1]

        try:
            real_name, obj, parent, modname = import_by_name(
                name, prefixes=prefixes)
        except ImportError:
            self.warn('failed to import %s' % name)
            items.append((name, '', '', name))
            continue

        self.result = ViewList()  # initialize for each documenter

        full_name = real_name
        if not isinstance(obj, ModuleType):
            # give explicitly separated module name, so that members
            # of inner classes can be documented
            full_name = modname + '::' + full_name[len(modname) + 1:]

        # NB. using full_name here is important, since Documenters
        # handle module prefixes slightly differently
        documenter = get_documenter(obj, parent)(self, full_name)
        if not documenter.parse_name():
            self.warn('failed to parse name %s' % real_name)
            items.append((display_name, '', '', real_name))
            continue
        if not documenter.import_object():
            self.warn('failed to import object %s' % real_name)
            items.append((display_name, '', '', real_name))
            continue

        # try to also get a source code analyzer for attribute docs
        try:
            documenter.analyzer = ModuleAnalyzer.for_module(
                documenter.get_real_modname())
            # parse right now, to get PycodeErrors on parsing (results will
            # be cached anyway)
            documenter.analyzer.find_attr_docs()
        # Fix: use the 'except ... as' form (valid on Python 2.6+ and
        # required on Python 3) instead of the py2-only comma form.
        except PycodeError as err:
            documenter.env.app.debug(
                '[autodoc] module analyzer failed: %s', err)
            # no source file -- e.g. for builtin and C modules
            documenter.analyzer = None

        # -- Grab the signature

        sig = documenter.format_signature()
        if not sig:
            sig = ''
        else:
            max_chars = max(10, max_item_chars - len(display_name))
            sig = mangle_signature(sig, max_chars=max_chars)
            sig = sig.replace('*', r'\*')

        # -- Grab the summary

        documenter.add_content(None)
        doc = list(documenter.process_doc([self.result.data]))

        while doc and not doc[0].strip():
            doc.pop(0)

        # If there's a blank line, then we can assume the first sentence /
        # paragraph has ended, so anything after shouldn't be part of the
        # summary
        for i, piece in enumerate(doc):
            if not piece.strip():
                doc = doc[:i]
                break

        # Try to find the "first sentence", which may span multiple lines
        m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(doc).strip())
        if m:
            summary = m.group(1).strip()
        elif doc:
            summary = doc[0].strip()
        else:
            summary = ''

        items.append((display_name, sig, summary, real_name))

    # Fix: the docstring promises a list, but the original fell off the
    # end and implicitly returned None.
    return items
def run(self):
    """Directive entry point: include a Python source file, stripping its
    coding declaration and module docstring, then apply the ``pyobject`` /
    ``lines`` options.

    NOTE(review): as captured here the method ends after the ``lines``
    filtering without building/returning a node — presumably the node
    construction follows elsewhere; verify against the full source.
    """
    document = self.state.document
    filename = self.arguments[0]
    if not document.settings.file_insertion_enabled:
        return [
            document.reporter.warning('File insertion disabled',
                                      line=self.lineno)
        ]
    env = document.settings.env
    # A leading slash means "relative to the source dir"; otherwise the
    # path is relative to the including document.
    if filename.startswith('/') or filename.startswith(os.sep):
        rel_fn = filename[1:]
    else:
        docdir = path.dirname(env.doc2path(env.docname, base=None))
        rel_fn = path.normpath(path.join(docdir, filename))
    fn = path.join(env.srcdir, rel_fn)
    # "pyobject" and "lines" are mutually exclusive selection mechanisms.
    if 'pyobject' in self.options and 'lines' in self.options:
        return [
            document.reporter.warning(
                'Cannot use both "pyobject" and "lines" options',
                line=self.lineno)
        ]
    encoding = self.options.get('encoding', env.config.source_encoding)
    try:
        f = codecs.open(fn, 'rU', encoding)
        lines = f.readlines()
        f.close()
        # strip the "# -*- coding: ... -*-" declaration line
        if fn.endswith(".py") and lines[0].startswith(
                "#") and "coding" in lines[0]:
            lines = lines[1:]
        # strip the module docstring (only the '"""' form is handled)
        if fn.endswith(".py"):
            if lines[0].startswith('"""'):
                for lineno, line in enumerate(lines[1:]):
                    if line.strip().endswith('"""'):
                        lines = lines[lineno + 2:]
                        break
        # strip trailing whitespace from every line
        for i in xrange(len(lines)):
            lines[i] = lines[i].rstrip() + "\n"
    except (IOError, OSError):
        return [
            document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)
        ]
    except UnicodeError:
        return [
            document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option' %
                (encoding, filename))
        ]
    objectname = self.options.get('pyobject')
    if objectname is not None:
        from sphinx.pycode import ModuleAnalyzer
        analyzer = ModuleAnalyzer.for_file(fn, '')
        tags = analyzer.find_tags()
        if objectname not in tags:
            return [
                document.reporter.warning(
                    'Object named %r not found in include file %r' %
                    (objectname, filename),
                    line=self.lineno)
            ]
        else:
            # Tags are (type, start_line, end_line), 1-based; slice the
            # named object's line range out of the file.
            lines = lines[tags[objectname][1] - 1:tags[objectname][2] - 1]
    linespec = self.options.get('lines')
    if linespec is not None:
        try:
            linelist = parselinenos(linespec, len(lines))
        except ValueError, err:
            return [document.reporter.warning(str(err), line=self.lineno)]
        # NOTE(review): unlike sibling directives, out-of-range indices are
        # not filtered here, so a too-large line spec raises IndexError.
        lines = [lines[i] for i in linelist]
def generate(self, more_content=None, real_modname=None, check_module=False,
             all_members=False):
    """
    Generate reST for the object given by *self.name*, and possibly members.

    If *more_content* is given, include that content. If *real_modname* is
    given, use that module name to find attribute docs. If *check_module* is
    True, only generate if the object is defined in the module name it is
    imported from. If *all_members* is True, document all members.
    """
    if not self.parse_name():
        # need a module to import
        self.directive.warn(
            'don\'t know which module to import for autodocumenting '
            '%r (try placing a "module" or "currentmodule" directive '
            'in the document, or giving an explicit module name)' % self.name)
        return

    # now, import the module and get object to document
    if not self.import_object():
        return

    # If there is no real module defined, figure out which to use.
    # The real module is used in the module analyzer to look up the module
    # where the attribute documentation would actually be found in.
    # This is used for situations where you have a module that collects the
    # functions and classes of internal submodules.
    self.real_modname = real_modname or self.get_real_modname()

    # try to also get a source code analyzer for attribute docs
    try:
        self.analyzer = ModuleAnalyzer.for_module(self.real_modname)
        # parse right now, to get PycodeErrors on parsing (results will
        # be cached anyway)
        self.analyzer.find_attr_docs()
    except PycodeError as err:
        # no source file -- e.g. for builtin and C modules
        self.analyzer = None
        # at least add the module.__file__ as a dependency
        if hasattr(self.module, '__file__') and self.module.__file__:
            self.directive.filename_set.add(self.module.__file__)
    else:
        self.directive.filename_set.add(self.analyzer.srcname)

    # check __module__ of object (for members not given explicitly)
    if check_module:
        if not self.check_module():
            return

    # make sure that the result starts with an empty line. This is
    # necessary for some situations where another directive preprocesses
    # reST and no starting newline is present
    self.add_line(u'', '')

    # format the object's signature, if any
    try:
        sig = self.format_signature()
    except Exception as err:
        self.directive.warn('error while formatting signature for '
                            '%s: %s' % (self.fullname, err))
        sig = ''

    # generate the directive header and options, if applicable
    self.add_directive_header(sig)
    self.add_line(u'', '<autodoc>')

    # e.g. the module directive doesn't have content
    self.indent += self.content_indent

    # add all content (from docstrings, attribute docs etc.)
    self.add_content(more_content)

    # document members, if possible
    self.document_members(all_members)