def Find(name, log=None):
    """Import *name* and resolve its dotted components one by one.

    Returns a 4-tuple ``(file, mod, parent, foundAs)`` where *file* is the
    file of the deepest module reached, *mod* the final attribute, *parent*
    the top-level import result, and *foundAs* the dotted trail of
    non-module components.
    """
    root = _imp(name, log)
    mod = root
    found_file = get_file(mod) if inspect.ismodule(mod) else None
    non_module_parts = []
    previous = None
    for part in name.split('.')[1:]:
        try:
            mod = getattr(mod, part)
        except AttributeError:
            # This happens in the following case: we have
            # mx.DateTime.mxDateTime.mxDateTime.pyd but after importing it,
            # mx.DateTime.mxDateTime shadows access to mxDateTime.pyd --
            # a repeated component is tolerated, anything else re-raises.
            if previous != part:
                raise
        if inspect.ismodule(mod):
            found_file = get_file(mod)
        else:
            non_module_parts.append(part)
        previous = part
    return found_file, mod, root, '.'.join(non_module_parts)
def _stub_attr(obj, attr_name):
    """
    Stub an attribute of an object. Will return an existing stub if there
    already is one; otherwise picks the stub class matching the attribute's
    type.
    """
    # Annoying circular reference requires importing here. Would like to see
    # this cleaned up. @AW
    from mock import Mock

    # Check to see if this a property; this check is only for when dealing
    # with an instance. getattr will work for classes.
    is_property = False
    if not inspect.isclass(obj) and not inspect.ismodule(obj):
        # It's possible that the attribute is defined after initialization,
        # and so is not on the class itself.
        attr = getattr(obj.__class__, attr_name, None)
        if isinstance(attr, property):
            is_property = True

    if not is_property:
        attr = getattr(obj, attr_name)

    # Return an existing stub
    if isinstance(attr, Stub):
        return attr

    # If a Mock object, stub its __call__
    if isinstance(attr, Mock):
        return stub(attr.__call__)

    if isinstance(attr, property):
        return StubProperty(obj, attr_name)

    # Sadly, builtin functions and methods have the same type, so we have to
    # use the same stub class even though it's a bit ugly
    if inspect.ismodule(obj) and isinstance(
            attr, (types.FunctionType, types.BuiltinFunctionType,
                   types.BuiltinMethodType)):
        return StubFunction(obj, attr_name)

    if isinstance(attr, types.MethodType):
        # Handle differently if unbound because it's an implicit
        # "any instance".  FIX: was ``attr.im_self == None``; None must be
        # compared by identity.
        if attr.im_self is None:
            return StubUnboundMethod(attr)
        else:
            return StubMethod(obj, attr_name)

    if isinstance(attr, (types.BuiltinFunctionType, types.BuiltinMethodType)):
        return StubFunction(obj, attr_name)

    # What an absurd type this is ....
    if type(attr).__name__ == "method-wrapper":
        return StubMethodWrapper(attr)

    # This is also slot_descriptor
    if type(attr).__name__ == "wrapper_descriptor":
        return StubWrapperDescriptor(obj, attr_name)

    raise UnsupportedStub("can't stub %s(%s) of %s", attr_name, type(attr), obj)
def get_members(app, mod, typ, include_public=None):
    """Return the members of mod of the given type

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param mod: the module with members
    :type mod: module
    :param typ: the typ, ``'class'``, ``'function'``, ``'exception'``,
                ``'data'``, ``'members'``
    :type typ: str
    :param include_public: list of private members to include as public
    :type include_public: list | None
    :returns: the public member names and all member names
    :rtype: tuple of two lists
    :raises: None
    """
    include_public = include_public or []
    tests = {'class': lambda x: inspect.isclass(x) and not issubclass(x, BaseException) and x.__module__ == mod.__name__,
             'function': lambda x: inspect.isfunction(x) and x.__module__ == mod.__name__,
             'exception': lambda x: inspect.isclass(x) and issubclass(x, BaseException) and x.__module__ == mod.__name__,
             'data': lambda x: not inspect.ismodule(x) and not inspect.isclass(x) and not inspect.isfunction(x),
             'members': lambda x: True}
    items = []
    for name in dir(mod):
        i = getattr(mod, name)
        # FIX: removed a stray no-op ``inspect.ismodule(i)`` statement here.
        # Unknown types match nothing.
        if tests.get(typ, lambda x: False)(i):
            items.append(name)
    public = [x for x in items
              if x in include_public or not x.startswith('_')]
    app.debug2('Got members of %s of type %s: public %s and %s',
               mod, typ, public, items)
    return public, items
def _get_valuedoc(value):
    """
    If a C{ValueDoc} for the given value exists in the valuedoc cache,
    then return it; otherwise, create a new C{ValueDoc}, add it to the
    cache, and return it.  When possible, the new C{ValueDoc}'s C{pyval},
    C{repr}, and C{canonical_name} attributes will be set appropriately.
    """
    cache_key = id(value)
    val_doc = _valuedoc_cache.get(cache_key)
    if val_doc is not None:
        return val_doc

    try:
        canonical_name = get_canonical_name(value, strict=True)
    except DottedName.InvalidDottedName:
        canonical_name = UNKNOWN
    val_doc = ValueDoc(pyval=value, canonical_name=canonical_name,
                       docs_extracted_by='introspecter')
    _valuedoc_cache[cache_key] = val_doc

    # If it's a module, then do some preliminary introspection.
    # Otherwise, check what the containing module is (used e.g.
    # to decide what markup language should be used for docstrings).
    if inspect.ismodule(value):
        introspect_module(value, val_doc, preliminary=True)
        val_doc.defining_module = val_doc
    else:
        module = sys.modules.get(str(get_containing_module(value)))
        if module is not None and inspect.ismodule(module):
            val_doc.defining_module = _get_valuedoc(module)
    return val_doc
def delete_module(mod):
    """Unload a script-component module and scrub references to it.

    Accepts either a module object or a module name.  Refuses to unload
    ``__main__``, non-script-component modules, and this module itself.
    Records which script components referenced the deleted module in the
    global ``sub_mods`` map.
    """
    global sub_mods
    # FIX: was ``type(mod) == str``; isinstance is the correct type check.
    if isinstance(mod, str):
        mod = sys.modules[mod]
    if mod.__name__ == "__main__":
        return
    if not is_script_component(mod):
        sys.stderr.write("[WARNING] Tried to reload non script module. Aborted." + "\n")
        return
    if mod.__name__ == __name__:
        return
    sys.stderr.write("[Delete Module] " + mod.__name__ + "\n")
    # Give the module a chance to clean up via a module-level __del__ hook.
    if hasattr(mod, "__del__"):
        mod.__del__()
    # Drop script-component submodule attributes; foreign modules are kept.
    for attr in filter(lambda x: not (x.startswith("__") and x.endswith("__")), dir(mod)):
        if inspect.ismodule(getattr(mod, attr)) and not is_script_component(getattr(mod, attr)):
            continue
        elif inspect.ismodule(getattr(mod, attr)):
            delattr(mod, attr)
    del sys.modules[mod.__name__]
    # Remove references to the deleted module from other script components
    # and remember who held them so they can be re-attached on reload.
    # NOTE(review): is_script_component is called with a module above but a
    # name string here -- confirm it accepts both.
    for i in sys.modules:
        if is_script_component(i) or i == "__main__":
            if hasattr(sys.modules[i], mod.__name__):
                delattr(sys.modules[i], mod.__name__)
                if sys.modules[i].__name__ not in sub_mods:
                    sub_mods[sys.modules[i].__name__] = []
                sub_mods[sys.modules[i].__name__].append(mod.__name__)
def _traverse_internal(root, visit, stack, path): """Internal helper for traverse.""" # Only traverse modules and classes if not inspect.isclass(root) and not inspect.ismodule(root): return try: children = inspect.getmembers(root) except ImportError: # On some Python installations, some modules do not support enumerating # members (six in particular), leading to import errors. children = [] new_stack = stack + [root] visit(path, root, children) for name, child in children: # Do not descend into built-in modules if inspect.ismodule(child) and child.__name__ in sys.builtin_module_names: continue # Break cycles if any(child is item for item in new_stack): # `in`, but using `is` continue child_path = path + '.' + name if path else name _traverse_internal(child, visit, new_stack, child_path)
def _find(self, tests, obj, name, module, source_lines, globs, seen):
    """Recursively find doctests in *obj*, appending them to *tests*.

    FIX: the two recursion guards below were ``while`` loops whose
    conditions never change inside the body, so any matching function or
    class caused an infinite loop.  They must be ``if`` statements (as in
    CPython's doctest.DocTestFinder._find).
    """
    if self._verbose:
        print('Finding tests in %s' % name)
    # If we've already processed this object, then ignore it.
    if id(obj) in seen:
        return
    seen[id(obj)] = 1
    # Find a test for this object, and add it to the list of tests.
    test = self._get_test(obj, name, module, globs, source_lines)
    if test is not None:
        tests.append(test)
    # Look for tests in a module's contained objects.
    if inspect.ismodule(obj) and self._recurse:
        for (valname, val) in obj.__dict__.items():
            valname = '%s.%s' % (name, valname)
            # Recurse to functions & classes.
            if ((inspect.isfunction(val) or inspect.isclass(val))
                    and self._from_module(module, val)):
                self._find(tests, val, valname, module, source_lines, globs, seen)
    # Look for tests in a module's __test__ dictionary.
    if inspect.ismodule(obj) and self._recurse:
        for (valname, val) in getattr(obj, '__test__', {}).items():
            if not isinstance(valname, str):
                raise ValueError('DocTestFinder.find: __test__ keys must be strings: %r' % (type(valname),))
            if not (inspect.isfunction(val) or (inspect.isclass(val) or (inspect.ismethod(val) or (inspect.ismodule(val) or isinstance(val, str))))):
                raise ValueError('DocTestFinder.find: __test__ values must be strings, functions, methods, classes, or modules: %r' % (type(val),))
            valname = '%s.__test__.%s' % (name, valname)
            self._find(tests, val, valname, module, source_lines, globs, seen)
    # Look for tests in a class's contained objects.
    if inspect.isclass(obj) and self._recurse:
        for (valname, val) in obj.__dict__.items():
            # Special handling for staticmethod/classmethod.
            if isinstance(val, staticmethod):
                val = getattr(obj, valname)
            if isinstance(val, classmethod):
                val = getattr(obj, valname).__func__
            # Recurse to methods, properties, and nested classes.
            if ((inspect.isfunction(val) or inspect.isclass(val)
                    or isinstance(val, property))
                    and self._from_module(module, val)):
                valname = '%s.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines, globs, seen)
def generate_rst(modpath, module, handle=None):
    """Categorize the public members of *module* for rST generation.

    :param modpath: dotted path of the module (used in error messages)
    :param module: the module object to inspect
    :param handle: output stream; defaults to sys.stdout
    """
    if handle is None:
        handle = sys.stdout
    target_names = [x for x in dir(module)
                    if not (x.startswith("_") or inspect.ismodule(getattr(module, x)))]
    # An explicit __all__ takes precedence over the dir() scan.
    target_names = getattr(module, '__all__', target_names)
    klasses, funcs, exceptions, others = [], [], [], []
    modules = []
    # BaseException does not exist on very old Pythons; fall back to Exception.
    base_exception = globals().get("BaseException", Exception)
    for target in target_names:
        try:
            obj = getattr(module, target)
        # FIX: was Python-2-only ``except AttributeError, a`` syntax.
        except AttributeError as a:
            sys.stderr.write("failed processing %s, accessing %s: %s\n"
                             % (modpath, target, a))
            raise
        if inspect.isclass(obj):
            if issubclass(obj, base_exception):
                exceptions.append(target)
            else:
                klasses.append(target)
        elif callable(obj):
            funcs.append(target)
        elif inspect.ismodule(obj):
            modules.append(target)
        else:
            others.append(target)
def startContext(self, context):
    """Set up the test database and fixtures when entering a module or
    test-case-class context; other contexts are ignored."""
    if not (ismodule(context) or is_test_case_class(context)):
        return
    # Modules carry the flag directly; classes use the *_after_all_tests name.
    attr_suffix = '' if ismodule(context) else '_after_all_tests'
    flag_name = 'database_single_transaction' + attr_suffix
    if getattr(context, flag_name, False):
        # TODO: When no test case in this module needing database is run
        # (for example user selected only one unitTestCase), database
        # should not be initialized.  So it would be best if db is
        # initialized when the first test case needing database is run.
        # Create test database if not already created.
        if not self.test_database_created:
            self._create_test_databases()
    if getattr(context, flag_name, False):
        from django.db import transaction
        transaction.enter_transaction_management()
        transaction.managed(True)
    # When used from startTest, a nose-wrapped testcase is provided --
    # while here we have the 'bare' test case.
    self._prepare_tests_fixtures(context)
def extract_views_from_urlpatterns(cls, urlpatterns, base='', mod=None):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a tuple of
    (view_func, regex, module).
    """
    views = []
    for pattern in urlpatterns:
        if hasattr(pattern, 'url_patterns'):
            # An include(): recurse into the nested patterns.
            try:
                nested_patterns = pattern.url_patterns
            except ImportError:
                continue
            if inspect.ismodule(pattern):
                url_mod = pattern.__name__
            elif hasattr(pattern, 'urlconf_module'):
                conf = pattern.urlconf_module
                if inspect.ismodule(conf):
                    url_mod = conf.__name__
                else:
                    url_mod = 'a {} in {}'.format(type(conf), mod)
            else:
                url_mod = pattern
            views.extend(cls.extract_views_from_urlpatterns(
                nested_patterns,
                base=base + cls.fix_pattern(pattern.regex.pattern, base),
                mod=url_mod))
        elif hasattr(pattern, 'callback'):
            # A plain view pattern.
            try:
                views.append((pattern.callback,
                              base + cls.fix_pattern(pattern.regex.pattern, base),
                              mod))
            except ViewDoesNotExist:
                continue
        else:
            raise TypeError("%s does not appear to be a urlpattern object" % pattern)
    return views
def mygetmodule(object):
    """Return the module an object was defined in, or None if not found."""
    import os.path
    if inspect.ismodule(object):
        return object
    if hasattr(object, "__module__"):
        return sys.modules.get(object.__module__)
    try:
        file = inspect.getabsfile(object)
    except TypeError:
        return None
    # FIX: dict.has_key() was removed in Python 3; use the ``in`` operator.
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    for module in sys.modules.values():
        # check if value is indeed a module
        if inspect.ismodule(module) and hasattr(module, "__file__"):
            modulesbyfile[os.path.realpath(inspect.getabsfile(module))] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    main = sys.modules["__main__"]
    if not hasattr(object, "__name__"):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # FIX: "__builtin__" is the Python 2 name and raises KeyError on
    # Python 3; try both spellings.
    builtin = sys.modules.get("__builtin__") or sys.modules.get("builtins")
    if builtin is not None and hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
def get_derived_nodes(modules):
    '''
    Create a key:value pair of each node_name to Node class for all Nodes
    within the modules provided.

    sample module_names = ['path_to.module', 'analysis_engine.flight_phase', ...]

    :param modules: Modules or module names to import as locations on PYTHON PATH
    :type modules: [str or module]
    :returns: Module or module name to Classes
    :rtype: dict
    '''
    # OPT: local variable to avoid module-level lookup.
    known_subclasses = NODE_SUBCLASSES

    def _is_node_subclass(value, classes, superclass):
        if not isclass(value):
            return False
        # OPT: Lookup from set instead of issubclass (200x speedup).
        for base in value.__bases__:
            if base in classes:
                return True
        return issubclass(value, superclass)

    if isinstance(modules, basestring) or ismodule(modules):
        # This has been done too often!
        modules = [modules]
    nodes = {}
    for module in modules:
        # Ref:
        # http://code.activestate.com/recipes/223972-import-package-modules-at-runtime/
        # Why is the last parameter to __import__() a list whose only member
        # is an empty string?  If the last parameter is empty, loading class
        # "A.B.C.D" actually only loads "A"; if it is defined, regardless of
        # its value, we end up loading "A.B.C".
        if not ismodule(module):
            module = __import__(module, globals(), locals(), [''])
        for candidate in vars(module).values():
            if _is_node_subclass(candidate, known_subclasses, Node) \
                    and candidate.__module__ != 'analysis_engine.node':
                try:
                    # TODO: Alert when a duplicate node_name is found which
                    # overrides a previous entry.
                    nodes[candidate.get_name()] = candidate
                except TypeError:
                    # TODO: Handle the expected error of top level classes
                    # (can't instantiate abstract class DerivedParameterNode)
                    # - but don't know how to detect if we're at that level.
                    logger.exception('Failed to import class: %s' % candidate.get_name())
    return nodes
def __init__(self, module=False, with_path=None, prefixes=None):
    """Discover the package/module to process and generate Rust bindings.

    :param module: True or a module object to operate in module mode;
        False to discover a package root from *with_path*/the caller.
    :param with_path: explicit path to the package; defaults to the
        caller's file location.
    :param prefixes: prefix string or list of prefixes marking functions
        to bind; defaults to ['rust_bind_'].
    """
    if isinstance(prefixes, list):
        self.prefixes = prefixes
    elif isinstance(prefixes, str):
        self.prefixes = [prefixes]
    else:
        self.prefixes = ['rust_bind_']
    # get package root for parsing
    if not with_path:
        caller = inspect.stack()[1]
        info = dict(inspect.getmembers(caller.frame))
        path = info["f_globals"]["__file__"]
        path = os.path.abspath(path)
    else:
        path = with_path
    if module is True or inspect.ismodule(module):
        self._ismodule = True
        if inspect.ismodule(module):
            # NOTE(review): root becomes the module object here (not a
            # path) -- confirm downstream os.walk usage handles this.
            self.root = module
        else:
            if not os.path.exists(path):
                raise FileNotFoundError(path)
            # Import only to validate that the module is importable.
            try:
                import_module(os.path.basename(path).replace('.py', ''))
            # FIX: was a bare ``except:`` which also swallows
            # KeyboardInterrupt/SystemExit.
            except Exception:
                raise ImportError(ERR_NO_MODULE)
            self.root = os.path.dirname(path)
    else:
        self._ismodule = False
        # discover the root of the package
        init_f = os.path.join(path, '__init__.py')
        if os.path.exists(init_f):
            path = init_f
        else:
            # Walk upwards until an __init__.py is found or we hit the
            # filesystem root.
            while os.path.basename(path) != '__init__.py':
                path = os.path.dirname(path)
                for f in os.listdir(path):
                    if f == "__init__.py":
                        path = os.path.join(path, f)
                        break
                if path == os.path.abspath(os.sep):
                    raise FileNotFoundError(self.ERR_NO_PACKAGE)
        self.root = os.path.dirname(path)
    self.pyfiles, self.rsfiles = [], []
    # add all files to process
    for root, _, files in os.walk(self.root, topdown=False):
        for f in files:
            if f.endswith(".py"):
                self.pyfiles.append(os.path.join(root, f))
            elif f.endswith(".rs"):
                self.rsfiles.append(os.path.join(root, f))
    # parse python files function signatures to generate bindings
    self.parse_functions()
    # write the code in a Rust file
    self.dump_to_rust()
def can_document_member(cls, member, membername, isattr, parent):
    """Return True when this documenter can handle *member*.

    Data descriptors (that are not methods) are always accepted; modules
    are explicitly rejected so the Attribute Documenter never documents
    them.
    """
    is_data_descriptor = (sphinx.ext.autodoc.isdescriptor(member)
                          and not isinstance(member, cls.method_types))
    if is_data_descriptor:
        return True
    return (not inspect.ismodule(parent)
            and not inspect.isroutine(member)
            and not inspect.ismodule(member)
            and not isinstance(member, sphinx.ext.autodoc.class_types))
def get_doc(self, descriptor):
    """Reimplemented to (potentially) use the features from
    sphinx.ext.autodoc

    :param descriptor: wrapper exposing ``obj`` (the documented object)
        and ``objtype`` (e.g. ``'data'``)
    :returns: the rST help text, stripped and terminated by one newline
    """
    obj = descriptor.obj
    # Resolve the module the object lives in (the object itself if it is
    # a module).
    if inspect.ismodule(obj):
        module = obj
    else:
        module = inspect.getmodule(obj)
    # Treat dunder-named or non-importable modules as unavailable.
    if module is not None and (re.match('__.*__', module.__name__) or
                               not self.is_importable(module.__name__)):
        module = None
    isclass = inspect.isclass(obj)
    # If the module is available, we try to use autodoc
    if module is not None:
        doc = '.. currentmodule:: ' + module.__name__ + '\n\n'
        # a module --> use automodule
        if inspect.ismodule(obj):
            doc += self.header(descriptor, '')
            doc += '.. automodule:: ' + obj.__name__
        # an importable class --> use autoclass
        elif isclass and getattr(module, obj.__name__, None) is not None:
            doc += self.header(descriptor, '')
            doc += '.. autoclass:: ' + obj.__name__
        # an instance and the class can be imported
        # --> use super get_doc and autoclass for the type
        elif descriptor.objtype == 'data' and getattr(
                module, type(obj).__name__, None) is not None:
            doc += '\n\n'.join([
                super(UrlHelp, self).get_doc(descriptor),
                "Class docstring\n===============",
                '.. autoclass:: ' + type(obj).__name__])
        # an instance --> use super get_doc for instance and the type
        elif descriptor.objtype == 'data':
            cls_doc = super(UrlHelp, self).get_doc(self.describe_object(
                type(obj), type(obj).__name__))
            doc += '\n\n'.join([
                super(UrlHelp, self).get_doc(descriptor),
                "Class docstring\n===============",
                cls_doc])
        # a function or method --> use super get_doc
        else:
            doc += super(UrlHelp, self).get_doc(descriptor)
    # otherwise the object has been defined in this session
    else:
        # an instance --> use super get_doc for instance and the type
        if descriptor.objtype == 'data':
            cls_doc = super(UrlHelp, self).get_doc(self.describe_object(
                type(obj), type(obj).__name__))
            doc = '\n\n'.join([
                super(UrlHelp, self).get_doc(descriptor),
                "Class docstring\n===============",
                cls_doc])
        # a function or method --> use super get_doc
        else:
            doc = super(UrlHelp, self).get_doc(descriptor)
    return doc.rstrip() + '\n'
def _patched_getmodule(object, _filename=None): """Return the module an object was defined in, or None if not found. This replicates the functionality of the stdlib `inspect.getmodule` function but includes a fix for a bug present in Python 3.1 and 3.2. """ #these imports mock up what would otherwise have been in inspect import sys import os from inspect import modulesbyfile, _filesbymodname, getabsfile, ismodule if ismodule(object): return object if hasattr(object, '__module__'): return sys.modules.get(object.__module__) # Try the filename to modulename cache if _filename is not None and _filename in modulesbyfile: return sys.modules.get(modulesbyfile[_filename]) # Try the cache again with the absolute file name try: file = getabsfile(object, _filename) except TypeError: return None if file in modulesbyfile: return sys.modules.get(modulesbyfile[file]) # Update the filename to module name cache and check yet again # Copy sys.modules in order to cope with changes while iterating # This is where the fix is made - the adding of the "list" call: for modname, module in list(sys.modules.items()): if ismodule(module) and hasattr(module, '__file__'): f = module.__file__ if f == _filesbymodname.get(modname, None): # Have already mapped this module, so skip it continue _filesbymodname[modname] = f f = getabsfile(module) # Always map to the name the module knows itself by modulesbyfile[f] = modulesbyfile[ os.path.realpath(f)] = module.__name__ if file in modulesbyfile: return sys.modules.get(modulesbyfile[file]) # Check the main module main = sys.modules['__main__'] if not hasattr(object, '__name__'): return None if hasattr(main, object.__name__): mainobject = getattr(main, object.__name__) if mainobject is object: return main # Check builtins builtin = sys.modules['builtins'] if hasattr(builtin, object.__name__): builtinobject = getattr(builtin, object.__name__) if builtinobject is object: return builtin
def importModuleCheck(mod, parent=None, added=False):
    """
    If can't find module on first try, recursively check for relative imports

    NOTE(review): Python 2 only -- uses the ``print >>`` statement and the
    implicit-relative ``__import__(..., -1)`` level, and reads the module
    globals ``pep8.current_file``, ``logical_line``, ``_missingImport`` and
    ``VERBOSE_MISSING_IMPORT`` defined elsewhere.  Returns None on success
    or a ``(column, message)`` tuple describing the violation.
    """
    current_path = os.path.dirname(pep8.current_file)
    try:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            valid = True
            if parent:
                # Whitelisted parents are never flagged.
                if is_import_exception(parent):
                    return
                parent_mod = __import__(parent, globals(), locals(), [mod], -1)
                valid = inspect.ismodule(getattr(parent_mod, mod))
            else:
                __import__(mod, globals(), locals(), [], -1)
                valid = inspect.ismodule(sys.modules[mod])
            if not valid:
                if added:
                    # The import only resolved after the file's own
                    # directory was appended to sys.path -> relative import.
                    sys.path.pop()
                    added = False
                    return (logical_line.find(mod), ("ENERGY N304: No "
                            "relative imports. '%s' is a relative import"
                            % logical_line))
                return (logical_line.find(mod), ("ENERGY N302: import only "
                        "modules. '%s' does not import a module"
                        % logical_line))
    except (ImportError, NameError) as exc:
        if not added:
            # Retry once with the file's directory on sys.path to detect
            # relative imports.
            added = True
            sys.path.append(current_path)
            return importModuleCheck(mod, parent, added)
        else:
            name = logical_line.split()[1]
            if name not in _missingImport:
                if VERBOSE_MISSING_IMPORT:
                    print >> sys.stderr, ("ERROR: import '%s' failed: %s" %
                                          (name, exc))
                _missingImport.add(name)
            added = False
            sys.path.pop()
            return
    except AttributeError:
        # Invalid import
        return logical_line.find(mod), ("ENERGY N303: Invalid import, "
                                        "AttributeError raised")
def loadApp(app_name):
    """
    load app configuration file, do minimalistic validation
    return dict with app parameters
    raises error if can not load

    NOTE(review): Python 2 only (``execfile``, ``iteritems``,
    ``except Exception, e``).  Reads the module globals ``curdir``,
    ``akrrcfgdir`` and ``wrongfields`` defined elsewhere.
    """
    try:
        default_app_cfg_filename = os.path.join(curdir, "default.app.inp.py")
        app_cfg_filename = os.path.join(curdir, 'appkernels', app_name + ".app.inp.py")
        # NOTE(review): these two akrrError(...) calls are not raised --
        # probably missing a ``raise``; confirm intended behaviour.
        if not os.path.isfile(default_app_cfg_filename):
            akrrError(ERROR_GENERAL, "Default application kernel configuration file do not exists (%s)!" % default_app_cfg_filename)
        if not os.path.isfile(app_cfg_filename):
            akrrError(ERROR_GENERAL, "application kernel configuration file do not exists (%s)!" % app_cfg_filename)
        # Execute defaults first, then the app config on top of them.
        tmp = {}
        execfile(default_app_cfg_filename, tmp)
        execfile(app_cfg_filename, tmp)
        app = {}
        # Keep only plain settings: drop imported modules and blacklisted
        # keys (``wrongfields``).
        for key, val in tmp.iteritems():
            if inspect.ismodule(val): continue
            if wrongfields.count(key) > 0: continue
            app[key] = val
        # load resource specific parameters
        for resource_name in os.listdir(os.path.join(akrrcfgdir, "resources")):
            if resource_name not in ['notactive', 'templates']:
                resource_specific_app_cfg_filename = os.path.join(akrrcfgdir, "resources", resource_name, app_name + ".app.inp.py")
                if os.path.isfile(resource_specific_app_cfg_filename):
                    # Start from the app's defaults, overlay the
                    # resource-specific config.
                    tmp = copy.deepcopy(app['appkernelOnResource']['default'])
                    execfile(resource_specific_app_cfg_filename, tmp)
                    app['appkernelOnResource'][resource_name] = {}
                    for key, val in tmp.iteritems():
                        if inspect.ismodule(val): continue
                        if wrongfields.count(key) > 0: continue
                        app['appkernelOnResource'][resource_name][key] = val
                    app['appkernelOnResource'][resource_name]['resource_specific_app_cfg_filename'] = resource_specific_app_cfg_filename
                    app['appkernelOnResource'][resource_name]['resource_specific_app_cfg_file_mtime'] = os.path.getmtime(resource_specific_app_cfg_filename)
        # mapped options in app input file to those used in AKRR
        if 'name' not in app: app['name'] = app_name
        if 'nickname' not in app: app['nickname'] = app_name + ".@nnodes@"
        # last modification time for future reloading
        app['default_app_cfg_filename'] = default_app_cfg_filename
        app['app_cfg_filename'] = app_cfg_filename
        app['default_app_cfg_file_mtime'] = os.path.getmtime(default_app_cfg_filename)
        app['app_cfg_file_mtime'] = os.path.getmtime(app_cfg_filename)
        # here should be validation
        verifyAppParams(app)
        return app
    except Exception, e:
        raise akrrError(ERROR_GENERAL, "Can not load app configuration for " + app_name + ":\n" + str(e) + traceback.format_exc())
def import_module_check(mod, parent=None, added=False):
    """Checks for relative, modules and invalid imports.

    If can't find module on first try, recursively check for relative
    imports. When parsing 'from x import y,' x is the parent.

    NOTE(review): Python 2 only -- ``print >>`` statement and the
    implicit-relative ``__import__(..., -1)`` level; also reads the module
    globals ``pep8.current_file``, ``logical_line``, ``_missingImport`` and
    ``VERBOSE_MISSING_IMPORT``.  Returns None on success or a
    ``(column, message)`` tuple describing the violation.
    """
    current_path = os.path.dirname(pep8.current_file)
    try:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            valid = True
            if parent:
                parent_mod = __import__(parent, globals(), locals(), [mod], -1)
                valid = inspect.ismodule(getattr(parent_mod, mod))
            else:
                __import__(mod, globals(), locals(), [], -1)
                valid = inspect.ismodule(sys.modules[mod])
            if not valid:
                if added:
                    # Import only resolved after appending the file's own
                    # directory to sys.path -> it is a relative import.
                    sys.path.pop()
                    added = False
                    return logical_line.find(mod), ("N304: No "
                        "relative imports. '%s' is a relative import"
                        % logical_line)
                return logical_line.find(mod), ("N302: import only "
                    "modules. '%s' does not import a module" % logical_line)
    except (ImportError, NameError) as exc:
        if not added:
            # Retry once with the file's directory on sys.path.
            added = True
            sys.path.append(current_path)
            return import_module_check(mod, parent, added)
        else:
            name = logical_line.split()[1]
            if name not in _missingImport:
                if VERBOSE_MISSING_IMPORT != 'False':
                    print >> sys.stderr, ("ERROR: import '%s' in %s "
                                          "failed: %s" %
                                          (name, pep8.current_file, exc))
                _missingImport.add(name)
            added = False
            sys.path.pop()
            return
    except AttributeError:
        # Invalid import
        if "import *" in logical_line:
            # TODO(jogo): handle "from x import *, by checking all
            # "objects in x"
            return
        return logical_line.find(mod), ("N303: Invalid import, "
                                        "%s" % mod)
def model_iterator(application, include_related=True, exclude=None):
    '''A generator of :class:`StdModel` classes found in *application*.

    :parameter application: A python dotted path or an iterable over python
        dotted-paths where models are defined. Only models defined in these
        paths are considered. For example::

            from stdnet.odm import model_iterator

            APPS = ('stdnet.contrib.searchengine',
                    'stdnet.contrib.timeseries')

            for model in model_iterator(APPS):
                ...
    '''
    exclude = set() if exclude is None else exclude
    application = native_str(application)
    if not (ismodule(application) or isinstance(application, str)):
        # An iterable of applications: recurse over each entry.
        # NOTE(review): include_related/exclude are not forwarded here
        # (defaults are used), matching the long-standing behaviour.
        for app in application:
            for model in model_iterator(app):
                yield model
        return
    if ismodule(application):
        mod, application = application, application.__name__
    else:
        try:
            mod = import_module(application)
        except ImportError:
            # the module is not there
            mod = None
    if not mod:
        return
    label = application.split('.')[-1]
    try:
        mod_models = import_module('.models', application)
    except ImportError:
        mod_models = mod
    label = getattr(mod_models, 'app_label', label)
    emitted = set()
    for name in dir(mod_models):
        value = getattr(mod_models, name)
        meta = getattr(value, '_meta', None)
        if isinstance(value, ModelType) and meta:
            for model in models_from_model(
                    value, include_related=include_related, exclude=exclude):
                if model._meta.app_label == label and model not in emitted:
                    emitted.add(model)
                    yield model
def __init__(
    self,
    object,
    extra_attributes=None,
    extra_functions=None,
    extra_classes=None,
    extra_methods=None,
    extra_class_attributes=None,
    inherited=True,
):
    """Collect the members of a module (or of the given object's module).

    :param object: a module or a class; anything else raises TypeError.
    :param extra_attributes/extra_functions/extra_classes/extra_methods/
        extra_class_attributes: names to force-include; bare names are
        qualified with the module name.
    :param inherited: whether inherited members are considered.

    Fixes: mutable ``[]`` defaults (previously shared across calls and
    mutated in place), ``eval()`` on argument names, and a bare ``raise``
    with no active exception.
    """
    self.inherited = inherited
    # Must be a module or a class.
    if not inspect.ismodule(object) and not inspect.isclass(object):
        # FIX: was a bare ``raise`` with no active exception (which
        # surfaces as an unhelpful RuntimeError).
        raise TypeError("expected a module or a class, got %r" % (object,))
    if inspect.ismodule(object):
        self.module = object
    else:
        self.module = inspect.getmodule(object)
    self.modname = self.module.__name__
    # Get base lists
    self.attributes = self.get_members(object)
    self.functions = self.get_members(object, "function")
    self.classes = self.get_members(object, "class")
    # Sub content
    self.class_contents = {}
    for clsname, cls in self.classes:
        self.class_contents[clsname] = dict(
            methods=self.get_members(cls, "method"),
            # classmethods = self.get_members(object, 'classmethod'),
            # staticmethods = self.get_members(object, 'staticmethod'),
            attributes=self.get_members(cls),
        )
    # Normalize the extra_* arguments.  Copying avoids mutating the
    # caller's lists; the explicit mapping replaces the old
    # ``eval("extra_" + name)`` lookup.
    extras_by_name = {
        "attributes": list(extra_attributes or []),
        "functions": list(extra_functions or []),
        "classes": list(extra_classes or []),
        "methods": list(extra_methods or []),
        "class_attributes": list(extra_class_attributes or []),
    }
    for short_name, extras in extras_by_name.items():
        # Qualify bare names with the module name.
        for i, extra in enumerate(extras):
            if not extra.startswith(self.modname + "."):
                extras[i] = self.modname + "." + extra
        # Stored as self.extra_attributes, self.extra_functions, ...
        setattr(self, "extra_" + short_name, extras)
    extra_methods = extras_by_name["methods"]
    extra_attributes = extras_by_name["attributes"]
    extra_classes = extras_by_name["classes"]
    if len(extra_methods + extra_attributes):
        # Every class referenced by an extra method/attribute must also be
        # listed as an extra class (by its bare name, as before).
        for objname in extra_methods + extra_attributes:
            clsname = objname.split(".")[1]
            if clsname not in extra_classes:
                extra_classes.append(clsname)
def info_obj(obj):
    """Recursively print information about the object named by the dotted
    string *obj*.

    NOTE(review): *obj* is a string that is passed to ``eval`` -- never
    call this with untrusted input.  The original wanted to exclude
    builtin functions (see first comment below).
    """
    # Want to exclude builtin functions here
    p = eval(obj)
    # Returns true if the object is a module.
    #if inspect.ismodule(obj) or inspect.isclass(obj) or inspect.ismethod(obj) or inspect.isfunction(obj):
    if inspect.ismodule(p) or inspect.isclass(p) or inspect.ismethod(p) or inspect.isfunction(p):
        if '__class__' in obj:
            pass
        else:
            help(p)
    else:
        print(obj + " : " + p)
    cnt = 0
    for i in dir(p):
        #print(i)
        # Returns true if the object is a module.
        if inspect.ismodule(p):
            try:
                # Derive the module name from its repr and recurse into
                # each member.
                modname = str(p).split(' ')[1]
                modname = modname.replace("'", '')
                s = modname + '.' + i
                info_obj(s)
            except:
                pass
        # Returns true if the object is a built-in or Python-defined class.
        if inspect.isclass(p):
            classname = str(p).split(' ')[1]
            classname = classname.replace("'", '')
            classname = classname.replace(">", '')
            s = classname + '.' + i
            info_obj(s)
        # Returns true if the object is a method.
        if inspect.ismethod(p):
            print(p)
            methodname = str(p)
            print('methodname')
        # Returns true if the object is a Python function (including those
        # created by lambda expressions).
        if inspect.isfunction(p):
            print(p)
            functionname = str(p).split(' ')[1]
            s = functionname + '.' + i
            print(s)
            info_obj(s)
        # Returns true if the object is a Python generator function.
        if inspect.isgeneratorfunction(p):
            print(p)
            generatorname = str(p)
            print('generatorname')
        cnt += 1
def generateApi(api):
    """Returns a stringified output for the given API set."""
    import jasy.env.Task as Task

    result = []
    for key in sorted(api):
        if key.startswith("__"):
            continue
        value = api[key]
        if isinstance(value, Task.Task):
            continue
        msg = Console.colorize(key, "bold")
        if inspect.isfunction(value):
            msg += Console.colorize(highlightArgs(value), "bold")
        elif inspect.isclass(value):
            msg += Console.colorize(highlightArgs(value.__init__, True), "bold")
        humanType = extractType(value)
        if humanType:
            msg += Console.colorize(" [%s]" % extractType(value), "magenta")
        msg += extractDoc(value) or ""
        result.append(msg)
        # FIX: the previous guard here ("isclass or ismodule or
        # isinstance(value, object)") was a tautology -- everything is an
        # instance of object -- so the member listing always runs; the
        # sprefix branches reduced to "" for classes and "key." otherwise.
        sprefix = "" if inspect.isclass(value) else "%s." % key
        smembers = dict(inspect.getmembers(value))
        for skey in sorted(smembers):
            if not "__" in skey:
                svalue = smembers[skey]
                if inspect.ismethod(svalue) or inspect.isfunction(svalue):
                    msg = " - %s%s" % (sprefix, Console.colorize(skey, "bold"))
                    msg += highlightArgs(svalue, humanType in ("Class", "Object"))
                    msg += extractDoc(svalue, indent=6) or ""
                    result.append(msg)
        result.append("")
    return "\n".join(result)
def get_module_attributes(module):
    """Return a copy of *module*'s plain data attributes.

    Bookkeeping dunders and any functions, modules, or classes are
    stripped out.  Returns None when *module* is not a module.
    """
    if not inspect.ismodule(module):
        return None
    temp = module.__dict__.copy()
    # FIX: use pop(..., None) instead of unconditional del -- not every
    # module defines all of these (e.g. synthetic modules have no
    # __builtins__ or __file__), so del raised KeyError.
    for todel in ["__builtins__", "__name__", "__file__", "__doc__", "__package__"]:
        temp.pop(todel, None)
    # FIX: dict.viewitems() is Python 2 only; items() works on 2 and 3.
    keystodel = [k for k, v in temp.items()
                 if inspect.isfunction(v) or inspect.ismodule(v) or inspect.isclass(v)]
    for k in keystodel:
        del temp[k]
    return temp
def get_classes(module):
    """Recursively get all classes in the module"""
    logger.debug('Called.')
    members = getmembers(module, lambda o: isclass(o) or ismodule(o))
    for member_name, member in members:
        logger.debug('name="{}" object="{}"'.format(member_name, member))
        if ismodule(member) and member_name.startswith('asn_'):
            # Recurse into asn_* submodules only.
            for nested_name, nested_class in get_classes(member):
                yield nested_name, nested_class
        elif isclass(member):
            yield member_name, member
def safe_patch(patchable, key, patch_func, service, meta, tracer):
    """
    takes patch_func (signature: takes the orig_method that is wrapped in
    the monkey patch == UNBOUND + service and meta) and attach the patched
    result to patchable at patchable.key

    - if this is the module/class we can rely on methods being unbound,
      and just have to update the __dict__
    - if this is an instance, we have to unbind the current and rebind our
      patched method
    - If patchable is an instance and if we've already patched at the
      module/class level then patchable[key] contains an already patched
      command!  To workaround this, check if patchable or
      patchable.__class__ are _dogtraced.  If they aren't, nothing to
      worry about, patch the key as usual.  But if they are, search for a
      "__dd_orig_{key}" method on the class, which is the original
      unpatched method we wish to trace.
    """
    def _original_of(target, name):
        stash_name = "__dd_orig_{}".format(name)
        if hasattr(target, '_dogtraced'):
            # Already patched at some level: recover the stashed original.
            return getattr(target, stash_name, None)
        original = getattr(target, name)
        # Stash it for the next time we attempt to patch `target`.
        setattr(target, stash_name, original)
        return original

    if inspect.isclass(patchable) or inspect.ismodule(patchable):
        original = _original_of(patchable, key)
        if not original:
            # Should never happen
            return
    elif hasattr(patchable, '__class__'):
        original = _original_of(patchable.__class__, key)
        if not original:
            # Should never happen
            return
    else:
        return

    patched = patch_func(original, service, meta, tracer)
    if inspect.isclass(patchable) or inspect.ismodule(patchable):
        setattr(patchable, key, patched)
    elif hasattr(patchable, '__class__'):
        # Rebind so the instance carries a bound method of the patched
        # function.
        setattr(patchable, key, patched.__get__(patchable, patchable.__class__))
def describeModule(self,module):
    """ Describe the module object including its classes and functions.

    Returns a dict with keys "Name", "Classes", "Functions" and
    "Valuables" (sic), or None (implicitly) when `module` is not a module.
    """
    mod_classes = []
    mod_functions = []
    mod_valuables = []
    if not inspect.ismodule(module):
        print ("This is not a python module!")
        return
    for i in dir(module):
        # Skip dunder attributes.
        if i.startswith('__'):
            continue
        obj = getattr(module,i)
        # This is already a module; skip it to filter out imports.
        if inspect.ismodule(obj):
            continue
        # Local class in module: only document classes defined here,
        # not ones imported from elsewhere.
        elif inspect.isclass(obj):
            parentName = obj.__dict__['__module__']
            if parentName == module.__name__:
                classObj = self.describe_class(obj)
                mod_classes.append(classObj)
        # Local function in module (same "defined here" filter).
        elif inspect.isfunction(obj):
            if obj.__module__ == module.__name__:
                mod_functions.append(self.describe_func(obj, isMethod=False, isOverrideMethod = False))
        # Anything else is treated as a module-level value
        # ("valuable" is the project's own spelling).
        else:
            mod_valuables.append(self.creatValuable(i, obj))
    moduleDict = {"Name":module.__name__ ,"Classes":mod_classes, "Functions":mod_functions, "Valuables":mod_valuables};
    return moduleDict;
def get_objects_by_type(objects_dict):
    """Sort session objects into functional categories by duck-typing.

    An object may appear in several categories (motor, counter, actuator,
    shutter) if it exposes the corresponding methods.  Python 2 code
    (uses dict.iteritems).
    """
    motors = dict()
    counters = dict()
    actuator = dict()
    shutter = dict()
    for name, obj in objects_dict.iteritems():
        # Classes themselves are never categorized, only instances.
        if inspect.isclass(obj):
            continue
        # is it a motor?
        if has_method(obj, all, "move", "state", "position"):
            motors[name] = obj
        # is it a counter?
        if isinstance(obj, measurement.CounterBase):
            counters[name] = obj
        else:
            # Not a counter itself: look for counters among its
            # instance attributes (registered as "<name>.<member>").
            if not inspect.ismodule(obj):
                try:
                    obj_dict = obj.__dict__
                except AttributeError:
                    pass
                else:
                    for member_name, member in obj_dict.iteritems():
                        if isinstance(member, measurement.CounterBase):
                            counters["%s.%s" % (name, member_name)] = member
        # has it in/out capability?
        if has_method(obj, all, "state") and has_method(obj, any, "set_in") and has_method(obj, any, "set_out"):
            actuator[name] = obj
        # Also scan property members for in/out capability.
        if not inspect.ismodule(obj):
            for member_name, member in inspect.getmembers(obj):
                if isinstance(getattr(obj.__class__, member_name, None), property):
                    if (
                        has_method(member, all, "state")
                        and has_method(member, any, "set_in")
                        and has_method(member, any, "set_out")
                    ):
                        actuator["%s.%s" % (name, member_name)] = member
        # has it open/close capability?
        if has_method(obj, all, "open", "close", "state"):
            shutter[name] = obj
    return {"motors": motors, "counters": counters, "actuator": actuator, "shutter": shutter}
def getmodule_2_5(object, _filename=None):
    """Return the module an object was defined in, or None if not found.

    Backport/fixed variant of ``inspect.getmodule``; maintains the
    module-level caches ``modulesbyfile`` and ``_filesbymodname``.
    Python 2 code (consults ``__builtin__``).
    """
    global modulesbyfile
    global _filesbymodname
    if inspect.ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = inspect.getabsfile(object)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in sys.modules.items():
        if inspect.ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = inspect.getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['__builtin__']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
def parse_cmd(module, cmd, args):
    """Parse arguments `args` to command `cmd` from module `module`."""
    if isinstance(module, str):
        module = importlib.import_module(module)
    assert inspect.ismodule(module)
    # `__commands__` maps command names to parser-builder callables.
    commands = dict(getattr(module, '__commands__'))
    build_parser = commands[cmd]
    parser = build_parser(argparse.ArgumentParser())
    return parser.parse_args([str(arg) for arg in args])
def introspect_module(module, module_doc, module_name=None, preliminary=False):
    """
    Add API documentation information about the module
    C{module} to C{module_doc}.  (Python 2 code: uses ``unicode``.)
    """
    module_doc.specialize_to(ModuleDoc)

    # Record the module's docformat
    if hasattr(module, '__docformat__'):
        module_doc.docformat = unicode(module.__docformat__)

    # Record the module's filename
    if hasattr(module, '__file__'):
        try:
            module_doc.filename = unicode(module.__file__)
        except KeyboardInterrupt:
            raise
        except:
            pass
        if module_doc.filename is not UNKNOWN:
            # Prefer the .py source file over a compiled .pyc/.pyo path.
            try:
                module_doc.filename = py_src_filename(module_doc.filename)
            except ValueError:
                pass

    # If this is just a preliminary introspection, then don't do
    # anything else.  (Typically this is true if this module was
    # imported, but is not included in the set of modules we're
    # documenting.)
    module_doc.variables = {}
    if preliminary:
        return

    # Record the module's docstring
    if hasattr(module, '__doc__'):
        module_doc.docstring = get_docstring(module)

    # If the module has a __path__, then it's (probably) a
    # package; so set is_package=True and record its __path__.
    if hasattr(module, '__path__'):
        module_doc.is_package = True
        try:
            module_doc.path = [unicode(p) for p in module.__path__]
        except KeyboardInterrupt:
            raise
        except:
            pass
    else:
        module_doc.is_package = False

    # Make sure we have a name for the package.
    dotted_name = module_doc.canonical_name
    if dotted_name is UNKNOWN:
        dotted_name = DottedName(module.__name__)
    name_without_primes = DottedName(str(dotted_name).replace("'", ""))

    # Record the module's parent package, if it has one.
    if len(dotted_name) > 1:
        package_name = str(dotted_name.container())
        package = sys.modules.get(package_name)
        if package is not None:
            module_doc.package = introspect_docs(package)
    else:
        module_doc.package = None

    # Initialize the submodules property
    module_doc.submodules = []

    # Add the module to its parent package's submodules list.
    if module_doc.package not in (None, UNKNOWN):
        module_doc.package.submodules.append(module_doc)

    # Look up the module's __all__ attribute (public names).
    public_names = None
    if hasattr(module, '__all__'):
        try:
            public_names = set([str(name) for name in module.__all__])
        except KeyboardInterrupt:
            raise
        except:
            pass

    # Record the module's variables.
    module_doc.variables = {}
    for child_name in dir(module):
        if child_name in UNDOCUMENTED_MODULE_VARS:
            continue
        child = getattr(module, child_name)

        # Create a VariableDoc for the child, and introspect its
        # value if it's defined in this module.
        container = get_containing_module(child)
        if (((container is not None and container == name_without_primes)
             or (public_names is not None and child_name in public_names))
                and not inspect.ismodule(child)):
            # Local variable.
            child_val_doc = introspect_docs(child, context=module_doc,
                                            module_name=dotted_name)
            child_var_doc = VariableDoc(name=child_name,
                                        value=child_val_doc,
                                        is_imported=False,
                                        container=module_doc,
                                        docs_extracted_by='introspecter')
        elif ((container is None or module_doc.canonical_name is UNKNOWN)
              and not inspect.ismodule(child)):
            # Don't introspect stuff "from __future__"
            if is_future_feature(child):
                continue
            # Possibly imported variable.
            child_val_doc = introspect_docs(child, context=module_doc)
            child_var_doc = VariableDoc(name=child_name,
                                        value=child_val_doc,
                                        container=module_doc,
                                        docs_extracted_by='introspecter')
        else:
            # Imported variable.
            child_val_doc = _get_valuedoc(child)
            child_var_doc = VariableDoc(name=child_name,
                                        value=child_val_doc,
                                        is_imported=True,
                                        container=module_doc,
                                        docs_extracted_by='introspecter')

        # If the module's __all__ attribute is set, use it to set the
        # variables public/private status and imported status.
        if public_names is not None:
            if child_name in public_names:
                child_var_doc.is_public = True
                if not isinstance(child_var_doc, ModuleDoc):
                    child_var_doc.is_imported = False
            else:
                child_var_doc.is_public = False

        module_doc.variables[child_name] = child_var_doc

    return module_doc
def testLinearOptimizer(self):
    """Smoke test: `tf.contrib.linear_optimizer` resolves to a module."""
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    linear_optimizer = tf.contrib.linear_optimizer
    assert inspect.ismodule(linear_optimizer)
def testContrib(self):
    """Smoke test: lazily-loaded `tf.contrib` resolves to a module."""
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    # Touch an attribute first: `tf.contrib` is loaded lazily on first use.
    _ = tf.contrib.layers
    assert inspect.ismodule(tf.contrib)
def is_importable(module, attr, only_modules):
    """Decide whether `attr` of `module` should be offered as an import.

    With `only_modules` set, only attributes that are themselves modules
    qualify; otherwise any non-dunder name qualifies.
    """
    if only_modules:
        return inspect.ismodule(getattr(module, attr))
    is_dunder = attr.startswith('__') and attr.endswith('__')
    return not is_dunder
# Demo script: inspect and mutate attributes of a person instance `p`.
# NOTE(review): `p`, `President` and `ins` (presumably the `inspect`
# module) are defined earlier in the file, outside this chunk.
print(p.spouse)
delattr(p, 'spouse')
# delattr(p, 'party')
print(p.__dict__)
print(dir(p))
print("ATTRIBUTES:")
for attr in dir(p):
    if not attr.startswith('__'):
        obj = getattr(p, attr)
        # Truncated repr plus type / function / module classification.
        print(attr, str(obj)[:30], type(obj), ins.isfunction(obj), ins.ismodule(obj))
print()
print(dir(__builtins__))

# def print(*args):  # weird and confusing
#     pass
#
# __builtins__.print("Hello")

def mfn(self):
    # Full-name helper; attached to the class below as `get_fullname`,
    # demonstrating dynamic method injection via setattr.
    return f"{self.first_name} {self.last_name}"

setattr(President, 'get_fullname', mfn)
def getfile(object):
    """Like the stock `getfile`, but honour a module's truthy
    `__orig_file__` override before delegating to `orig_getfile`."""
    if inspect.ismodule(object):
        override = getattr(object, '__orig_file__', None)
        if override:
            return override
    return orig_getfile(object)
def object_description(object):
    """
    Produce a human-consumable text description of ``object``, usually
    involving a Python dotted name. For example:

    .. code-block:: python

       >>> object_description(None)
       u'None'
       >>> from xml.dom import minidom
       >>> object_description(minidom)
       u'module xml.dom.minidom'
       >>> object_description(minidom.Attr)
       u'class xml.dom.minidom.Attr'
       >>> object_description(minidom.Attr.appendChild)
       u'method appendChild of class xml.dom.minidom.Attr'

    If this method cannot identify the type of the object, a generic
    description ala ``object <object.__name__>`` will be returned.

    If the object passed is already a string, it is simply returned. If it
    is a boolean, an integer, a list, a tuple, a set, or ``None``, a
    (possibly shortened) string representation is returned.
    """
    # Strings pass straight through.
    if isinstance(object, string_types):
        return text_(object)
    # Simple scalars: text of their str() representation.
    if isinstance(object, (integer_types, bool, float, type(None))):
        return text_(str(object))
    # Containers get a shortened repr; the second argument is the
    # closing character of that container's repr.
    if isinstance(object, set):
        # Python 3 reprs sets with braces; Python 2 uses ``set([...])``.
        return shortrepr(object, '}' if PY3 else ')')
    if isinstance(object, tuple):
        return shortrepr(object, ')')
    if isinstance(object, list):
        return shortrepr(object, ']')
    if isinstance(object, dict):
        return shortrepr(object, '}')
    module = inspect.getmodule(object)
    if module is None:
        return text_('object %s' % str(object))
    modulename = module.__name__
    if inspect.ismodule(object):
        return text_('module %s' % modulename)
    if inspect.ismethod(object):
        receiver = getattr(object, '__self__', None)
        if receiver is None:  # pragma: no cover
            # Python 2 bound methods expose the receiver as ``im_self``.
            receiver = getattr(object, 'im_self', None)
        return text_('method %s of class %s.%s' %
                     (object.__name__, modulename,
                      receiver.__class__.__name__))
    if inspect.isclass(object):
        return text_('class %s' % ('%s.%s' % (modulename, object.__name__)))
    if inspect.isfunction(object):
        return text_('function %s' % ('%s.%s' % (modulename, object.__name__)))
    return text_('object %s' % str(object))
def write_docs(
    *,
    output_dir: Union[str, pathlib.Path],
    parser_config: parser.ParserConfig,
    yaml_toc: bool,
    root_module_name: str,
    root_title: str = 'TensorFlow',
    search_hints: bool = True,
    site_path: str = 'api_docs/python',
    gen_redirects: bool = True,
    gen_report: bool = False,
    extra_docs: Optional[Dict[int, str]] = None,
):
    """Write previously extracted docs to disk.

    Write a docs page for each symbol included in the indices of
    parser_config to a tree of docs at `output_dir`.

    Symbols with multiple aliases will have only one page written about them,
    which is referenced for all aliases.

    Args:
      output_dir: Directory to write documentation markdown files to. Will be
        created if it doesn't exist.
      parser_config: A `parser.ParserConfig` object, containing all the
        necessary indices.
      yaml_toc: Set to `True` to generate a "_toc.yaml" file.
      root_module_name: (str) the name of the root module (`tf` for
        tensorflow).
      root_title: The title name for the root level index.md.
      search_hints: (bool) include meta-data search hints at the top of each
        output file.
      site_path: The output path relative to the site root. Used in the
        `_toc.yaml` and `_redirects.yaml` files.
      gen_redirects: Bool which decides whether to generate _redirects.yaml
        file or not.
      gen_report: If True, a report for the library is generated by linting
        the docstrings of its public API symbols.
      extra_docs: Extra docs for symbols like public constants(list, tuple,
        etc) that need to be added to the markdown pages created.

    Raises:
      ValueError: if `output_dir` is not an absolute path
    """
    output_dir = pathlib.Path(output_dir)
    site_path = pathlib.Path('/', site_path)

    # Make output_dir.
    if not output_dir.is_absolute():
        raise ValueError("'output_dir' must be an absolute path.\n"
                         f" output_dir='{output_dir}'")
    output_dir.mkdir(parents=True, exist_ok=True)

    # These dictionaries are used for table-of-contents generation below
    # They will contain, after the for-loop below::
    #  - module name(string):classes and functions the module contains(list)
    module_children = {}

    # Collect redirects for an api _redirects.yaml file.
    redirects = []

    if gen_report:
        api_report_obj = utils.ApiReport()

    # Parse and write Markdown pages, resolving cross-links (`tf.symbol`).
    for full_name in sorted(parser_config.index.keys(),
                            key=lambda k: k.lower()):
        py_object = parser_config.index[full_name]

        if full_name in parser_config.duplicate_of:
            continue

        # Methods constants are only documented only as part of their
        # parent's page.
        if parser_config.reference_resolver.is_fragment(full_name):
            continue

        # Remove the extension from the path.
        docpath, _ = os.path.splitext(parser.documentation_path(full_name))

        # For a module, remember the module for the table-of-contents
        if inspect.ismodule(py_object):
            if full_name in parser_config.tree:
                mod_obj = Module(
                    module=full_name,
                    py_object=py_object,
                    path=str(site_path / docpath))
                module_children[full_name] = mod_obj
        # For something else that's documented,
        # figure out what module it lives in
        else:
            subname = str(full_name)
            # Walk up the dotted name until an ancestor module is found.
            while True:
                subname = subname[:subname.rindex('.')]
                if inspect.ismodule(parser_config.index[subname]):
                    module_name = parser_config.duplicate_of.get(
                        subname, subname)
                    child_mod = ModuleChild(
                        name=full_name,
                        py_object=py_object,
                        parent=module_name,
                        path=str(site_path / docpath))
                    module_children[module_name].add_children(child_mod)
                    break

        # Generate docs for `py_object`, resolving references.
        # NOTE(review): the bare `except` converts every failure into a
        # ValueError and drops the original traceback — consider
        # `raise ... from e`.
        try:
            page_info = parser.docs_for_object(full_name, py_object,
                                               parser_config, extra_docs)
        except:
            raise ValueError(
                f'Failed to generate docs for symbol: `{full_name}`')

        if gen_report and not full_name.startswith(
            ('tf.compat.v', 'tf.keras.backend', 'tf.numpy',
             'tf.experimental.numpy')):
            api_report_obj.fill_metrics(page_info)
            continue

        path = output_dir / parser.documentation_path(full_name)

        content = _get_headers(page_info, search_hints)
        content.append(pretty_docs.build_md_page(page_info))
        text = '\n'.join(content)
        try:
            path.parent.mkdir(exist_ok=True, parents=True)
            path.write_text(text, encoding='utf-8')
        except OSError:
            raise OSError('Cannot write documentation for '
                          f'{full_name} to {path.parent}')

        duplicates = parser_config.duplicates.get(full_name, [])
        if not duplicates:
            continue

        duplicates = [item for item in duplicates if item != full_name]

        if gen_redirects:
            for dup in duplicates:
                from_path = site_path / dup.replace('.', '/')
                to_path = site_path / full_name.replace('.', '/')
                redirects.append({'from': str(from_path), 'to': str(to_path)})

    if gen_report:
        serialized_proto = api_report_obj.api_report.SerializeToString()
        raw_proto = output_dir / 'api_report.pb'
        raw_proto.write_bytes(serialized_proto)
        # Report mode writes only the report, no pages/toc/redirects.
        return

    if yaml_toc:
        toc_gen = GenerateToc(module_children)
        toc_dict = toc_gen.generate()

        # Replace the overview path *only* for 'TensorFlow' to
        # `/api_docs/python/tf_overview`. This will be redirected to
        # `/api_docs/python/tf`.
        toc_values = toc_dict['toc'][0]
        if toc_values['title'] == 'tf':
            section = toc_values['section'][0]
            section['path'] = str(site_path / 'tf_overview')

        leftnav_toc = output_dir / root_module_name / '_toc.yaml'
        with open(leftnav_toc, 'w') as toc_file:
            yaml.dump(toc_dict, toc_file, default_flow_style=False)

    if redirects and gen_redirects:
        if yaml_toc and toc_values['title'] == 'tf':
            redirects.append({
                'from': str(site_path / 'tf_overview'),
                'to': str(site_path / 'tf'),
            })

        redirects_dict = {
            'redirects': sorted(redirects,
                                key=lambda redirect: redirect['from'])
        }

        api_redirects_path = output_dir / root_module_name / '_redirects.yaml'
        with open(api_redirects_path, 'w') as redirect_file:
            yaml.dump(redirects_dict, redirect_file, default_flow_style=False)

    # Write a global index containing all full names with links.
    with open(output_dir / root_module_name / 'all_symbols.md', 'w') as f:
        global_index = parser.generate_global_index(
            root_title, parser_config.index, parser_config.reference_resolver)
        if not search_hints:
            global_index = 'robots: noindex\n' + global_index
        f.write(global_index)
def not_module_or_function(x):
    """Return True when `x` is neither a module nor a function."""
    is_mod = inspect.ismodule(x)
    is_fn = inspect.isfunction(x)
    return not is_mod and not is_fn
def _render(self) -> Iterable[RenderableType]:
    """Render object.

    Yields the renderable pieces of an object-inspection report:
    signature, docstring, value panel, and a table of attributes,
    according to the self.dunder/private/sort/docs/methods/value flags.
    """

    def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
        # Sort non-callables before callables, then case-insensitively
        # by name with leading/trailing underscores ignored.
        key, (_error, value) = item
        return (callable(value), key.strip("_").lower())

    def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
        """Get attribute or any exception."""
        try:
            return (None, getattr(obj, attr_name))
        except Exception as error:
            return (error, None)

    obj = self.obj
    keys = dir(obj)
    total_items = len(keys)
    if not self.dunder:
        keys = [key for key in keys if not key.startswith("__")]
    if not self.private:
        keys = [key for key in keys if not key.startswith("_")]
    not_shown_count = total_items - len(keys)
    items = [(key, safe_getattr(key)) for key in keys]
    if self.sort:
        items.sort(key=sort_items)

    items_table = Table.grid(padding=(0, 1), expand=False)
    items_table.add_column(justify="right")
    add_row = items_table.add_row
    highlighter = self.highlighter

    if callable(obj):
        signature = self._get_signature("", obj)
        if signature is not None:
            yield signature
            yield ""

    if self.docs:
        _doc = getdoc(obj)
        if _doc is not None:
            if not self.help:
                # Without full help, show only the first paragraph.
                _doc = _first_paragraph(_doc)
            doc_text = Text(_reformat_doc(_doc), style="inspect.help")
            doc_text = highlighter(doc_text)
            yield doc_text
            yield ""

    if self.value and not (isclass(obj) or callable(obj) or ismodule(obj)):
        yield Panel(
            Pretty(obj, indent_guides=True, max_length=10, max_string=60),
            border_style="inspect.value.border",
        )
        yield ""

    for key, (error, value) in items:
        key_text = Text.assemble(
            (
                key,
                "inspect.attr.dunder" if key.startswith("__") else "inspect.attr",
            ),
            (" =", "inspect.equals"),
        )
        if error is not None:
            # Attribute access raised: show the exception instead of a value.
            warning = key_text.copy()
            warning.stylize("inspect.error")
            add_row(warning, highlighter(repr(error)))
            continue

        if callable(value):
            if not self.methods:
                continue
            _signature_text = self._get_signature(key, value)
            if _signature_text is None:
                add_row(key_text, Pretty(value, highlighter=highlighter))
            else:
                if self.docs:
                    docs = getdoc(value)
                    if docs is not None:
                        _doc = _reformat_doc(str(docs))
                        if not self.help:
                            _doc = _first_paragraph(_doc)
                        # Keep one-line docs on the signature line; put
                        # multi-line docs on a new line.
                        _signature_text.append("\n" if "\n" in _doc else " ")
                        doc = highlighter(_doc)
                        doc.stylize("inspect.doc")
                        _signature_text.append(doc)
                add_row(key_text, _signature_text)
        else:
            add_row(key_text, Pretty(value, highlighter=highlighter))
    if items_table.row_count:
        yield items_table
    elif not_shown_count:
        yield Text.from_markup(
            f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
            f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
        )
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
    """
    Determines the URL of the API page for the specified object, and
    optionally open that page in a web browser.

    .. note::
        You must be connected to the internet for this to function even if
        ``openinbrowser`` is `False`, unless you provide a local version of
        the documentation to ``version`` (e.g., ``file:///path/to/docs``).

    Parameters
    ----------
    obj
        The object to open the docs for or its fully-qualified name
        (as a str).
    version : str
        The doc version - either a version number like '0.1', 'dev' for
        the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults
        to latest if you are on aren't on a release, otherwise, the version
        you are on.
    openinbrowser : bool
        If `True`, the `webbrowser` package will be used to open the doc
        page in a new web browser window.
    timeout : number, optional
        The number of seconds to wait before timing-out the query to
        the astropy documentation.  If not given, the default python
        stdlib timeout will be used.

    Returns
    -------
    url : str
        The loaded URL

    Raises
    ------
    ValueError
        If the documentation can't be found
    """
    import webbrowser
    from zlib import decompress
    from astropy.utils.data import get_readable_fileobj

    # Normalize `obj` to a fully-qualified dotted name string.
    if (not isinstance(obj, str) and hasattr(obj, '__module__') and
            hasattr(obj, '__name__')):
        obj = obj.__module__ + '.' + obj.__name__
    elif inspect.ismodule(obj):
        obj = obj.__name__

    if version is None:
        from astropy import version
        if version.release:
            version = 'v' + version.version
        else:
            version = 'dev'

    # `version` may also be a base URL (possibly pointing at index.html).
    if '://' in version:
        if version.endswith('index.html'):
            baseurl = version[:-10]
        elif version.endswith('/'):
            baseurl = version
        else:
            baseurl = version + '/'
    elif version == 'dev' or version == 'latest':
        baseurl = 'http://devdocs.astropy.org/'
    else:
        baseurl = f'https://docs.astropy.org/en/{version}/'

    # Custom request headers; see
    # https://github.com/astropy/astropy/issues/8990
    url = baseurl + 'objects.inv'
    headers = {'User-Agent': f'Astropy/{version}'}
    with get_readable_fileobj(url, encoding='binary', remote_timeout=timeout,
                              http_headers=headers) as uf:
        oiread = uf.read()

        # need to first read/remove the first four lines, which have info
        # before the compressed section with the actual object inventory
        idx = -1
        headerlines = []
        for _ in range(4):
            oldidx = idx
            idx = oiread.index(b'\n', oldidx + 1)
            headerlines.append(oiread[(oldidx + 1):idx].decode('utf-8'))

        # intersphinx version line, project name, and project version
        ivers, proj, vers, compr = headerlines
        if 'The remainder of this file is compressed using zlib' not in compr:
            raise ValueError('The file downloaded from {} does not seem to be'
                             'the usual Sphinx objects.inv format. Maybe it '
                             'has changed?'.format(baseurl + 'objects.inv'))

        compressed = oiread[(idx + 1):]

    decompressed = decompress(compressed).decode('utf-8')

    resurl = None

    # Each inventory line: name, domain, priority, location, display-name;
    # a trailing '$' in the location is shorthand for the object's name.
    for l in decompressed.strip().splitlines():
        ls = l.split()
        name = ls[0]
        loc = ls[3]
        if loc.endswith('$'):
            loc = loc[:-1] + name

        if name == obj:
            resurl = baseurl + loc
            break

    if resurl is None:
        raise ValueError(f'Could not find the docs for the object {obj}')
    elif openinbrowser:
        webbrowser.open(resurl)

    return resurl
def _verify_type(self, imported): if inspect.isclass(imported) or inspect.ismodule(imported): return imported raise DataError('Expected class or module, got %s.' % type_name(imported))
def _log_import_succeeded(self, item, name, source): import_type = '%s ' % self._type if self._type else '' item_type = 'module' if inspect.ismodule(item) else 'class' location = ("'%s'" % source) if source else 'unknown location' self._logger.info("Imported %s%s '%s' from %s." % (import_type, item_type, name, location))
def _lookfor_generate_cache(module, import_modules, regenerate):
    """
    Generate docstring cache for given module.

    Parameters
    ----------
    module : str, None, module
        Module for which to generate docstring cache
    import_modules : bool
        Whether to import sub-modules in packages.
    regenerate : bool
        Re-generate the docstring cache

    Returns
    -------
    cache : dict {obj_full_name: (docstring, kind, index), ...}
        Docstring cache for the module, either cached one (regenerate=False)
        or newly generated.
    """
    # Local import to speed up numpy's import time.
    import inspect
    from io import StringIO

    if module is None:
        module = "numpy"

    if isinstance(module, str):
        try:
            __import__(module)
        except ImportError:
            return {}
        module = sys.modules[module]
    elif isinstance(module, list) or isinstance(module, tuple):
        # A collection of modules: merge the individual caches.
        cache = {}
        for mod in module:
            cache.update(_lookfor_generate_cache(mod, import_modules,
                                                 regenerate))
        return cache

    # Reuse a previously built cache unless regeneration was requested.
    if id(module) in _lookfor_caches and not regenerate:
        return _lookfor_caches[id(module)]

    # walk items and collect docstrings
    cache = {}
    _lookfor_caches[id(module)] = cache
    seen = {}
    index = 0
    # Breadth-first walk over the module's attribute tree.
    stack = [(module.__name__, module)]
    while stack:
        name, item = stack.pop(0)
        if id(item) in seen:
            continue
        seen[id(item)] = True

        index += 1
        kind = "object"

        if inspect.ismodule(item):
            kind = "module"
            try:
                _all = item.__all__
            except AttributeError:
                _all = None

            # import sub-packages
            if import_modules and hasattr(item, '__path__'):
                for pth in item.__path__:
                    for mod_path in os.listdir(pth):
                        this_py = os.path.join(pth, mod_path)
                        init_py = os.path.join(pth, mod_path, '__init__.py')
                        if (os.path.isfile(this_py) and
                                mod_path.endswith('.py')):
                            to_import = mod_path[:-3]
                        elif os.path.isfile(init_py):
                            to_import = mod_path
                        else:
                            continue
                        if to_import == '__init__':
                            continue

                        try:
                            # Silence anything the import prints.
                            old_stdout = sys.stdout
                            old_stderr = sys.stderr
                            try:
                                sys.stdout = StringIO()
                                sys.stderr = StringIO()
                                __import__("%s.%s" % (name, to_import))
                            finally:
                                sys.stdout = old_stdout
                                sys.stderr = old_stderr
                        # Catch SystemExit, too
                        except (Exception, SystemExit):
                            continue

            for n, v in _getmembers(item):
                try:
                    item_name = getattr(v, '__name__', "%s.%s" % (name, n))
                    mod_name = getattr(v, '__module__', None)
                except NameError:
                    # ref. SWIG's global cvars
                    #    NameError: Unknown C global variable
                    item_name = "%s.%s" % (name, n)
                    mod_name = None
                if '.' not in item_name and mod_name:
                    item_name = "%s.%s" % (mod_name, item_name)

                if not item_name.startswith(name + '.'):
                    # don't crawl "foreign" objects
                    if isinstance(v, ufunc):
                        # ... unless they are ufuncs
                        pass
                    else:
                        continue
                elif not (inspect.ismodule(v) or _all is None or n in _all):
                    continue
                stack.append(("%s.%s" % (name, n), v))
        elif inspect.isclass(item):
            kind = "class"
            for n, v in _getmembers(item):
                stack.append(("%s.%s" % (name, n), v))
        elif hasattr(item, "__call__"):
            kind = "func"

        try:
            doc = inspect.getdoc(item)
        except NameError:
            # ref SWIG's NameError: Unknown C global variable
            doc = None
        if doc is not None:
            cache[name] = (doc, kind, index)

    return cache
def setitem(self, key, value):
    # Route `value` into the app's namespaces: a module is registered in
    # ``app.modules`` (unless module imports are disabled); any other
    # value is stored in ``app.objects``.
    # NOTE(review): with ``prevent_module_import`` set, a module value is
    # silently dropped — confirm that is the intended behavior.
    if inspect.ismodule(value):
        if not self.prevent_module_import:
            self.app.modules[key] = value
    else:
        self.app.objects[key] = value
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved.

    FIXED version with which we monkeypatch the stdlib to work around a bug.
    (Python 2 code: uses im_func/func_code.)"""
    file = getsourcefile(object) or getfile(object)
    # If the object is a frame, then trying to get the globals dict from its
    # module won't work. Instead, the frame object itself has the globals
    # dictionary.
    globals_dict = None
    if inspect.isframe(object):
        # XXX: can this ever be false?
        globals_dict = object.f_globals
    else:
        module = getmodule(object, file)
        if module:
            globals_dict = module.__dict__
    lines = linecache.getlines(file, globals_dict)
    if not lines:
        raise IOError('could not get source code')

    if ismodule(object):
        # A module's source starts at the top of the file.
        return lines, 0

    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')

    # Reduce methods/functions/tracebacks/frames down to a code object.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        pmatch = pat.match
        # fperez - fix: sometimes, co_firstlineno can give a number larger
        # than the length of lines, which causes an error. Safeguard
        # against that.
        lnum = min(object.co_firstlineno, len(lines)) - 1
        # Scan backwards for the def/lambda/decorator line.
        while lnum > 0:
            if pmatch(lines[lnum]):
                break
            lnum -= 1
        return lines, lnum
    raise IOError('could not find code object')
def doc(mod, outdir='.', index=True, back=None, script=None):
    """This is the main function of odpydoc. It documents a module/package
    by importing it and inspecting the objects it contains, creating HTML
    strings for them and spitting out a single HTML file. Subpackages in
    the target package's __all__ variable are recursively documented.

    args:
        mod - str, module name or module object
    optional args:
        outdir - directory to write output html files into
        index - bool, whether to name the top module index.html
        back - link for going back/up a module
        script - path to file containing javascript script to include in
                 the head of the resulting html file. The scripts must
                 have script tags around them in the file."""
    #import the module if a string is passed in
    if (type(mod) is str):
        try:
            mod = importlib.import_module(mod)
            # Reload so re-running picks up edits to the target module.
            mod = importlib.reload(mod)
            print('imported')
        except (ImportError, SyntaxError) as e:
            print(e)
            raise ImportError('Cannot import "%s"' % mod)
    else:
        importlib.reload(mod)
    #store the module name
    mod_name = mod.__name__
    #get the output file name
    if index:
        fnout = join(outdir, 'index.html')
    else:
        fnout = join(outdir, mod_name + '.html')
    #get the module's dictionary/namespace/whatever
    try:
        v = vars(mod)
    except (TypeError):
        return (None)
    #check __all__
    if ('__all__' in v):
        #store a list of submodules
        submods_in_all = [k for k in v['__all__'] if (type(v[k]) is type(mod))]
        #recursively document submodules
        for submod_name in submods_in_all:
            if type(v[submod_name]) is type(mod):
                doc(v[submod_name], outdir=outdir, index=False,
                    back=basename(fnout))
        #remove variables that are not public
        v = {k: v[k] for k in v if (k in v['__all__'])}
    else:
        submods_in_all = []
    #remove private keys
    v = {k: v[k] for k in v if (k[0] != '_')}
    #separate the objects in v based on their types
    submodules, functions, classes, others = dict(), dict(), dict(), dict()
    for k in v:
        t = type(v[k])
        if (inspect.ismodule(v[k])):
            # Only submodules listed in __all__ are documented.
            if (k in submods_in_all):
                submodules[k] = v[k]
        elif (inspect.isfunction(v[k])):
            functions[k] = v[k]
        elif (inspect.isclass(v[k])):
            classes[k] = v[k]
        else:
            others[k] = v[k]
    #get the module docstring, if any
    mod_docstring = _get_docstr(mod)
    if (mod_docstring):
        mod_docstring = (
            '<div class="section">%s</div>'
            % mod_docstring.replace('class="docstr"', 'id="mod-docstr"'))
    #get script, if any
    _script = ''
    if script is not None:
        with open(script, 'r') as ifile:
            _script = ifile.read()
    # Assemble the final single-page HTML document.
    html = ("""
    <html lang="en">
    <head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
    <title>%s</title>
    <link href="https://fonts.googleapis.com/css?family=Anonymous+Pro|Open+Sans" rel="stylesheet">
    <style>%s</style>
    %s
    <!--Control the width and height of the #main element-->
    <script type="text/javascript">%s</script>
    </head>
    <body onresize="arrange()" onload="arrange()">
    <!--Back to Top Button-->
    <a id="go-to-top" href="#">go to top</a>
    <!--Up Module Button-->
    %s
    <!--Navigation Sidebar-->
    %s
    <div id="main">
    <h1>%s</h1>
    <!--module docstring section-->
    %s
    <!--functions section-->
    %s
    <!--classes section-->
    %s
    <!--others section-->
    %s
    </div>
    </body>
    </html>
    """ % (mod_name,
           _css,
           _script,
           _js,
           _back_html(back),
           _nav_html(mod_name, submodules, functions, classes, others),
           mod_name,
           mod_docstring,
           _functions_to_html(functions),
           _classes_to_html(classes),
           _others_to_html(others)))
    #delete the module
    del (mod)
    #write the html
    with open(fnout, 'w') as ofile:
        ofile.write(html)
    print('documentation written to: %s' % fnout)
path = path.split("in: ")[-1] elif path == "Command": cmds.warning('%s : is a Command not a script' % value) return False except StandardError, error: log.info(error) # Inspect for Python if not path or not os.path.exists(path): log.info( 'This is not a known Mel command, inspecting Python libs for : %s' % value) try: log.debug('value : %s' % value) log.debug('value isString : ', isinstance(value, str)) log.debug('value callable: ', callable(value)) log.debug('value is module : ', inspect.ismodule(value)) log.debug('value is method : ', inspect.ismethod(value)) if isinstance(value, str): # if not callable(value): value = eval(value) path = inspect.getsourcefile(value) if path: # sourceType='python' log.info('path : %s' % path) except StandardError, error: log.exception(error) # Open the file with the default editor # FIXME: If Python and you're a dev then the .py file may be set to open in the default # Python runtime/editor and won't open as expected. Need to look at this. if path and os.path.exists(path):
def __init__(self, docstring, config=None, app=None, what='', name='',
             obj=None, options=None):
    # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None  # NOQA
    """Parse a Google-style docstring into reStructuredText lines.

    Args:
        docstring: The raw docstring, either as a single string or as a
            list of lines.
        config: Sphinx-style config object; falls back to ``app.config``
            or a fresh ``Config`` when not given.
        app: The Sphinx application object, if any.
        what: Kind of object the docstring belongs to ('class', 'module',
            'function', ...). Guessed from ``obj`` when empty.
        name: Fully qualified name of the documented object.
        obj: The documented object itself.
        options: Autodoc directive options, if any.
    """
    self._config = config
    self._app = app

    if not self._config:
        from bonaparte import Config
        # Prefer the app's config when an app was supplied; otherwise
        # build a default Config instance.
        self._config = self._app and self._app.config or Config()  # type: ignore

    if not what:
        # Infer the object kind when the caller did not specify one.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif isinstance(obj, collections.Callable):  # type: ignore
            what = 'function'
        else:
            what = 'object'

    self._what = what
    self._name = name
    self._obj = obj
    self._opt = options
    if isinstance(docstring, string_types):
        docstring = docstring.splitlines()
    self._lines = docstring
    # Peekable iterator over right-stripped lines; drives the parser.
    self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
    self._parsed_lines = []  # type: List[unicode]
    self._is_in_section = False
    self._section_indent = 0
    # Subclasses may predefine these before calling super().__init__.
    if not hasattr(self, '_directive_sections'):
        self._directive_sections = []  # type: List[unicode]
    if not hasattr(self, '_sections'):
        # Maps lower-cased section headers to their parser callables.
        self._sections = {
            'args': self._parse_parameters_section,
            'arguments': self._parse_parameters_section,
            'attention': partial(self._parse_admonition, 'attention'),
            'attributes': self._parse_attributes_section,
            'caution': partial(self._parse_admonition, 'caution'),
            'danger': partial(self._parse_admonition, 'danger'),
            'error': partial(self._parse_admonition, 'error'),
            'example': self._parse_examples_section,
            'examples': self._parse_examples_section,
            'hint': partial(self._parse_admonition, 'hint'),
            'important': partial(self._parse_admonition, 'important'),
            'keyword args': self._parse_keyword_arguments_section,
            'keyword arguments': self._parse_keyword_arguments_section,
            'methods': self._parse_methods_section,
            'note': partial(self._parse_admonition, 'note'),
            'notes': self._parse_notes_section,
            'other parameters': self._parse_other_parameters_section,
            'parameters': self._parse_parameters_section,
            'return': self._parse_returns_section,
            'returns': self._parse_returns_section,
            'raises': self._parse_raises_section,
            'references': self._parse_references_section,
            'see also': self._parse_see_also_section,
            'tip': partial(self._parse_admonition, 'tip'),
            'todo': partial(self._parse_admonition, 'todo'),
            'warning': partial(self._parse_admonition, 'warning'),
            'warnings': partial(self._parse_admonition, 'warning'),
            'warns': self._parse_warns_section,
            'yield': self._parse_yields_section,
            'yields': self._parse_yields_section,
        }  # type: Dict[unicode, Callable]
    self._load_custom_sections()
    self._parse()
reserved = dict(rez_version=__version__, ModifyList=ModifyList) globs = reserved.copy() result = {} with open(filepath) as f: try: code = compile(f.read(), filepath, 'exec') exec_(code, _globs_=globs) except Exception, e: raise ConfigurationError( "Error loading configuration from %s: %s" % (filepath, str(e))) for k, v in globs.iteritems(): if k != '__builtins__' \ and not ismodule(v) \ and k not in reserved: result[k] = v return result @lru_cache() def _load_config_yaml(filepath): with open(filepath) as f: content = f.read() try: doc = yaml.load(content) or {} except YAMLError as e: raise ConfigurationError("Error loading configuration from %s: %s" % (filepath, str(e)))
def ismodule(obj):
    """TFDecorator-aware replacement for inspect.ismodule.

    Strips any TFDecorator wrappers from ``obj`` before testing, so a
    decorated reference to a module is still recognized as one.
    """
    # unwrap() returns (decorator_chain, innermost_target); only the
    # innermost target matters for the module check.
    _, target = tf.__internal__.decorator.unwrap(obj)
    return _inspect.ismodule(target)
def generate_markdown(full_name, py_object, duplicate_of, duplicates,
                      index, tree, reverse_index, base_dir):
  """Generate Markdown docs for one object in the TF API.

  Dispatches to the function-, class-, or module-specific Markdown
  generator depending on what `py_object` is, resolving "@{symbol}"
  references and listing all known aliases of the symbol. A footer noting
  where the object is defined is appended to every page.

  Args:
    full_name: The fully qualified name of the symbol to be documented.
    py_object: The Python object whose docstring supplies the docs.
    duplicate_of: A `dict` mapping fully qualified names to "master" names,
      used to resolve "@{symbol}" references to the "master" name.
    duplicates: A `dict` mapping fully qualified names to the set of all
      aliases of that name, used to list alternative names on the page.
    index: A `dict` mapping fully qualified names to Python objects; used
      to produce docs for child objects and validate "@{symbol}" refs.
    tree: A `dict` mapping a fully qualified name to the names of all its
      members; populates the members section of class/module pages.
    reverse_index: A `dict` mapping objects in the index to full names.
    base_dir: A base path stripped from file locations written to the docs.

  Returns:
    A string containing the Markdown docs for `py_object`.

  Raises:
    RuntimeError: If we don't know how to document this kind of object.
  """
  master_name = duplicate_of.get(full_name, full_name)
  alias_names = duplicates.get(master_name, [full_name])

  # TODO(wicke): Once other pieces are ready, enable this also for partials.
  # Some methods in classes from extensions come in as routines, hence the
  # isroutine() check alongside ismethod()/isfunction().
  looks_like_function = (inspect.ismethod(py_object) or
                         inspect.isfunction(py_object) or
                         inspect.isroutine(py_object))

  if looks_like_function:
    page = _generate_markdown_for_function(master_name, alias_names,
                                           py_object, duplicate_of,
                                           reverse_index)
  elif inspect.isclass(py_object):
    page = _generate_markdown_for_class(master_name, alias_names, py_object,
                                        duplicate_of, index, tree,
                                        reverse_index)
  elif inspect.ismodule(py_object):
    page = _generate_markdown_for_module(master_name, alias_names, py_object,
                                         duplicate_of, index, tree)
  else:
    raise RuntimeError('Cannot make docs for object %s: %r' % (full_name,
                                                               py_object))

  # Every page gets a note on the bottom about where this object is defined.
  # TODO(wicke): If py_object is decorated, get the decorated object instead.
  # TODO(wicke): Only use decorators that support this in TF.
  try:
    rel_path = os.path.relpath(inspect.getfile(py_object), base_dir)

    # TODO(wicke): If this is a generated file, point to the source instead.
    # Never include links outside this code base.
    if not rel_path.startswith('..'):
      page += '\n\nDefined in [`%s`](%s%s).\n\n' % (
          rel_path, _CODE_URL_PREFIX, rel_path)
  except TypeError:  # getfile throws TypeError if py_object is a builtin.
    page += '\n\nThis is an alias for a Python built-in.'

  return page
y_label, title, log_x=False): """ Given a list of data, and corresponding meta info, plot a line graph using it. Args: x_axis: list The list contains the values of y axis. y_axis: list The list contains the values of x axis. log_x: Boolean If True, x_axis will be taken log. """ series = Series(data=y_axis, index=x_axis) # Plot sns.set_context("talk") fig = plt.figure() axe = fig.add_subplot(111) axe.set_xlabel(x_label) axe.set_ylabel(y_label) axe.set_title(title) series.plot(ax=axe, kind="line", logx=log_x) filename = "{}.jpg".format(title.replace(' ', '_')) fig.savefig(filename) __all__ = [name for name, x in locals().items() if not inspect.ismodule(x) and not inspect.isabstract(x)]
AnyEPG, AppProfile, AttributeCriterion, BaseContract, BGPSession, BridgeDomain, CollectionPolicy, CommonEPG, Context, Contract, ContractInterface, ContractSubject, Endpoint, EPG, EPGDomain, FexInterface, Filter, FilterEntry, IPEndpoint, InputTerminal, L2ExtDomain, L2Interface, L3ExtDomain, L3Interface, LogicalModel, MonitorPolicy, MonitorStats, MonitorTarget, NetworkPool, OSPFInterface, OSPFInterfacePolicy, OSPFRouter, OutputTerminal, OutsideEPG, OutsideL2, OutsideL2EPG, OutsideL3, OutsideNetwork, PhysDomain, PortChannel, Search, Subnet, Taboo, Tenant, TunnelInterface, VMM, VMMCredentials, VmmDomain, VMMvSwitchInfo, Tag, _interface_from_dn) from .acitoolkitlib import Credentials, AcitoolkitGraphBuilder # noqa from .acifakeapic import FakeSession # noqa # Dependent on acitoolkit from .aciConcreteLib import ( # noqa ConcreteAccCtrlRule, ConcreteArp, ConcreteBD, ConcreteContext, ConcreteEp, ConcreteFilter, ConcreteFilterEntry, ConcreteLoopback, ConcreteOverlay, ConcretePortChannel, ConcreteSVI, ConcreteVpc, ConcreteVpcIf, ConcreteTunnel, ConcreteCdp) # Dependent on aciconcretelib from .aciphysobject import ( # noqa Cluster, ExternalSwitch, Fabric, Fan, Fantray, Interface, Linecard, Link, Node, PhysicalModel, Pod, Powersupply, Process, Supervisorcard, Systemcontroller, WorkingData, ) import inspect as _inspect __all__ = _about_exports + sorted( name for name, obj in locals().items() if not (name.startswith('_') or _inspect.ismodule(obj)))
def test():
    shell = novaclient.shell.OpenStackComputeShell()
    # Every plugin discovered via entry points should be the 'foo'
    # extension and expose an actual module object.
    for plugin_name, plugin_module in shell._discover_via_entry_points():
        self.assertEqual('foo', plugin_name)
        self.assertTrue(inspect.ismodule(plugin_module))
def include(self, other_workflow, namespace=None):
    """Include targets from another :class:`gwf.Workflow` into this workflow.

    This method can be given either an :class:`gwf.Workflow` instance,
    a module or a path to a workflow file.

    If a module or path the workflow object to include will be determined
    according to the following rules:

    1. If a module object is given, the module must define an attribute
       named `gwf` containing a :class:`gwf.Workflow` object.
    2. If a path is given it must point to a file defining a module with an
       attribute named `gwf` containing a :class:`gwf.Workflow`
       object. If you want to include a workflow with another name you can
       specify the attribute name with a colon, e.g.::

            /some/path/workflow.py:myworkflow

       This will include all targets from the workflow `myworkflow`
       declared in the file `/some/path/workflow.py`.

    When a :class:`gwf.Workflow` instance has been obtained, all targets
    will be included directly into this workflow. To avoid name clashes the
    `namespace` argument must be provided. For example::

        workflow1 = Workflow()
        workflow1.target('TestTarget')

        workflow2 = Workflow()
        workflow2.target('TestTarget')

        workflow1.include(workflow2, namespace='wf1')

    The workflow now contains two targets named `TestTarget` (defined in
    `workflow2`) and `wf1.TestTarget` (defined in `workflow1`). The
    `namespace` parameter can be left out if the workflow to be included
    has been named::

        workflow1 = Workflow(name='wf1')
        workflow1.target('TestTarget')

        workflow2 = Workflow()
        workflow2.target('TestTarget')

        workflow1.include(workflow2)

    This yields the same result as before. The `namespace` argument can be
    used to override the specified name::

        workflow1 = Workflow(name='wf1')
        workflow1.target('TestTarget')

        workflow2 = Workflow()
        workflow2.target('TestTarget')

        workflow1.include(workflow2, namespace='foo')

    The workflow will now contain targets named `TestTarget` and
    `foo.TestTarget`.

    :raises TypeError: If `other_workflow` is not a string, a module or a
        :class:`gwf.Workflow` instance.
    """
    if isinstance(other_workflow, Workflow):
        self.include_workflow(other_workflow, namespace=namespace)
    elif isinstance(other_workflow, str):
        self.include_path(other_workflow, namespace=namespace)
    elif inspect.ismodule(other_workflow):
        self.include_workflow(getattr(other_workflow, 'gwf'),
                              namespace=namespace)
    else:
        # Bug fix: the old message omitted modules even though the branch
        # above accepts them; list every supported argument type.
        raise TypeError(
            'First argument must be either a string, a module or a '
            'Workflow object.')
def testLayers(self):
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    # tf.contrib.layers must resolve to a real module object.
    layers = tf.contrib.layers
    assert inspect.ismodule(layers)
def delitem(self, key, value):
    """Remove *key* from the app's module registry when *value* is a
    module, otherwise from the object registry."""
    registry = self.app.modules if inspect.ismodule(value) else self.app.objects
    del registry[key]
def check_rest(module, names, dots=True):
    """
    Check reStructuredText formatting of docstrings

    Parameters
    ----------
    module : ModuleType
    names : set

    Returns
    -------
    result : list
        List of [(module_name, success_flag, output),...]
    """
    # `unicode` only exists on Python 2; fall back to the Python 3 set.
    try:
        skip_types = (dict, str, unicode, float, int)
    except NameError:
        # python 3
        skip_types = (dict, str, float, int)

    results = []

    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]

    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)

        if obj is None:
            results.append(
                (full_name, False, "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            continue

        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                text = str(get_doc_object(obj))
            except Exception:
                import traceback
                results.append(
                    (full_name, False,
                     "Error in docstring format!\n" +
                     traceback.format_exc()))
                continue

        # Bug fix: inspect.getdoc() returns None for a module without a
        # docstring; the original code then crashed in re.search() below.
        if text is None:
            results.append(
                (full_name, False, "%s has no docstring" % (full_name,)))
            continue

        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue

        # getsourcefile() raises TypeError for built-ins; report without a
        # source location in that case.
        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            src_file = None

        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name

        results.append(
            (full_name,) +
            validate_rst_syntax(text, file_full_name, dots=dots))

    return results
def _io_discrepancy(member): # _io module names itself `io`: http://bugs.python.org/issue18602 member_self = getattr(member, "__self__", None) return (member_self and inspect.ismodule(member_self) and member_self.__name__ == "_io" and member.__module__ == "io")