def test_abstractmethod_integration(self):
    if test_support.due_to_ironpython_bug("http://www.codeplex.com/IronPython/WorkItem/View.aspx?WorkItemId=21116"):
        return
    for abstractthing in [abc.abstractmethod, abc.abstractproperty]:
        class C:
            __metaclass__ = abc.ABCMeta

            @abstractthing
            def foo(self): pass  # abstract
            def bar(self): pass  # concrete
        self.assertEqual(C.__abstractmethods__, set(["foo"]))
        self.assertRaises(TypeError, C)  # because foo is abstract
        self.assertTrue(isabstract(C))

        class D(C):
            def bar(self): pass  # concrete override of concrete
        self.assertEqual(D.__abstractmethods__, set(["foo"]))
        self.assertRaises(TypeError, D)  # because foo is still abstract
        self.assertTrue(isabstract(D))

        class E(D):
            def foo(self): pass
        self.assertEqual(E.__abstractmethods__, set())
        E()  # now foo is concrete, too
        self.assertFalse(isabstract(E))

        class F(E):
            @abstractthing
            def bar(self): pass  # abstract override of concrete
        self.assertEqual(F.__abstractmethods__, set(["bar"]))
        self.assertRaises(TypeError, F)  # because bar is abstract now
        self.assertTrue(isabstract(F))

def delete_discussion(session, discussion_id):
    from assembl.models import Discussion, DiscussionBoundBase
    # First, delete the discussion.
    session.delete(Discussion.get(discussion_id))
    session.flush()
    # See if anything is left...
    classes = DiscussionBoundBase._decl_class_registry.itervalues()
    classes_by_table = {
        cls.__dict__.get('__table__', None): cls for cls in classes}
    # Only direct subclass of abstract
    concrete_classes = set(filter(
        lambda cls: issubclass(cls, DiscussionBoundBase)
        and (not isabstract(cls)) and isabstract(cls.mro()[1]),
        classes_by_table.values()))
    tables = DiscussionBoundBase.metadata.sorted_tables
    tables.reverse()
    for table in tables:
        if table not in classes_by_table:
            continue
        cls = classes_by_table[table]
        if cls not in concrete_classes:
            continue
        print 'deleting', cls.__name__
        query = session.query(cls.id)
        conds = cls.get_discussion_conditions(discussion_id)
        assert conds
        cond = and_(*conds)
        v = JoinColumnsVisitor(cls, query, classes_by_table)
        v.traverse(cond)
        query = v.final_query().filter(cond)
        if query.count():
            print "*" * 20, "Not all deleted!"
            session.query(cls).filter(
                cls.id.in_(query.subquery())).delete(False)

def test_abstractmethod_integration(self):
    for abstractthing in [abc.abstractmethod, abc.abstractproperty]:
        class C(metaclass=abc.ABCMeta):
            @abstractthing
            def foo(self): pass  # abstract
            def bar(self): pass  # concrete
        self.assertEqual(C.__abstractmethods__, {"foo"})
        self.assertRaises(TypeError, C)  # because foo is abstract
        self.assertTrue(isabstract(C))

        class D(C):
            def bar(self): pass  # concrete override of concrete
        self.assertEqual(D.__abstractmethods__, {"foo"})
        self.assertRaises(TypeError, D)  # because foo is still abstract
        self.assertTrue(isabstract(D))

        class E(D):
            def foo(self): pass
        self.assertEqual(E.__abstractmethods__, set())
        E()  # now foo is concrete, too
        self.assertFalse(isabstract(E))

        class F(E):
            @abstractthing
            def bar(self): pass  # abstract override of concrete
        self.assertEqual(F.__abstractmethods__, {"bar"})
        self.assertRaises(TypeError, F)  # because bar is abstract now
        self.assertTrue(isabstract(F))

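# A minimal, self-contained sketch (not part of the tests above) of the abc
# behaviour those tests exercise: a class with unimplemented abstract methods
# reports them in __abstractmethods__, satisfies inspect.isabstract(), and
# refuses instantiation until every abstract method has been overridden.
import abc
import inspect

class Base(abc.ABC):
    @abc.abstractmethod
    def foo(self):
        ...

class Concrete(Base):
    def foo(self):
        return "ok"

assert Base.__abstractmethods__ == frozenset({"foo"})
assert inspect.isabstract(Base)
try:
    Base()
except TypeError:
    pass  # instantiating an abstract class raises TypeError
assert not inspect.isabstract(Concrete)
assert Concrete().foo() == "ok"
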
def check_for_uno(value):
    """Check that `value` points to a Python interpreter able to import `uno`."""
    try:
        inspect.isabstract(IDocumentGeneratorControlPanelSchema)
    except Exception:
        return True
    if 'python' not in value and os.system(value + ' -V') != 0:
        raise interfaces.InvalidPythonPath()
    if os.system(value + ' -c "import uno"') != 0:
        raise interfaces.InvalidUnoPath()
    return True

def __subclasshook__(cls, othercls):
    """Check that all abstract methods defined on `cls` are present on `othercls`.

    This hook exists only to support duck typing.
    """
    if not isabstract(cls) and not isabstract(othercls):
        return NotImplemented
    for C in cls.__mro__:
        for key in C.__dict__:
            item = C.__dict__[key]
            if hasattr(item, "__isabstractmethod__"):
                if not any(key in B.__dict__ for B in othercls.__mro__):
                    return NotImplemented
    return True

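# A rough, self-contained sketch (hypothetical class names) of how a
# __subclasshook__ like the one above enables duck-typed isinstance()/
# issubclass() checks: any class that provides the abstract method names is
# treated as a virtual subclass of the ABC, without inheriting from it.
import abc
import inspect

class Quacks(abc.ABC):
    @abc.abstractmethod
    def quack(self):
        ...

    @classmethod
    def __subclasshook__(cls, othercls):
        if not inspect.isabstract(cls):
            return NotImplemented
        for C in cls.__mro__:
            for key, item in C.__dict__.items():
                if getattr(item, "__isabstractmethod__", False):
                    if not any(key in B.__dict__ for B in othercls.__mro__):
                        return NotImplemented
        return True

class Duck:  # note: no inheritance from Quacks
    def quack(self):
        return "quack"

assert issubclass(Duck, Quacks)
assert isinstance(Duck(), Quacks)
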
def iscommandclass(obj):
    return obj is not Command \
        and obj is not Lister \
        and inspect.isclass(obj) \
        and not inspect.isabstract(obj) \
        and hasattr(obj, '__dict__') \
        and 'take_action' in obj.__dict__

def _check_all_monkeypatched():
    """Double-checks that instances of sklearn estimators have acquired the
    proper "what" method. Raises an assertion error if it is not the case.
    """
    # Make sure we have added what to sklearn stuff
    whatamize_sklearn(check=False)
    # Trick to force python to populate part of the BaseEstimator hierarchy
    from sklearn.ensemble.forest import RandomForestClassifier
    assert BaseEstimator.__subclasscheck__(RandomForestClassifier)
    from sklearn.cluster import KMeans
    assert BaseEstimator.__subclasscheck__(KMeans)
    from sklearn.feature_extraction import DictVectorizer
    assert BaseEstimator.__subclasscheck__(DictVectorizer)
    from sklearn.decomposition import KernelPCA
    assert BaseEstimator.__subclasscheck__(KernelPCA)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        for cls in all_subclasses(BaseEstimator):
            if not inspect.isabstract(cls):
                try:
                    obj = cls()
                    assert hasattr(obj, 'what'), cls.__name__
                    assert isinstance(obj.what(), What), cls.__name__
                except TypeError:
                    pass
    return True

def testPSD(self):
    kernels = find_all_children(Kernel)
    for kernel, _ in kernels:
        if not inspect.isabstract(kernel):  # ignore abstract classes
            for data in self.get_data(kernel):
                eigens, _ = la.eigh(kernel().gram(data))
                self.assertTrue(np.all(eigens > -self.tol))

def available_handler_list(base_handler, filter_list=None):
    """
    Returns a list of handlers which are sub-classes of `base_handler`.

    The list is then filtered to include only classes which are sub-classes
    of any of the classes in `filter_list`.

    The idea is to use this along the lines of:

    >>> d_sinks = available_handler_list(DistributionSink)

    which returns a list of all of the distribution sink handlers, and

    >>> from base_file_handlers import FileHandler
    >>> d_file_sinks = available_handler_list(DistributionSink, [FileHandler,])

    Parameters
    ----------
    base_handler : type
        The base-class to find sub-classes of

    filter_list : list of type
        Only return handlers which are a subclass of any of the
        elements in filter_list (OR logic).
    """
    # grab the sub-classes
    h_lst = []
    # if base class is not abstract, keep it too
    if not inspect.isabstract(base_handler):
        h_lst.append(base_handler)
    # yay recursion
    _all_subclasses(base_handler, h_lst)
    # list comprehension logic
    return [h for h in h_lst
            if filter_list is None
            or any(issubclass(h, filt) for filt in filter_list)]

def test_abjad___init___01(class_):
    r'''All concrete classes initialize from empty input.
    '''
    if inspect.isabstract(class_):
        return
    instance = class_()
    assert instance is not None

def binary_ops():
    """Return a list of all classes used to construct arithmetic, like PLUS, DIVIDE, etc."""  # noqa
    allclasses = all_classes()
    opclasses = [opclass for opclass in allclasses
                 if issubclass(opclass, BinaryOperator)
                 and not inspect.isabstract(opclass)]
    return opclasses

def test_completeness(self):
    """
    Tests that all rules are being tested.
    """
    if not self.class_:
        # This is the base class testing, it is always complete.
        return
    classes_to_ignore = [TradingDayOfWeekRule, TradingDayOfMonthRule]
    dem = {
        k for k, v in iteritems(vars(zipline.utils.events))
        if isinstance(v, type)
        and issubclass(v, self.class_)
        and v is not self.class_
        and v not in classes_to_ignore
        and not isabstract(v)
    }
    ds = {
        k[5:] for k in dir(self)
        if k.startswith('test') and k[5:] in dem
    }
    self.assertTrue(
        dem <= ds,
        msg='This suite is missing tests for the following classes:\n' +
            '\n'.join(map(repr, dem - ds)),
    )

def __init__(cls, name, bases, nmspc):
    super(MetricRegistryMeta, cls).__init__(name, bases, nmspc)
    if not hasattr(cls, 'registry'):
        cls.registry = set()
    if not isabstract(cls):
        cls.registry.add(cls())

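# A self-contained sketch (invented names) of the same pattern: a metaclass
# keeps a shared registry and adds an instance of every concrete subclass to
# it, while abstract classes are skipped via inspect.isabstract().
import abc
import inspect

class RegistryMeta(abc.ABCMeta):
    def __init__(cls, name, bases, nmspc):
        super().__init__(name, bases, nmspc)
        if not hasattr(cls, 'registry'):
            cls.registry = set()
        if not inspect.isabstract(cls):
            cls.registry.add(cls())

class Metric(metaclass=RegistryMeta):
    @abc.abstractmethod
    def compute(self):
        ...

class Accuracy(Metric):
    def compute(self):
        return 1.0

# Only the concrete subclass ends up in the registry.
assert {type(m) for m in Metric.registry} == {Accuracy}
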
def test_java_params(self):
    import pyspark.ml.feature
    import pyspark.ml.classification
    import pyspark.ml.clustering
    import pyspark.ml.evaluation
    import pyspark.ml.pipeline
    import pyspark.ml.recommendation
    import pyspark.ml.regression
    modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
               pyspark.ml.evaluation, pyspark.ml.pipeline, pyspark.ml.recommendation,
               pyspark.ml.regression]
    for module in modules:
        for name, cls in inspect.getmembers(module, inspect.isclass):
            if not name.endswith('Model') and not name.endswith('Params') \
                    and issubclass(cls, JavaParams) and not inspect.isabstract(cls):
                # NOTE: disable check_params_exist until there is parity with Scala API
                check_params(self, cls(), check_params_exist=False)
    # Additional classes that need explicit construction
    from pyspark.ml.feature import CountVectorizerModel, StringIndexerModel
    check_params(self, CountVectorizerModel.from_vocabulary(['a'], 'input'),
                 check_params_exist=False)
    check_params(self, StringIndexerModel.from_labels(['a', 'b'], 'input'),
                 check_params_exist=False)

def list_classifiers():
    from inspect import getmembers
    from inspect import isabstract
    from inspect import isclass
    cls = []
    reg = []
    clus = []
    other = []
    for modulename in _sklearn_estimator_modules:
        modulepath = '%s.%s' % (_sklearn_base, modulename)
        module = __import__(modulepath, fromlist=[modulename])
        for name, obj in getmembers(module,
                                    lambda obj: isclass(obj) and not isabstract(obj)):
            obj = getattr(module, name)
            if obj is ClassifierMixin or obj is RegressorMixin:
                continue
            if issubclass(obj, ClassifierMixin):
                cls.append((modulepath, name))
            elif issubclass(obj, RegressorMixin):
                reg.append((modulepath, name))
            elif issubclass(obj, ClusterMixin):
                clus.append((modulepath, name))
            else:
                other.append((modulepath, name))
    return cls, reg, clus

def get_class(word, cls):
    if issubclass(cls, Exception):
        node = Node(word, "e")
    elif isabstract(cls):
        node = Node(word, "t")
    else:
        node = Node(word, "c")
    if hasattr(cls, "__init__"):
        v = cls.__init__
        if ismethod(v) and hasattr(v, "im_class") and v.im_class is cls:
            n = get_func(word, v.im_func)
            node.info = n.info
    node.info = node.info + "\n" + getinfo(cls)
    for x in classify_class_attrs(cls):
        if x.name.startswith("_"):
            continue
        if x.defining_class != cls:
            continue
        if x.kind == "property":
            node.add(Node(x.name, "d", info=getinfo(x.object)), True)
            continue
        elif x.kind == "data":
            node.add(Node(x.name, "d"), True)
            continue
        elif x.kind == "class method" or x.kind == "static method":
            kind = "M"
        else:
            kind = "m"
        n = get_func(x.name, x.object)
        n.kind = kind
        node.add(n, True)
    return node

def generate_hierarchy_from_module(module):
    from .case import is_testcase_subclass
    if isinstance(module, str):
        module = importlib.import_module(module)
    logger.debug("reload %s", module)
    reload_module(module)
    children = []
    for attr_name in dir(module):
        obj = getattr(module, attr_name)
        if is_testcase_subclass(obj) and not inspect.isabstract(obj):
            case_hierarchy = generate_hierarchy_from_testcase_class(obj)
            if case_hierarchy["children"]:
                children.append(case_hierarchy)
    imp_loader = pkgutil.get_loader(module)
    if imp_loader.is_package(module.__name__):
        for module_loader, sub_module_name, is_pkg in pkgutil.iter_modules(path=module.__path__):
            sub_suite_module = importlib.import_module(module.__name__ + "." + sub_module_name)
            sub_suite_hierarchy = generate_hierarchy_from_module(sub_suite_module)
            if sub_suite_hierarchy["children"]:
                children.append(sub_suite_hierarchy)
    return {"id": module.__name__,
            "name": module.__name__.rpartition(".")[2],
            "children": children}

def run(self, args): proc = self.proc arg = proc.cmd_argstr try: if not proc.curframe: # ?? Should we have set up a dummy globals # to have persistence? value = eval(arg, None, None) else: value = eval(arg, proc.curframe.f_globals, proc.curframe.f_locals) except: t, v = sys.exc_info()[:2] if type(t) == str: exc_type_name = t else: exc_type_name = t.__name__ if exc_type_name == 'NameError': self.errmsg("Name Error: %s" % arg) else: self.errmsg("%s: %s" % (exc_type_name, proc._saferepr(v))) return False self.section("What is for %s" % arg) get_doc = False if inspect.ismethod(value): get_doc = True self.msg('method %s%s' % (value.func_code.co_name, inspect.formatargspec(inspect.getargspec(value)))) elif inspect.isfunction(value): get_doc = True self.msg('function %s%s' % (value.func_code.co_name, inspect.formatargspec(inspect.getargspec(value)))) elif inspect.isabstract(value) or \ inspect.isbuiltin(value) or \ inspect.isclass(value) or \ inspect.isgeneratorfunction(value) or \ inspect.ismethoddescriptor(value): get_doc = True self.msg(type(value)) doc = inspect.getdoc(value) if get_doc and doc: self.msg(' doc:\n%s' % doc) comments = inspect.getcomments(value) if comments: self.msg(' comments:\n%s' % comments) try: m = inspect.getmodule(value) if m: self.msg(" module:\t%s" % m) except: try: f = inspect.getfile(value) self.msg(" file: %s" % f) except: pass pass return False
def get_available_inherited_classes(pkg, base_class):
    """Gets all inherited classes in modules for a given package

    This does not include subpackages.

    :type pkg: str
    :param pkg: a package name.
    :type base_class: object
    :param base_class: a base class.
    :rtype: list
    :returns: a list of inherited classes.
    """
    available_classes = []
    pkg_path = os.path.dirname(pkg.__file__)
    for _, mod_name, _ in pkgutil.iter_modules([pkg_path]):
        if not mod_name.startswith("_"):
            try:
                module = importlib.import_module("{0}.{1}".format(pkg.__name__, mod_name))
                for clazz in inspect.getmembers(module, inspect.isclass):
                    if clazz is not base_class:
                        if issubclass(clazz[1], base_class) and\
                                not inspect.isabstract(clazz[1]) and\
                                clazz[1] != base_class:
                            available_classes.append(clazz[1])
            except Exception as e:
                logger.warn(e.__str__())
    return set(available_classes)

def get_hierarchy_by_module(module: ModuleType, pattern: str = "test", reload: bool = False) -> OrderedDict:
    from ..case import is_testcase_subclass
    module = get_module_by_str_or_obj(module, reload)
    children = []
    hierarchy = OrderedDict([
        ("path", module.__name__),
        ("type", "module"),
        ("name", module.__name__.rpartition('.')[2]),
        ("children", children),
    ])
    for attr_name in dir(module):
        obj = getattr(module, attr_name)
        if is_testcase_subclass(obj) and not inspect.isabstract(obj):
            case_hierarchy = get_hierarchy_by_testcase_class(obj)
            if case_hierarchy["children"]:
                children.append(case_hierarchy)
    imp_loader = pkgutil.get_loader(module)
    if imp_loader.is_package(module.__name__):
        hierarchy["type"] = "package"
        for module_loader, sub_module_name, is_pkg in pkgutil.iter_modules(path=module.__path__):
            if is_pkg or (not is_pkg and re.match(pattern, sub_module_name)):
                sub_suite_module = importlib.import_module(module.__name__ + "." + sub_module_name)
                sub_suite_hierarchy = get_hierarchy_by_module(sub_suite_module, pattern, reload)
                if sub_suite_hierarchy["children"]:
                    children.append(sub_suite_hierarchy)
    return hierarchy

def __init__(cls, name, bases, nmspc):
    super(APluginRegister, cls).__init__(name, bases, nmspc)
    if not hasattr(cls, 'registry'):
        cls.registry = dict()
    if not inspect.isabstract(cls):
        cls.registry.setdefault(str(cls().name).lower(), {}).update(
            {str(cls().version).lower(): cls})

def __new__(cls, cls_name, bases, dct):
    '''Get all the abstract methods declared in our abstract base class,
    RPC-ify them, and bind them to the class we are creating.'''
    # we add EggsClient as one of our parent classes, so we can get all
    # the cool functions it provides for free.
    bases += (EggsClient,)
    # check that the first parent class is abstract. yes, at the moment
    # this is only reliable if your class only inherits from one abstract
    # class. Because of that I have the following todo:
    # TODO(shomik): make it so that the abstract class doesn't have to be
    # the first parent class, but rather it is found dynamically
    interface = bases[0]
    if not inspect.isabstract(interface):
        raise TypeError("Client class must inherit first from its abstract interface.")
    # for each abstract method, create an RPC-ified version of it and
    # add it to our class dict
    for method_name in interface.__abstractmethods__:
        dct[method_name] = EggsClient.remote_call(method_name)

    # define an __init__ method for our class that will take
    def init(self, server_endpoint="tcp://127.0.0.1:8085"):
        EggsClient.__init__(self, server_endpoint=server_endpoint)

    # rebind our wrapped __init__ to "__init__"
    dct["__init__"] = init
    # let our super class finish creating the class
    return super(EggsClientMeta, cls).__new__(cls, cls_name, bases, dct)

def __new__(mcls, name, bases, members):
    cls = super(_FitterMeta, mcls).__new__(mcls, name, bases, members)
    if not inspect.isabstract(cls) and not name.startswith("_"):
        mcls.registry.add(cls)
    return cls

def all_subclasses(in_c, sc_lst):
    t = in_c.__subclasses__()
    if len(t) > 0:
        sc_lst.extend(_ for _ in t if (not inspect.isabstract(_) and _.available()))
        for _sc in t:
            all_subclasses(_sc, sc_lst)

def is_descriptor_class(desc):
    r"""Check whether `desc` is a calculatable (concrete) descriptor class.

    Returns:
        bool
    """
    return (
        isinstance(desc, type)
        and issubclass(desc, Descriptor)
        and not inspect.isabstract(desc)
    )

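# A rough usage sketch under assumed definitions: `Descriptor` stands in for
# the library's abstract base class and `RingCount` is a hypothetical concrete
# descriptor. The predicate accepts only concrete subclasses and rejects
# instances, abstract classes, and unrelated objects.
import abc
import inspect

class Descriptor(abc.ABC):  # stand-in for the real base class
    @abc.abstractmethod
    def calculate(self):
        ...

class RingCount(Descriptor):  # hypothetical concrete descriptor
    def calculate(self):
        return 0

def is_descriptor_class(desc):
    return isinstance(desc, type) and issubclass(desc, Descriptor) and not inspect.isabstract(desc)

assert is_descriptor_class(RingCount)
assert not is_descriptor_class(Descriptor)    # abstract base
assert not is_descriptor_class(RingCount())   # instance, not a class
assert not is_descriptor_class(int)           # unrelated class
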
def test_abjad___rmul___01(class_):
    r'''All classes implementing __mul__ also implement __rmul__.
    '''
    if inspect.isabstract(class_):
        return
    if hasattr(class_, '__mul__'):
        assert hasattr(class_, '__rmul__')

def default(self, obj):
    if isinstance(obj, datetime):
        return self.default(date_time_2_millis(obj))
    elif isinstance(obj, Enum):
        return self.default(obj.name)
    elif isinstance(obj, Color):
        return self.default(obj.hex())
    elif hasattr(obj, "__dict__"):
        d = dict(
            (key, value)
            for key, value in inspect.getmembers(obj)
            if value is not None
            and not key == "Position"
            and not key.startswith("__")
            and not inspect.isabstract(value)
            and not inspect.isbuiltin(value)
            and not inspect.isfunction(value)
            and not inspect.isgenerator(value)
            and not inspect.isgeneratorfunction(value)
            and not inspect.ismethod(value)
            and not inspect.ismethoddescriptor(value)
            and not inspect.isroutine(value)
        )
        return self.default(d)
    return obj

def grab(exp_name):
    try:
        imported_module = import_module(f"{__name__}.{exp_name}")
    except ModuleNotFoundError:
        raise ImportError(
            "{} is not part of our experiment collection!".format(exp_name)
        )
    for dir_name in dir(imported_module):
        attribute = getattr(imported_module, dir_name)
        if (
            inspect.isclass(attribute)
            and issubclass(attribute, BaseProfile)
            and not inspect.isabstract(attribute)
        ):
            profile_class = attribute
            break
    else:
        raise ImportError(
            f"We currently don't have {exp_name}, but you are welcome to send in the "
            f"request for it!"
        )
    return profile_class

def items_from_path(path):
    """
    Looks for Item subclasses in the given path.

    An alternative method would involve metaclasses (as Django does it),
    but then it gets very hard to have two separate repos in the same
    process, because both of them would register config item classes
    globally.
    """
    if not isdir(path):
        return
    for filename in listdir(path):
        filepath = join(path, filename)
        if not filename.endswith(".py") or \
                not isfile(filepath) or \
                filename.startswith("_"):
            continue
        for name, obj in \
                utils.get_all_attrs_from_file(filepath).items():
            if obj == items.Item or name.startswith("_"):
                continue
            try:
                if issubclass(obj, items.Item) and not isabstract(obj):
                    yield obj
            except TypeError:
                pass

def _execute_generator_on_spec(self):
    """Renders a source file into its final form."""
    api_no_aliases_cache = None
    for attr_key in dir(self.generator_module):
        attr_value = getattr(self.generator_module, attr_key)
        if (inspect.isclass(attr_value) and
                issubclass(attr_value, Generator) and
                not inspect.isabstract(attr_value)):
            self._logger.info('Running generator: %s', attr_value.__name__)
            generator = attr_value(self.build_path, self.generator_args)
            if generator.preserve_aliases:
                api = self.api
            else:
                if not api_no_aliases_cache:
                    api_no_aliases_cache = remove_aliases_from_api(self.api)
                api = api_no_aliases_cache
            try:
                generator.generate(api)
            except:
                # Wrap this exception so that it isn't thought of as a bug
                # in the stone parser, but rather a bug in the generator.
                # Remove the last char of the traceback b/c it's a newline.
                raise GeneratorException(attr_value.__name__,
                                         traceback.format_exc()[:-1])

def dash_R(ns, the_module, test_name, test_func): """Run a test multiple times, looking for reference leaks. Returns: False if the test didn't leak references; True if we detected refleaks. """ # This code is hackish and inelegant, but it seems to do the job. import copyreg import collections.abc if not hasattr(sys, 'gettotalrefcount'): raise Exception("Tracking reference leaks requires a debug build " "of Python") # Avoid false positives due to various caches # filling slowly with random data: warm_caches() # Save current values for dash_R_cleanup() to restore. fs = warnings.filters[:] ps = copyreg.dispatch_table.copy() pic = sys.path_importer_cache.copy() try: import zipimport except ImportError: zdc = None # Run unmodified on platforms without zipimport support else: zdc = zipimport._zip_directory_cache.copy() abcs = {} for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]: if not isabstract(abc): continue for obj in abc.__subclasses__() + [abc]: abcs[obj] = _get_dump(obj)[0] # bpo-31217: Integer pool to get a single integer object for the same # value. The pool is used to prevent false alarm when checking for memory # block leaks. Fill the pool with values in -1000..1000 which are the most # common (reference, memory block, file descriptor) differences. int_pool = {value: value for value in range(-1000, 1000)} def get_pooled_int(value): return int_pool.setdefault(value, value) nwarmup, ntracked, fname = ns.huntrleaks fname = os.path.join(support.SAVEDCWD, fname) repcount = nwarmup + ntracked # Pre-allocate to ensure that the loop doesn't allocate anything new rep_range = list(range(repcount)) rc_deltas = [0] * repcount alloc_deltas = [0] * repcount fd_deltas = [0] * repcount getallocatedblocks = sys.getallocatedblocks gettotalrefcount = sys.gettotalrefcount fd_count = support.fd_count # initialize variables to make pyflakes quiet rc_before = alloc_before = fd_before = 0 if not ns.quiet: print("beginning", repcount, "repetitions", file=sys.stderr) print(("1234567890" * (repcount // 10 + 1))[:repcount], file=sys.stderr, flush=True) for i in rep_range: test_func() dash_R_cleanup(fs, ps, pic, zdc, abcs) # Collect cyclic trash and read memory statistics immediately after. support.gc_collect() alloc_after = getallocatedblocks() rc_after = gettotalrefcount() fd_after = fd_count() if not ns.quiet: print('.', end='', file=sys.stderr, flush=True) rc_deltas[i] = get_pooled_int(rc_after - rc_before) alloc_deltas[i] = get_pooled_int(alloc_after - alloc_before) fd_deltas[i] = get_pooled_int(fd_after - fd_before) alloc_before = alloc_after rc_before = rc_after fd_before = fd_after if not ns.quiet: print(file=sys.stderr) # These checkers return False on success, True on failure def check_rc_deltas(deltas): # Checker for reference counters and memomry blocks. # # bpo-30776: Try to ignore false positives: # # [3, 0, 0] # [0, 1, 0] # [8, -8, 1] # # Expected leaks: # # [5, 5, 6] # [10, 1, 1] return all(delta >= 1 for delta in deltas) def check_fd_deltas(deltas): return any(deltas) failed = False for deltas, item_name, checker in [ (rc_deltas, 'references', check_rc_deltas), (alloc_deltas, 'memory blocks', check_rc_deltas), (fd_deltas, 'file descriptors', check_fd_deltas) ]: # ignore warmup runs deltas = deltas[nwarmup:] if checker(deltas): msg = '%s leaked %s %s, sum=%s' % (test_name, deltas, item_name, sum(deltas)) print(msg, file=sys.stderr, flush=True) with open(fname, "a") as refrep: print(msg, file=refrep) refrep.flush() failed = True return failed
def next(self):
    # (method header inferred; the original snippet begins mid-method)
    try:
        return self.iter.next()
    except StopIteration:
        self.iter = self.loader.__iter__()
        A.inc_epoch()
        return self.iter.next()

def _forward_train(self):
    ret = [
        th.autograd.Variable(t.cuda() if A.use_cuda() else t)
        for t in self.next()
    ]
    A.cache_tensor_auto_scope(ret[0], "data")
    A.cache_tensor_auto_scope(ret[1], "labels")
    return ret

def _forward_val(self):
    ret = [
        th.autograd.Variable(t.cuda() if A.use_cuda() else t)
        for t in self.next()
    ]
    A.cache_tensor_auto_scope(ret[0], "val_data")
    A.cache_tensor_auto_scope(ret[1], "val_labels")
    return ret

__all__ = [
    name for name, x in locals().items()
    if not inspect.ismodule(x) and not inspect.isabstract(x)
]

def _valid_callback(self, callback):
    if hasattr(self, callback) and not isabstract(callback) \
            and not hasattr(getattr(self, callback), "_original"):
        return getattr(self, callback)
    return None

def test_docstring_parameters(): # Test module docstring formatting # Skip test if numpydoc is not found pytest.importorskip("numpydoc", reason="numpydoc is required to test the docstrings") # XXX unreached code as of v0.22 from numpydoc import docscrape incorrect = [] for name in PUBLIC_MODULES: if name.endswith(".conftest"): # pytest tooling, not part of the scikit-learn API continue if name == "sklearn.utils.fixes": # We cannot always control these docstrings continue with warnings.catch_warnings(record=True): module = importlib.import_module(name) classes = inspect.getmembers(module, inspect.isclass) # Exclude non-scikit-learn classes classes = [ cls for cls in classes if cls[1].__module__.startswith("sklearn") ] for cname, cls in classes: this_incorrect = [] if cname in _DOCSTRING_IGNORES or cname.startswith("_"): continue if inspect.isabstract(cls): continue with warnings.catch_warnings(record=True) as w: cdoc = docscrape.ClassDoc(cls) if len(w): raise RuntimeError("Error for __init__ of %s in %s:\n%s" % (cls, name, w[0])) cls_init = getattr(cls, "__init__", None) if _is_deprecated(cls_init): continue elif cls_init is not None: this_incorrect += check_docstring_parameters( cls.__init__, cdoc) for method_name in cdoc.methods: method = getattr(cls, method_name) if _is_deprecated(method): continue param_ignore = None # Now skip docstring test for y when y is None # by default for API reason if method_name in _METHODS_IGNORE_NONE_Y: sig = signature(method) if "y" in sig.parameters and sig.parameters[ "y"].default is None: param_ignore = ["y"] # ignore y for fit and score result = check_docstring_parameters(method, ignore=param_ignore) this_incorrect += result incorrect += this_incorrect functions = inspect.getmembers(module, inspect.isfunction) # Exclude imported functions functions = [fn for fn in functions if fn[1].__module__ == name] for fname, func in functions: # Don't test private methods / functions if fname.startswith("_"): continue if fname == "configuration" and name.endswith("setup"): continue name_ = _get_func_name(func) if not any( d in name_ for d in _DOCSTRING_IGNORES) and not _is_deprecated(func): incorrect += check_docstring_parameters(func) msg = "\n".join(incorrect) if len(incorrect) > 0: raise AssertionError("Docstring Error:\n" + msg)
def __init__(cls, name, bases, namespace):
    super().__init__(name, bases, namespace)
    if not inspect.isabstract(cls):
        cls.run = AlgorithmBase.run_with_logging(cls.run)

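# A runnable sketch (invented names) of the idea above: the metaclass wraps
# `run` with a logging decorator only on concrete subclasses, so the abstract
# base and its unimplemented `run` are left untouched.
import abc
import functools
import inspect

def run_with_logging(run):
    @functools.wraps(run)
    def wrapper(self, *args, **kwargs):
        print(f"running {type(self).__name__}")
        return run(self, *args, **kwargs)
    return wrapper

class AlgorithmMeta(abc.ABCMeta):
    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        if not inspect.isabstract(cls):
            cls.run = run_with_logging(cls.run)

class AlgorithmBase(metaclass=AlgorithmMeta):
    @abc.abstractmethod
    def run(self):
        ...

class Sort(AlgorithmBase):
    def run(self):
        return "sorted"

assert Sort().run() == "sorted"  # prints "running Sort" before returning
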
def list_model_types():
    models = []
    for _name, obj in getmembers(ml_models):
        if isclass(obj) and not isabstract(obj):
            models.append(obj)
    return models

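# A self-contained variant of the same lookup: inspect.getmembers() over a
# module object, keeping only concrete classes. The `ml_models` module is
# replaced here by a throwaway module built with types.ModuleType.
import abc
import inspect
import types

mod = types.ModuleType("fake_models")

class Model(abc.ABC):
    @abc.abstractmethod
    def fit(self):
        ...

class LinearModel(Model):
    def fit(self):
        return self

mod.Model, mod.LinearModel, mod.VERSION = Model, LinearModel, "1.0"

concrete = [obj for _name, obj in inspect.getmembers(mod)
            if inspect.isclass(obj) and not inspect.isabstract(obj)]
assert concrete == [LinearModel]
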
def _get_tools_package_rst(self, tools_package): from abjad.tools import documentationtools classes, functions = self._get_tools_package_contents( tools_package, ) document = documentationtools.ReSTDocument() if self.__class__.__name__.startswith('ScoreLibrary'): heading = documentationtools.ReSTHeading( level=2, text=tools_package.__name__, ) else: heading = documentationtools.ReSTHeading( level=2, text=tools_package.__name__.split('.')[-1], ) document.append(heading) automodule_directive = documentationtools.ReSTAutodocDirective( argument=tools_package.__name__, directive='automodule', ) document.append(automodule_directive) ignored_classes = self._get_ignored_classes() classes = [_ for _ in classes if _ not in ignored_classes] if not self.__class__.__name__.startswith('ScoreLibrary'): if classes: rule = documentationtools.ReSTHorizontalRule() document.append(rule) lineage_heading = documentationtools.ReSTHeading( level=3, text='Lineage', ) document.append(lineage_heading) lineage_graph = self._get_tools_package_graph(tools_package) graphviz_directive = documentationtools.ReSTGraphvizDirective( graph=lineage_graph, ) graphviz_container = documentationtools.ReSTDirective( directive='container', argument='graphviz', ) graphviz_container.append(graphviz_directive) document.append(graphviz_container) if classes: sections = {} for cls in classes: documentation_section = getattr( cls, '__documentation_section__', None, ) if documentation_section is None: #if issubclass(cls, enum.Enum): # documentation_section = 'Enumerations' #elif issubclass(cls, Exception): if issubclass(cls, Exception): documentation_section = 'Errors' else: documentation_section = 'Classes' if inspect.isabstract(cls): documentation_section = 'Abstract Classes' if documentation_section not in sections: sections[documentation_section] = [] sections[documentation_section].append(cls) section_names = sorted(sections) if 'Main Classes' in sections: section_names.remove('Main Classes') section_names.insert(0, 'Main Classes') if 'Errors' in sections: section_names.remove('Errors') section_names.append('Errors') for section_name in section_names: rule = documentationtools.ReSTHorizontalRule() document.append(rule) heading = documentationtools.ReSTHeading( level=3, text=section_name, ) document.append(heading) toc = documentationtools.ReSTTOCDirective( options={ 'hidden': True, }, ) for cls in sections[section_name]: class_name = cls.__name__ if class_name == 'Index': class_name = '_Index' toc_item = documentationtools.ReSTTOCItem( text=class_name, ) toc.append(toc_item) document.append(toc) autosummary = documentationtools.ReSTAutosummaryDirective( options={ 'nosignatures': True, }, ) for cls in sections[section_name]: item = documentationtools.ReSTAutosummaryItem( text=cls.__name__, ) autosummary.append(item) document.append(autosummary) if functions: if classes: rule = documentationtools.ReSTHorizontalRule() document.append(rule) section_name = 'Functions' heading = documentationtools.ReSTHeading( level=3, text=section_name, ) document.append(heading) toc = documentationtools.ReSTTOCDirective( options={ 'hidden': True, }, ) for function in functions: toc_item = documentationtools.ReSTTOCItem( text=function.__name__, ) toc.append(toc_item) document.append(toc) autosummary = documentationtools.ReSTAutosummaryDirective( options={ 'nosignatures': True, }, ) for function in functions: item = documentationtools.ReSTAutosummaryItem( text=function.__name__, ) autosummary.append(item) document.append(autosummary) return document
def __init_subclass__(cls):
    if not inspect.isabstract(cls):
        try:
            PLUGINS[cls.name.lower()] = cls
        except AttributeError:
            FAILED_PLUGINS.add('.'.join([cls.__module__, cls.__name__]))

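# A minimal sketch (hypothetical plugin names) of this registration hook:
# concrete subclasses register themselves under their `name` attribute, and
# subclasses without one are recorded as failures instead of raising. Note
# that relying on inspect.isabstract() inside __init_subclass__ (before
# ABCMeta has finished building the class) only works on newer Python 3
# releases; this sketch avoids abstract intermediate classes for that reason.
import abc
import inspect

PLUGINS = {}
FAILED_PLUGINS = set()

class Plugin(abc.ABC):
    def __init_subclass__(cls):
        super().__init_subclass__()
        if not inspect.isabstract(cls):
            try:
                PLUGINS[cls.name.lower()] = cls
            except AttributeError:
                FAILED_PLUGINS.add('.'.join([cls.__module__, cls.__name__]))

    @abc.abstractmethod
    def execute(self):
        ...

class EchoPlugin(Plugin):
    name = "Echo"
    def execute(self):
        return "echo"

class BrokenPlugin(Plugin):  # forgot to define `name`
    def execute(self):
        return None

assert PLUGINS == {"echo": EchoPlugin}
assert any(n.endswith("BrokenPlugin") for n in FAILED_PLUGINS)
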
def _filter_concrete(classes):
    return list(filter(lambda c: not inspect.isabstract(c), classes))

from beagle.common import logger
from beagle.config import Config
from beagle.datasources.base_datasource import ExternalDataSource
from beagle.datasources.json_data import JSONData
from beagle.web.api.models import Graph
from beagle.web.server import db

api = Blueprint("api", __name__, url_prefix="/api")

# Define a mapping between datasource classes to strings
DATASOURCES = {
    # Class name is used here.
    cls[1].__name__: cls[1]
    for cls in inspect.getmembers(
        sys.modules["beagle.datasources"],
        lambda cls: inspect.isclass(cls) and not inspect.isabstract(cls),
    )
}

# Define a mapping between transformer class *names* to class objects
TRANSFORMERS = {
    # Human-readable name used here.
    cls[1].__name__: cls[1]
    for cls in inspect.getmembers(
        sys.modules["beagle.transformers"],
        lambda cls: inspect.isclass(cls) and not inspect.isabstract(cls),
    )
}

BACKENDS = {
    cls[1].__name__: cls[1]

def resolve(self, kernel, ref): # First we need to check our reference map to see if we have any instance that # already matches this reference. try: # TODO: Handle discovery of possible new interfaces on the ObjRef return self._refs[ref.ref] except KeyError: pass # If we got to this point, then we didn't have a referene for this, in that case # we want to create a new instance, but we need to create it in such a way that # we don't try to recreate the type inside of the JSII interface. class_fqn = ref.ref.rsplit("@", 1)[0] if class_fqn in _types: klass = _types[class_fqn] # If this class is an abstract class, then we'll use the generated proxy # class instead of the abstract class to handle return values for this type. if inspect.isabstract(klass): klass = klass.__jsii_proxy_class__() # Create our instance, bypassing __init__ by directly calling __new__, and # then assign our reference to __jsii_ref__ inst = klass.__new__(klass) inst.__jsii_ref__ = ref if ref.interfaces is not None: return InterfaceDynamicProxy( [inst] + self.build_interface_proxies_for_ref(ref)) else: return inst # Legacy code path - Kernel invariant ought to guarantee that class_fqn can't be Struct (they're interfaces) elif class_fqn in _data_types: # Data types have been serialized by-reference (see aws/jsii#400). # We retrieve all of its properties right now and then construct a value # object from it. This will be slow :(. # Ugly delayed import here because I can't solve the cyclic # package dependency right now :(. from ._runtime import python_jsii_mapping data_type = _data_types[class_fqn] remote_struct = _FakeReference(ref) python_props = { python_name: kernel.get(remote_struct, jsii_name) for python_name, jsii_name in python_jsii_mapping( data_type).items() } return data_type(**python_props) elif class_fqn in _enums: return _enums[class_fqn] elif class_fqn == "Object": # If any one interface is a struct, all of them are guaranteed to be (Kernel invariant) if ref.interfaces is not None and any(fqn in _data_types for fqn in ref.interfaces): # Ugly delayed import here because I can't solve the cyclic # package dependency right now :(. from ._runtime import python_jsii_mapping structs = [_data_types[fqn] for fqn in ref.interfaces] remote_struct = _FakeReference(ref) insts = [ struct( **{ python_name: kernel.get(remote_struct, jsii_name) for python_name, jsii_name in python_jsii_mapping( struct).items() }) for struct in structs ] return StructDynamicProxy(insts) else: return InterfaceDynamicProxy( self.build_interface_proxies_for_ref(ref)) else: raise ValueError(f"Unknown type: {class_fqn}")
def get_features(namespace):
    for k, v in namespace.items():
        if inspect.isclass(v) and issubclass(
                v, Feature) and not inspect.isabstract(v):
            yield v()

def is_verifier(cls):
    """Determine if a class is a Verifier that can be instantiated"""
    return inspect.isclass(cls) and issubclass(cls, Verifier) and \
        not inspect.isabstract(cls)

def load_modules(self): """ Load modules Modules are workers and (as a subset of workers) postprocessors. These are found by importing any python files found in the given locations, and looking for relevant classes within those python files, that extend `BasicProcessor` or `BasicWorker` and are not abstract. """ # look for workers and processors in pre-defined folders and datasources paths = [Path(config.PATH_ROOT, "processors"), Path(config.PATH_ROOT, "backend", "workers"), *[self.datasources[datasource]["path"] for datasource in self.datasources]] root_match = re.compile(r"^%s" % re.escape(config.PATH_ROOT)) for folder in paths: # loop through folders, and files in those folders, recursively for file in folder.rglob("*.py"): # determine module name for file # reduce path to be relative to 4CAT root module_name = file.parts[:-1] for part in Path(config.PATH_ROOT).parts: module_name = module_name[1:] module_name = ".".join(list(module_name) + [file.stem]) # check if we've already loaded this module if module_name in sys.modules or module_name in self.ignore: continue # try importing try: module = importlib.import_module(module_name) except ImportError as e: # this is fine, just ignore this data source and give a heads up module_name_short = module_name.split(".")[-1] self.ignore.append(module_name_short) if e.name not in self.missing_modules: self.missing_modules[e.name] = [module_name_short] else: self.missing_modules[e.name].append(module_name_short) continue # see if module contains the right type of content by looping # through all of its members components = inspect.getmembers(module) for component in components: # check if found object qualifies as a worker class is_4cat_module = False if component[0][0:2] != "__" \ and inspect.isclass(component[1]) \ and (issubclass(component[1], BasicWorker) or issubclass(component[1], BasicProcessor)) \ and not inspect.isabstract(component[1]): is_4cat_module = True # nope? 
ignore it in the future if not is_4cat_module: continue # extract data that is useful for the scheduler and other # parts of 4CAT relative_path = root_match.sub("", str(file)) metadata = { "file": file.name, "path": relative_path, "module": relative_path[1:-3].replace(os.sep, "."), "id": component[1].type, "name": component[0], "class_name": component[0], "max": component[1].max_workers } # processors have some extra metadata that is useful to store if issubclass(component[1], BasicProcessor): metadata = {**metadata, **{ "description": component[1].description, "name": component[1].title if hasattr(component[1], "title") else component[0], "extension": component[1].extension, "category": component[1].category if hasattr(component[1], "category") else "other", "accepts": component[1].accepts if hasattr(component[1], "accepts") else [], "options": component[1].options if hasattr(component[1], "options") else {}, "datasources": component[1].datasources if hasattr(component[1], "datasources") else [], "references": component[1].references if hasattr(component[1], "references") else [], "is_filter": hasattr(component[1], "category") and "filter" in component[1].category.lower(), "further": [], "further_flat": set() }} # maintain a separate cache of processors self.processors[metadata["id"]] = metadata self.workers[metadata["id"]] = metadata sorted_processors = {id: self.processors[id] for id in sorted(self.processors, key=lambda item: self.processors[item]["name"])} categorised_processors = {id: sorted_processors[id] for id in sorted(sorted_processors, key=lambda item: "0" if sorted_processors[item]["category"] == "Presets" else sorted_processors[item]["category"])} # determine what processors are available as a follow-up for each # processor. This can only be done here because we need to know all # possible processors before we can inspect mutual compatibilities backup = categorised_processors.copy() for type in categorised_processors: categorised_processors[type]["further"] = [] for possible_child in backup: if type in backup[possible_child]["accepts"]: categorised_processors[type]["further"].append(possible_child) self.processors = categorised_processors flat_further = set() def collapse_flat_list(processor): for further_processor in processor["further"]: if further_processor not in flat_further: collapse_flat_list(self.processors[further_processor]) flat_further.add(further_processor) for processor in self.processors: flat_further = set() collapse_flat_list(self.processors[processor]) self.processors[processor]["further_flat"] = flat_further # Give a heads-up if not all modules were installed properly. if self.missing_modules: print_msg = "Warning: Not all modules could be found, which might cause data sources and modules to not function.\nMissing modules:\n" for missing_module, processor_list in self.missing_modules.items(): print_msg += "\t%s (for processors %s)\n" % (missing_module, ", ".join(processor_list)) print(print_msg, file=sys.stderr) # Cache data self.cache()
def isabstract(o) -> TypeGuard[abc.ABC]:
    return inspect.isabstract(o) or o in _ABCS

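# A rough, self-contained reconstruction of that pattern: some base classes
# are not reported as abstract by inspect.isabstract() (e.g. an ABC whose
# methods all have default bodies), so an explicit allow-list `_ABCS` backs
# it up. TypeGuard needs Python 3.10+ (or typing_extensions); the class name
# is invented for illustration.
import abc
import inspect
from typing import TypeGuard

class Tokenizer(abc.ABC):  # hypothetical ABC with no abstract methods
    def tokenize(self, text: str) -> list[str]:
        return text.split()

_ABCS = frozenset({Tokenizer})

def isabstract(o) -> TypeGuard[type[abc.ABC]]:
    return inspect.isabstract(o) or o in _ABCS

assert isabstract(Tokenizer)        # caught via the allow-list
assert not isabstract(Tokenizer())  # instances are never abstract
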
def subclasses(cls, clazz, concrete: bool = False):
    for subclass in clazz.__subclasses__():
        yield from cls.subclasses(subclass, concrete=concrete)
        if (not concrete
                or not inspect.isabstract(subclass)
                and not subclass.__name__.startswith("_")):
            yield subclass

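# A standalone sketch of the same traversal as a module-level generator
# (class names are invented): walk __subclasses__() recursively and, when
# `concrete` is requested, drop abstract and underscore-prefixed classes.
import abc
import inspect

def iter_subclasses(clazz, concrete=False):
    for subclass in clazz.__subclasses__():
        yield from iter_subclasses(subclass, concrete=concrete)
        if (not concrete
                or not inspect.isabstract(subclass)
                and not subclass.__name__.startswith("_")):
            yield subclass

class Node(abc.ABC):
    @abc.abstractmethod
    def render(self): ...

class _HiddenNode(Node):
    def render(self): return ""

class TextNode(Node):
    def render(self): return "text"

class BoldText(TextNode):
    def render(self): return "bold"

assert set(iter_subclasses(Node)) == {_HiddenNode, TextNode, BoldText}
assert set(iter_subclasses(Node, concrete=True)) == {TextNode, BoldText}
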
def test_module_attribute() -> None:
    """Ensure module attributes' signatures."""
    assert hasattr(lmp.dset._ch_poem, 'ChPoemDset')
    assert inspect.isclass(lmp.dset._ch_poem.ChPoemDset)
    assert not inspect.isabstract(lmp.dset._ch_poem.ChPoemDset)
    assert issubclass(lmp.dset._ch_poem.ChPoemDset, BaseDset)

def test_docstring_parameters(): # Test module docstring formatting # Skip test if numpydoc is not found try: import numpydoc # noqa except ImportError: raise SkipTest("numpydoc is required to test the docstrings") from numpydoc import docscrape incorrect = [] for name in PUBLIC_MODULES: if name == 'mrex.utils.fixes': # We cannot always control these docstrings continue with warnings.catch_warnings(record=True): module = importlib.import_module(name) classes = inspect.getmembers(module, inspect.isclass) # Exclude imported classes classes = [cls for cls in classes if cls[1].__module__ == name] for cname, cls in classes: this_incorrect = [] if cname in _DOCSTRING_IGNORES or cname.startswith('_'): continue if inspect.isabstract(cls): continue with warnings.catch_warnings(record=True) as w: cdoc = docscrape.ClassDoc(cls) if len(w): raise RuntimeError('Error for __init__ of %s in %s:\n%s' % (cls, name, w[0])) cls_init = getattr(cls, '__init__', None) if _is_deprecated(cls_init): continue elif cls_init is not None: this_incorrect += check_docstring_parameters( cls.__init__, cdoc) for method_name in cdoc.methods: method = getattr(cls, method_name) if _is_deprecated(method): continue param_ignore = None # Now skip docstring test for y when y is None # by default for API reason if method_name in _METHODS_IGNORE_NONE_Y: sig = signature(method) if ('y' in sig.parameters and sig.parameters['y'].default is None): param_ignore = ['y'] # ignore y for fit and score result = check_docstring_parameters(method, ignore=param_ignore) this_incorrect += result incorrect += this_incorrect functions = inspect.getmembers(module, inspect.isfunction) # Exclude imported functions functions = [fn for fn in functions if fn[1].__module__ == name] for fname, func in functions: # Don't test private methods / functions if fname.startswith('_'): continue if fname == "configuration" and name.endswith("setup"): continue name_ = _get_func_name(func) if (not any(d in name_ for d in _DOCSTRING_IGNORES) and not _is_deprecated(func)): incorrect += check_docstring_parameters(func) msg = '\n'.join(incorrect) if len(incorrect) > 0: raise AssertionError("Docstring Error:\n" + msg)
def _default_extensions() -> Sequence[Type[Ctap2Extension]]:
    return [
        cls for cls in Ctap2Extension.__subclasses__()
        if not inspect.isabstract(cls)
    ]

def component(cls: Type): """A decorator which turns a class into a Zookeeper component.""" if not inspect.isclass(cls): raise TypeError("Only classes can be decorated with @component.") if inspect.isabstract(cls): raise TypeError("Abstract classes cannot be decorated with @component.") if utils.is_component_class(cls): raise TypeError( f"The class {cls.__name__} is already a component; the @component decorator " "cannot be applied again." ) if cls.__init__ not in (object.__init__, __component_init__): # A component class could have `__component_init__` as its init method # if it inherits from a component. raise TypeError("Component classes must not define a custom `__init__` method.") cls.__init__ = __component_init__ if hasattr(cls, "__pre_configure__"): if not callable(cls.__pre_configure__): raise TypeError( "The `__pre_configure__` attribute of a @component class must be a " "method." ) call_args = inspect.signature(cls.__pre_configure__).parameters if len(call_args) > 2 or len(call_args) > 1 and "self" not in call_args: raise TypeError( "The `__pre_configure__` method of a @component class must take no " f"arguments except `self` and `conf`, but " f"`{cls.__name__}.__pre_configure__` accepts arguments " f"{tuple(name for name in call_args)}." ) if hasattr(cls, "__post_configure__"): if not callable(cls.__post_configure__): raise TypeError( "The `__post_configure__` attribute of a @component class must be a " "method." ) call_args = inspect.signature(cls.__post_configure__).parameters if len(call_args) > 1 or len(call_args) == 1 and "self" not in call_args: raise TypeError( "The `__post_configure__` method of a @component class must take no " f"arguments except `self`, but `{cls.__name__}.__post_configure__` " f"accepts arguments {tuple(name for name in call_args)}." ) # Populate `__component_fields__` with all fields defined on this class and # all superclasses. We have to go through the MRO chain and collect them in # reverse order so that they are correctly overriden. fields = {} for base_class in reversed(inspect.getmro(cls)): for name, value in base_class.__dict__.items(): if isinstance(value, Field): fields[name] = value if len(fields) == 0: utils.warn(f"Component {cls.__name__} has no defined fields.") # Throw an error if there is a field defined on a superclass that has been # overriden with a non-Field value. for name in dir(cls): if name in fields and not isinstance(getattr(cls, name), Field): super_class = fields[name].host_component_class raise ValueError( f"Field '{name}' is defined on super-class {super_class.__name__}. " f"In subclass {cls.__name__}, '{name}' has been overriden with value: " f"{getattr(cls, name)}.\n\n" f"If you wish to change the default value of field '{name}' in a " f"subclass of {super_class.__name__}, please wrap the new default " "value in a new `Field` instance." ) cls.__component_fields__ = fields # Override class methods to correctly interact with component fields. _wrap_getattribute(cls) _wrap_setattr(cls) _wrap_delattr(cls) _wrap_dir(cls) # Implement the `ItemsView` protocol if hasattr(cls, "__len__") and cls.__len__ != __component_len__: raise TypeError("Component classes must not define a custom `__len__` method.") cls.__len__ = __component_len__ if hasattr(cls, "__contains__") and cls.__contains__ != __component_contains__: raise TypeError( "Component classes must not define a custom `__contains__` method." 
) cls.__contains__ = __component_contains__ if hasattr(cls, "__iter__") and cls.__iter__ != __component_iter__: raise TypeError("Component classes must not define a custom `__iter__` method.") cls.__iter__ = __component_iter__ # Components should have nice `__str__` and `__repr__` methods. cls.__str__ = __component_str__ cls.__repr__ = __component_repr__ # These will be overriden during configuration. cls.__component_name__ = cls.__name__ cls.__component_parent__ = None cls.__component_configured__ = False return cls
def _is_frameworkclass(o):
    """Filter concrete (non-abstract) subclasses of BaseFramework."""
    return inspect.isclass(o) and issubclass(
        o, BaseFramework) and not inspect.isabstract(o)

def dash_R(the_module, test, indirect_test, huntrleaks): """Run a test multiple times, looking for reference leaks. Returns: False if the test didn't leak references; True if we detected refleaks. """ # This code is hackish and inelegant, but it seems to do the job. import copyreg import collections.abc if not hasattr(sys, 'gettotalrefcount'): raise Exception("Tracking reference leaks requires a debug build " "of Python") # Save current values for dash_R_cleanup() to restore. fs = warnings.filters[:] ps = copyreg.dispatch_table.copy() pic = sys.path_importer_cache.copy() try: import zipimport except ImportError: zdc = None # Run unmodified on platforms without zipimport support else: zdc = zipimport._zip_directory_cache.copy() abcs = {} for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]: if not isabstract(abc): continue for obj in abc.__subclasses__() + [abc]: abcs[obj] = obj._abc_registry.copy() nwarmup, ntracked, fname = huntrleaks fname = os.path.join(support.SAVEDCWD, fname) repcount = nwarmup + ntracked rc_deltas = [0] * repcount alloc_deltas = [0] * repcount fd_deltas = [0] * repcount print("beginning", repcount, "repetitions", file=sys.stderr) print(("1234567890" * (repcount // 10 + 1))[:repcount], file=sys.stderr, flush=True) # initialize variables to make pyflakes quiet rc_before = alloc_before = fd_before = 0 for i in range(repcount): indirect_test() alloc_after, rc_after, fd_after = dash_R_cleanup( fs, ps, pic, zdc, abcs) print('.', end='', file=sys.stderr, flush=True) if i >= nwarmup: rc_deltas[i] = rc_after - rc_before alloc_deltas[i] = alloc_after - alloc_before fd_deltas[i] = fd_after - fd_before alloc_before = alloc_after rc_before = rc_after fd_before = fd_after print(file=sys.stderr) # These checkers return False on success, True on failure def check_rc_deltas(deltas): # Checker for reference counters and memomry blocks. # # bpo-30776: Try to ignore false positives: # # [3, 0, 0] # [0, 1, 0] # [8, -8, 1] # # Expected leaks: # # [5, 5, 6] # [10, 1, 1] return all(delta >= 1 for delta in deltas) def check_fd_deltas(deltas): return any(deltas) failed = False for deltas, item_name, checker in [ (rc_deltas, 'references', check_rc_deltas), (alloc_deltas, 'memory blocks', check_rc_deltas), (fd_deltas, 'file descriptors', check_fd_deltas) ]: # ignore warmup runs deltas = deltas[nwarmup:] if checker(deltas): msg = '%s leaked %s %s, sum=%s' % (test, deltas, item_name, sum(deltas)) print(msg, file=sys.stderr, flush=True) with open(fname, "a") as refrep: print(msg, file=refrep) refrep.flush() failed = True return failed
def test_item_types():
    for key, value in item_types.items():
        assert isinstance(key, str)
        assert not inspect.isabstract(value)

def istopology(x):
    """Helper predicate: True for concrete (non-abstract) classes."""
    return inspect.isclass(x) and not inspect.isabstract(x)

def __init__(cls, name, bases=(), dct=None):  # pylint:disable=redefined-builtin
    super().__init__(name, bases, dct)
    if not isabstract(cls):  # skip configuration for abstracts
        assert hasattr(cls, 'CONFIG_ID')
        cls.config = SOURCES_CFG[cls.CONFIG_ID]
        cls.TOPICS = Topics.with_prefix(cls.config.topic_prefix)

def html_tag_map():
    HTML_TAG_MAP = CaseInsensitiveDict()
    for sub in all_subclasses(HTMLObject):
        if not inspect.isabstract(sub):
            HTML_TAG_MAP[sub.tag] = sub
    return HTML_TAG_MAP

def __init_subclass__(cls, *args, **kwargs):
    """Register subclasses automatically so each one does not need a decorator."""
    super().__init_subclass__(*args, **kwargs)
    if cls.__name__ not in EnvRegistry and not inspect.isabstract(cls):
        EnvRegistry[cls.__name__] = cls

def is_abstract(cls: Type) -> bool:
    if not abstract_classes:
        return inspect.isabstract(cls)
    return (cls in abstract_classes
            or any(c.__name__ == cls.__name__ for c in abstract_classes))

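# A self-contained sketch of the same idea: when an explicit collection of
# abstract classes is supplied (useful for classes that are abstract by
# convention rather than via abc), it overrides inspect.isabstract(); matching
# by class name also covers re-imported duplicates of the same class. All
# names here are invented for illustration.
import inspect
from typing import Collection, Type

def make_is_abstract(abstract_classes: Collection[type] = ()):
    def is_abstract(cls: Type) -> bool:
        if not abstract_classes:
            return inspect.isabstract(cls)
        return (cls in abstract_classes
                or any(c.__name__ == cls.__name__ for c in abstract_classes))
    return is_abstract

class BaseHandler:  # abstract by convention only
    def handle(self):
        raise NotImplementedError

class PingHandler(BaseHandler):
    def handle(self):
        return "pong"

is_abstract = make_is_abstract([BaseHandler])
assert is_abstract(BaseHandler)
assert not is_abstract(PingHandler)
assert not make_is_abstract()(BaseHandler)  # falls back to inspect.isabstract
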
def test_class():
    r"""Ensure class signature."""
    assert inspect.isclass(TopPInfer)
    assert not inspect.isabstract(TopPInfer)
    assert issubclass(TopPInfer, BaseInfer)

def test_design() -> None:
    assert inspect.isabstract(HistogramMatching) is False
    assert len(HistogramMatching.__mro__) == 4
