def parsePlugins(packageName='Plugins'):
    """Scan a plugin package for generator functions and view classes.

    Walks the sub-packages of *packageName* that are listed in the global
    ``pluginsToLoad``.  Inside each selected sub-package, modules whose name
    contains ``'Generators'`` contribute all their functions, and modules
    whose name contains ``'View'`` contribute the class named
    ``<subpackage>Tab``.

    NOTE(review): the original body unconditionally overwrote the
    ``packageName`` argument with ``'Plugins'``, making the parameter dead;
    it is now honoured, with ``'Plugins'`` kept as the default.

    Returns a dict ``{'generators': {name: func}, 'views': {name: cls}}``.
    """
    print('searching for generators and views...')
    package = __import__(packageName)
    print(dir(package))
    pluginsDict = {'generators': {}, 'views': {}}
    for importer, modname, ispkg in pkgutil.walk_packages(package.__path__):
        # skip private modules, and anything not explicitly requested
        if modname.startswith('_'):
            continue
        if not (ispkg and modname in pluginsToLoad):
            continue
        print("Found submodule %s (is a package: %s)" % (modname, ispkg))
        # walk_packages imported the sub-package above, so it is an attribute
        # of the parent package (replaces the original eval('package.'+modname))
        subpackage = getattr(package, modname)
        print('package content: {0}'.format(dir(subpackage)))
        for importer2, modname2, ispkg2 in pkgutil.walk_packages(subpackage.__path__):
            if ispkg2:
                continue
            # Searching for generators: modules exporting plain functions
            if 'Generators' in modname2:
                genModule = getattr(subpackage, modname2)
                print("{0} contains generators".format(genModule.__name__))
                pluginsDict['generators'].update(
                    (name, obj) for name, obj in getmembers(genModule)
                    if isfunction(obj))
            # Searching for the view: a class named <subpackage>Tab
            if 'View' in modname2:
                viewModule = getattr(subpackage, modname2)
                print("{0} contains view".format(viewModule.__name__))
                pluginsDict['views'].update(
                    (name, obj) for name, obj in getmembers(viewModule)
                    if isclass(obj) and name == modname + 'Tab')
    print('search finished')
    print('found generators: {0}'.format(pluginsDict['generators']))
    print('found views: {0}'.format(pluginsDict['views']))
    return pluginsDict
Esempio n. 2
0
    def test_walk_packages_raises_on_string_or_bytes_input(self):
        """walk_packages must reject a bare str or bytes path argument."""
        for bad_path in ('test_dir', b'test_dir'):
            with self.assertRaises((TypeError, ValueError)):
                list(pkgutil.walk_packages(bad_path))
Esempio n. 3
0
File: imp.py Progetto: Shirk/SynFU
 def run(self):
     """Load and run installed job plugins.

     Searches the bundled 'plugins' directory and the configured
     plugin_dir for modules named ``ImpJob*`` and imports each one;
     import failures are logged and skipped.
     """
     if self._show_help:
         # extra printout, --help-plugins disables _log()
         print('Installed jobs:')

     self._log('--- begin')
     self._log('--- loading plugins:')
     plugin_path = [os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'plugins'),
                    self._conf.plugin_dir]

     for p in plugin_path:
         if not os.path.exists(p):
             self._log('!!! skipping  "{0}" - no such file or directory'.format(p))
             continue

         sys.path.append(p)

         for (importer, name, ispkg) in pkgutil.walk_packages([p]):
             if name.startswith('ImpJob'):
                 try:
                     __import__(name)
                 # 'except X as e' replaces the Python-2-only
                 # 'except X, e' form and works on 2.6+ and 3.x alike
                 except Exception as e:
                     self._log('!!! unable to import "{0}": {1}: {2}'.format(
                               name, e.__class__.__name__, e))
Esempio n. 4
0
 def loadPlugins(self, folder, namespace):
     """Import every module found under *folder* (names prefixed with
     *namespace*), append each pdmt.api.NodeType subclass to
     self.nodetypes and record every imported module in self.plugins."""
     walker = pkgutil.walk_packages(path=[folder], prefix=namespace)
     for _importer, qualified_name, _is_pkg in walker:
         module = importlib.import_module(qualified_name)
         for candidate in module.__dict__.values():
             # register only genuine classes deriving from NodeType
             if type(candidate) is type and issubclass(candidate, pdmt.api.NodeType):
                 self.nodetypes.append(candidate)
         self.plugins.append(module)
def run_tests(path=PLUGINS_PATH, test_module_name_prefix="test_",
              ignored_modules=None, output_stream=sys.stderr):
    """
    Execute all modules containing unit tests located in the `path`
    directory.  The names of the unit test modules start with the specified
    prefix (`test_` by default).

    `ignored_modules` is a list of unit test modules or packages to ignore.
    If a package is specified, all of its underlying modules are ignored.

    `output_stream` prints the unit test output using the specified output
    stream (`sys.stderr` by default).
    """
    _fix_streams_for_unittest()

    ignored = [] if ignored_modules is None else ignored_modules

    for _, module_name, _ in pkgutil.walk_packages(path=[path]):
        # a package prefix match suppresses everything underneath it
        is_ignored = any(module_name.startswith(prefix) for prefix in ignored)
        if is_ignored:
            continue

        module = load_module(module_name)

        last_component = module_name.split('.')[-1]
        if last_component.startswith(test_module_name_prefix):
            run_test(module, stream=output_stream)
Esempio n. 6
0
def test_root_import_all_completeness():
    """Every public top-level sklearn submodule must appear in __all__."""
    # modules that are legitimately absent from sklearn.__all__
    excluded = ('utils', 'tests', 'base', 'setup')
    walker = pkgutil.walk_packages(path=sklearn.__path__,
                                   onerror=lambda _: None)
    for _importer, modname, _ispkg in walker:
        is_public_top_level = ('.' not in modname
                               and not modname.startswith('_')
                               and modname not in excluded)
        if is_public_top_level:
            assert_in(modname, sklearn.__all__)
Esempio n. 7
0
    def getURISource(self, url):
        """Resolve *url* into playable sources via the provider module whose
        registered domains match the URL's host.

        Returns the filtered source list on success, False when no provider
        matches the domain, or None when any step raises (errors are
        deliberately swallowed by the outer bare except).
        """
        try:
            sourceDict = []
            # collect (name, is_pkg) for everything importable from this package
            for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
            # keep plain modules only (drop sub-packages)
            sourceDict = [i[0] for i in sourceDict if i[1] == False]
            # instantiate each provider's source() class
            # (level=-1 relative import makes this Python-2-only)
            sourceDict = [(i, __import__(i, globals(), locals(), [], -1).source()) for i in sourceDict]

            domain = (urlparse.urlparse(url).netloc).lower()

            # providers whose declared domains occur in the URL's host
            domains = [(i[0], i[1].domains) for i in sourceDict]
            domains = [i[0] for i in domains if any(x in domain for x in i[1])]

            if len(domains) == 0: return False

            # first matching provider wins
            call = [i[1] for i in sourceDict if i[0] == domains[0]][0]

            self.sources = call.sources(url, self.hostDict, self.hostprDict)

            # mark every source for autoplay; ignore malformed entries
            for i in range(len(self.sources)):
                try: self.sources[i]['autoplay'] = True
                except: pass

            self.sources = self.sourcesFilter()
            return self.sources
        except:
            pass
Esempio n. 8
0
def extensions_find(arg_parser):
    """Look for command extensions.

    For every package named in testbed.settings.PLUGINS, imports its
    ``commands`` submodules and lets each one register a subparser on
    *arg_parser* via its ``add_subparser`` function; modules missing that
    function are logged and skipped.
    """
    subparser = arg_parser.add_subparsers(
        title="subcommands", description="Valid subcommands",
        help="Each subcommands supports --help for additional information.")

    for package in testbed.settings.PLUGINS:
        LOGGER.debug("loading commands %s", package)

        package = importlib.import_module(package)
        for _, module, ispkg in pkgutil.walk_packages(package.__path__,
                                                      package.__name__ + ".",
                                                      onerror=onerror):
            ##
            # only include commands from commands.py files.
            if ispkg or not module.endswith("commands"):
                continue
            LOGGER.debug("  loading commands from %s", module)
            module = importlib.import_module(module)
            try:
                module.add_subparser(subparser)
            # 'except X as e' replaces the Python-2-only 'except X, e' form
            except AttributeError as arg:
                ##
                # This means that the module is missing the add method.
                # All modules identified in settings to extend CLI
                # must have an add method
                LOGGER.error("adding subparser for %s.%s", package, module)
                LOGGER.exception(arg)
Esempio n. 9
0
    def load_processors(self):
        """ load Cfg file processors.  this must be done at run-time,
        not at compile-time, or we get a circular import and things
        don't work.  but finding the right way to do this at runtime
        was ... problematic. so here it is, writing to a global
        variable.  Sorry 'bout that. """
        global PROCESSORS
        # populate the module-level cache only once
        if PROCESSORS is None:
            PROCESSORS = []
            if hasattr(pkgutil, 'walk_packages'):
                submodules = pkgutil.walk_packages(path=__path__)
            else:
                # python 2.4 fallback: emulate walk_packages with glob.
                # NOTE(review): hard-codes "/" as the path separator, so
                # this branch appears POSIX-only — confirm acceptable
                import glob
                submodules = []
                for path in __path__:
                    for submodule in glob.glob("%s/*.py" % path):
                        mod = '.'.join(submodule.split("/")[-1].split('.')[:-1])
                        if mod != '__init__':
                            submodules.append((None, mod, True))

            # each entry is (loader, name, ispkg); by convention the module
            # name doubles as the processor class name inside that module
            for submodule in submodules:
                module = getattr(__import__("%s.%s" %
                                            (__name__,
                                             submodule[1])).Server.Plugins.Cfg,
                                 submodule[1])
                proc = getattr(module, submodule[1])
                # register only classes deriving from a known processor base
                if set(proc.__mro__).intersection([CfgInfo, CfgFilter,
                                                   CfgGenerator, CfgVerifier]):
                    PROCESSORS.append(proc)
Esempio n. 10
0
    def get_modules(cls):
        """
        Dynamically loads all the modules in the modules folder and sorts
        them by the PRIORITY key. If no PRIORITY is defined for a given
        module, a priority of 0 is assumed.
        """

        logger = logging.getLogger(__name__)
        locations = [jasperpath.PLUGIN_PATH]
        logger.debug("Looking for modules in: %s",
                     ', '.join(["'%s'" % location for location in locations]))

        loaded = []
        for finder, name, ispkg in pkgutil.walk_packages(locations):
            try:
                mod = finder.find_module(name).load_module(name)
            except:
                logger.warning("Skipped module '%s' due to an error.", name,
                               exc_info=True)
                continue
            # a usable module must declare its trigger WORDS
            if hasattr(mod, 'WORDS'):
                logger.debug("Found module '%s' with words: %r", name,
                             mod.WORDS)
                loaded.append(mod)
            else:
                logger.warning("Skipped module '%s' because it misses " +
                               "the WORDS constant.", name)
        # highest priority first; missing PRIORITY counts as 0
        loaded.sort(key=lambda m: getattr(m, 'PRIORITY', 0), reverse=True)
        return loaded
Esempio n. 11
0
        def packages_in_stage(stage_name):
            """Return a (importer, name, ispkg) iterator over every pymodule
            and package nested under the stage named *stage_name*, with
            fully-qualified names and errors logged rather than raised."""
            stage_pymod = self.load_pymodule(stage_name)

            # where to recursively search for pymodules
            search_path = stage_pymod.__path__

            # Prefix of all pymodules and packages found in this dir. This is a little strange - suppose we are
            # searching in stage foo.bar which has the following structure:
            # |-foo
            #   |-bar
            #     |-buzz.py
            #     |-baz
            #       |-file.py
            # When walk_packages finds the directory `baz`, it won't know that the package's name is `foo.bar.baz` -
            # it's not aware that bar is contained within another package. Unless we provide a prefix, it will think
            # that `foo.bar.baz's` name is just `baz`.  This sort of makes sense, because if you added foo/bar to the
            # path then you could `import baz`. However, walk_packages will actually fail because it cannot
            # `import baz`, which it needs to do in order to get package details that inform the recursive search.
            # If there are no packages (only pymodules) in foo/bar, then `walk_packages` will succeed, but the output
            # names will be wrong (e.g. `buzz` instead of `foo.bar.buzz`).

            # `walk_packages` can generate AttributeError if the system has
            # Gtk modules, which are not designed to use with reflection or
            # introspection. Best action to take in this situation is probably
            # to simply suppress the error.
            def onerror(name):
                smv.logger.error("Skipping due to error during walk_packages: " + name)

            return pkgutil.walk_packages(
                path=search_path,
                prefix=stage_name + '.',
                onerror=onerror)
Esempio n. 12
0
def load_modules():
    """Discover command plugins.

    Imports every module (not package) under the ``modules`` package and
    maps each ``Module`` subclass's ``cmd`` attribute to its class and
    description.

    Returns a dict ``{cmd: {'obj': cls, 'description': str}}``.
    """
    # Import modules package.
    import modules
    import importlib

    plugins = dict()

    # Walk recursively through all modules and packages.
    for loader, module_name, ispkg in pkgutil.walk_packages(modules.__path__, modules.__name__ + '.'):
        # If current item is a package, skip.
        if ispkg:
            continue

        # Try to import the module, otherwise skip.
        # importlib.import_module replaces the Python-2-only
        # __import__(..., level=-1) call, which raises ValueError on
        # Python 3 (and matches the sibling viper implementation).
        try:
            module = importlib.import_module(module_name)
        except ImportError:
            continue

        # Walk through all members of currently imported modules.
        for member_name, member_object in inspect.getmembers(module):
            # Check if current member is a class.
            if inspect.isclass(member_object):
                # Register the class if it's a proper subclass of Module.
                if issubclass(member_object, Module) and member_object is not Module:
                    plugins[member_object.cmd] = dict(obj=member_object, description=member_object.description)

    return plugins
Esempio n. 13
0
    def load_plugins(self):
        """Discover and register every plugin module found under
        ``rezplugins.<type_name>`` on the plugin search path.

        Each plugin module must expose a callable ``register_plugin``
        returning the plugin class.  Failures are recorded in
        ``self.failed_plugins``; each path's ``rezconfig`` file is merged
        into ``self.config_data``."""
        import pkgutil
        from rez.backport.importlib import import_module
        type_module_name = 'rezplugins.' + self.type_name
        package = import_module(type_module_name)

        # on import, the `__path__` variable of the imported package is extended
        # to include existing directories on the plugin search path (via
        # extend_path, above). this means that `walk_packages` will walk over all
        # modules on the search path at the same level (.e.g in a
        # 'rezplugins/type_name' sub-directory).
        # NOTE(review): `basestring` (and StringIO below) make this
        # Python-2-only code.
        paths = [package.__path__] if isinstance(package.__path__, basestring) \
            else package.__path__
        for path in paths:
            for loader, modname, ispkg in pkgutil.walk_packages(
                    [path], package.__name__ + '.'):
                if loader is not None:
                    plugin_name = modname.split('.')[-1]
                    # underscore-prefixed modules are private, not plugins
                    if plugin_name.startswith('_'):
                        continue
                    if config.debug("plugins"):
                        print_debug("loading %s plugin at %s: %s..."
                                    % (self.type_name, path, modname))
                    try:
                        # load_module will force reload the module if it's
                        # already loaded, so check for that
                        module = sys.modules.get(modname)
                        if module is None:
                            module = loader.find_module(modname).load_module(modname)
                        if hasattr(module, 'register_plugin') and \
                                hasattr(module.register_plugin, '__call__'):
                            plugin_class = module.register_plugin()
                            if plugin_class != None:
                                self.register_plugin(plugin_name, plugin_class, module)
                            else:
                                if config.debug("plugins"):
                                    print_warning(
                                        "'register_plugin' function at %s: %s did not return a class."
                                        % (path, modname))
                        else:
                            if config.debug("plugins"):
                                print_warning(
                                    "no 'register_plugin' function at %s: %s"
                                    % (path, modname))

                            # delete from sys.modules?

                    except Exception as e:
                        # remember the failure so callers can report it later
                        nameish = modname.split('.')[-1]
                        self.failed_plugins[nameish] = str(e)
                        if config.debug("plugins"):
                            import traceback
                            from StringIO import StringIO
                            out = StringIO()
                            traceback.print_exc(file=out)
                            print_debug(out.getvalue())

            # load config
            data, _ = _load_config_from_filepaths([os.path.join(path, "rezconfig")])
            deep_update(self.config_data, data)
Esempio n. 14
0
def load_modules():
    """Discover viper command plugins.

    Imports every module (not package) under viper.modules and maps each
    Module subclass's ``cmd`` attribute to its class, description and
    argparse metadata.
    """
    # Import modules package.
    import viper.modules as modules

    plugins = dict()

    prefix = modules.__name__ + '.'
    # Walk recursively through all modules and packages.
    for _loader, qualified_name, is_package in pkgutil.walk_packages(modules.__path__, prefix):
        if is_package:
            continue
        # Try to import the module, otherwise skip.
        try:
            imported = importlib.import_module(qualified_name)
        except ImportError as e:
            print_warning("Something wrong happened while importing the module {0}: {1}".format(qualified_name, e))
            continue

        # Register every proper Module subclass defined in the module.
        for _member_name, candidate in inspect.getmembers(imported, inspect.isclass):
            if candidate is not Module and issubclass(candidate, Module):
                plugins[candidate.cmd] = dict(
                    obj=candidate,
                    description=candidate.description,
                    parser_args=get_argparse_parser_actions(candidate().parser),
                    subparser_args=get_argparse_subparser_actions(candidate().parser))

    return plugins
Esempio n. 15
0
def all_estimators():
    """Collect every (name, class) pair defined in sklearn that subclasses
    BaseEstimator, excluding abstract classes and anything in test modules.

    Returns a list sorted by class name for reproducible ordering.
    """
    def is_abstract(c):
        # abstract iff it still declares unimplemented abstract methods
        if not(hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    # get parent folder
    path = sklearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(path=path,
                            prefix='sklearn.', onerror=lambda x: None):
        module = __import__(modname, fromlist="dummy")
        if ".tests." in modname:
            continue
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [c for c in all_classes if issubclass(c[1], BaseEstimator)]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]
    # We sort in order to have reproducible test failures.  Sort on the
    # name only: plain sorted() on (name, class) tuples would fall back to
    # comparing the class objects when names collide, which raises
    # TypeError on Python 3.
    return sorted(estimators, key=lambda c: c[0])
Esempio n. 16
0
def walk_packages(packages: List[str]) -> Iterator[str]:
    """Iterates through all packages and sub-packages in the given list.

    Python packages have a __path__ attribute defined, which pkgutil uses to
    determine the package hierarchy.  However, packages in C extensions do
    not have this attribute, so we have to roll out our own.
    """
    for package_name in packages:
        package = importlib.import_module(package_name)
        yield package.__name__

        # pkgutil needs the package's __path__ to walk it
        package_path = getattr(package, '__path__', None)
        if package_path is not None:
            # ordinary package: let pkgutil do the recursive walk
            walker = pkgutil.walk_packages(package_path,
                                           prefix=package.__name__ + ".",
                                           onerror=lambda r: None)
            for _importer, qualified_name, _ispkg in walker:
                yield qualified_name
            continue

        # No __path__: either a plain module inside a package (nothing more
        # to walk) or a C extension package, whose submodules we discover
        # by inspection instead.
        if is_c_module(package):
            subpackages = [
                package.__name__ + "." + name
                for name, val in inspect.getmembers(package)
                if inspect.ismodule(val)
                and val.__name__ == package.__name__ + "." + name
            ]
            # recursively iterate through the subpackages
            for submodule in walk_packages(subpackages):
                yield submodule
Esempio n. 17
0
    def _load_handlers(self):
        """Populate self._handlers, mapping an entry-type string to an
        instantiated POSIX tool handler discovered in this package."""
        # this must be called at run-time, not at compile-time, or we
        # get wierd circular import issues.
        self._handlers = dict()
        if hasattr(pkgutil, 'walk_packages'):
            submodules = pkgutil.walk_packages(path=__path__)
        else:
            # python 2.4 fallback: emulate walk_packages via glob
            import glob
            submodules = []
            for path in __path__:
                for submodule in glob.glob(os.path.join(path, "*.py")):
                    mod = os.path.splitext(os.path.basename(submodule))[0]
                    if mod not in ['__init__']:
                        submodules.append((None, mod, True))

        # each entry is a (loader, name, ispkg) tuple
        for submodule in submodules:
            # 'base' holds the abstract POSIXTool, not a concrete handler
            if submodule[1] == 'base':
                continue
            module = getattr(__import__("%s.%s" %
                                        (__name__,
                                         submodule[1])).Client.Tools.POSIX,
                             submodule[1])
            # by convention the handler class is named "POSIX" + module name
            hdlr = getattr(module, "POSIX" + submodule[1])
            if POSIXTool in hdlr.__mro__:
                # figure out what entry type this handler handles
                etype = hdlr.__name__[5:].lower()
                self._handlers[etype] = hdlr(self.logger,
                                             self.setup,
                                             self.config)
Esempio n. 18
0
def get_all_methods(dirname, pkg_name):
    """Merge the module dictionaries of every module under *dirname*.

    Walks all modules below the directory (names prefixed with
    ``pkg_name + '.'``), imports each one and collects every attribute it
    defines.  On name collisions, later modules override earlier ones.

    Returns a dict mapping attribute name to object.
    """
    ret = dict()
    for _, module_name, _ in walk_packages([dirname], pkg_name + '.'):
        mod = import_module(module_name)
        # .items() replaces the Python-2-only .iteritems() and behaves
        # identically here on both major versions
        for key, val in mod.__dict__.items():
            ret[key] = val
    return ret
Esempio n. 19
0
    def sourcesResolve(self, url, provider):
        """Resolve *url* to a direct, reachable stream URL using the named
        provider module.

        Returns the resolved URL on success, or None on any failure
        (errors are deliberately swallowed by the outer bare except).
        """
        try:
            provider = provider.lower()

            # map a bare provider name to its suffixed module name
            if not provider.endswith(('_mv', '_tv', '_mv_tv')):
                sourceDict = []
                for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
                provider = [i[0] for i in sourceDict if i[1] == False and i[0].startswith(provider + '_')][0]

            # Python-2-only relative import (level=-1) of the provider module
            source = __import__(provider, globals(), locals(), [], -1).source()
            url = source.resolve(url)
            #control.log("[suurces]   my url 1 ************ %s " % url)
            #url = resolvers.request(url)
            #control.log("[sources]   my url 2 ************ %s " % url)

            # optional request headers are appended after a '|' separator
            try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
            except: headers = dict('')

            # probe the stream with a chunked request to verify it is alive
            result = client.request(url.split('|')[0], headers=headers, output='chunk', timeout='20')
            if result == None: raise Exception()
            #control.log("!!!!!!!!!!!!!!!!!!!  %s prov: %s" % (url,provider))
            self.url = url
            return url
        except:
            return
 def test_pkgutil_walk_packages(self):
     """Build a deeply nested package tree on disk and check that
     pkgutil.walk_packages enumerates exactly the expected packages
     and modules."""
     # This is a dodgy hack to use the test_runpy infrastructure to test
     # issue #15343. Issue #15348 declares this is indeed a dodgy hack ;)
     import pkgutil
     max_depth = 4
     base_name = "__runpy_pkg__"
     package_suffixes = ["uncle", "uncle.cousin"]
     module_suffixes = ["uncle.cousin.nephew", base_name + ".sibling"]
     # compute every dotted name the walk is expected to report
     expected_packages = set()
     expected_modules = set()
     for depth in range(1, max_depth):
         pkg_name = ".".join([base_name] * depth)
         expected_packages.add(pkg_name)
         for name in package_suffixes:
             expected_packages.add(pkg_name + "." + name)
         for name in module_suffixes:
             expected_modules.add(pkg_name + "." + name)
     pkg_name = ".".join([base_name] * max_depth)
     expected_packages.add(pkg_name)
     expected_modules.add(pkg_name + ".runpy_test")
     # materialise the package tree on disk (helpers from test_runpy)
     pkg_dir, mod_fname, mod_name, mod_spec = (
            self._make_pkg("", max_depth))
     self.addCleanup(self._del_pkg, pkg_dir, max_depth, mod_name)
     for depth in range(2, max_depth+1):
         self._add_relative_modules(pkg_dir, "", depth)
     # cross off every name the walk reports; anything left over is missing
     for finder, mod_name, ispkg in pkgutil.walk_packages([pkg_dir]):
         self.assertIsInstance(finder,
                               importlib.machinery.FileFinder)
         if ispkg:
             expected_packages.remove(mod_name)
         else:
             expected_modules.remove(mod_name)
     self.assertEqual(len(expected_packages), 0, expected_packages)
     self.assertEqual(len(expected_modules), 0, expected_modules)
Esempio n. 21
0
    def _get_modules(self):
        """Gets modules given the repo paths passed in to init.

        Plain modules are collected as-is; packages are walked and every
        contained module is imported.  Import failures are reported and,
        when self.exit_on_error is set, abort the process after the scan.
        """
        modules = []
        # Keep the last failure around: on Python 3 the 'except ... as'
        # target is deleted when the handler exits, so referencing it after
        # the loop (as the original did) raises NameError.
        last_exception = None
        for repo in self.testrepos:

            # We're assuming only packages have __file__ attribute strings
            # that end with __init__.py (or .pyc).  If this doesn't match
            # that pattern, assume it's a module.
            if "__init__.py" not in getattr(repo, "__file__", ""):
                modules.append(repo)
                continue

            # We're still in a package, so walk it and find the rest of the
            # modules.
            prefix = "{0}.".format(repo.__name__)
            for _, modname, is_pkg in pkgutil.walk_packages(
                    path=repo.__path__, prefix=prefix, onerror=lambda x: None):
                if not is_pkg:
                    try:
                        modules.append(importlib.import_module(modname))
                    except Exception as exception:
                        print_exception(
                            "Suite Builder", "import_module", modname,
                            exception)
                        last_exception = exception
        if self.exit_on_error and last_exception is not None:
            exit(get_error(last_exception))

        return modules
Esempio n. 22
0
    def test_walkpackages_zipfile(self):
        """Tests the same as test_walkpackages_filesys, only with a zip file."""

        zip = 'test_walkpackages_zipfile.zip'
        pkg1 = 'test_walkpackages_zipfile'
        pkg2 = 'sub'

        # build a zip holding two mirrored package trees: sub/pkg1 and pkg1/sub
        zip_file = os.path.join(self.dirname, zip)
        z = zipfile.ZipFile(zip_file, 'w')
        z.writestr(pkg2 + '/__init__.py', "")
        z.writestr(pkg2 + '/' + pkg1 + '/__init__.py', "")
        z.writestr(pkg2 + '/' + pkg1 + '/mod.py', "")
        z.writestr(pkg1 + '/__init__.py', "")
        z.writestr(pkg1 + '/' + pkg2 + '/__init__.py', "")
        z.writestr(pkg1 + '/' + pkg2 + '/mod.py', "")
        z.close()

        # the zip must be on sys.path for zipimport to find the packages
        sys.path.insert(0, zip_file)
        expected = [
            'sub',
            'sub.test_walkpackages_zipfile',
            'sub.test_walkpackages_zipfile.mod',
            'test_walkpackages_zipfile',
            'test_walkpackages_zipfile.sub',
            'test_walkpackages_zipfile.sub.mod',
        ]
        actual= [e[1] for e in pkgutil.walk_packages([zip_file])]
        self.assertEqual(actual, expected)
        del sys.path[0]

        # drop the packages the walk imported so later tests start clean;
        # the '...mod' entries were only listed, never imported
        for pkg in expected:
            if pkg.endswith('mod'):
                continue
            del sys.modules[pkg]
0
def find_analysis_classes():
    """Map each analysis module's basename to its Analysis subclass.

    Walks oopa.modules; for each module, keeps the last class whose
    direct bases include a class named like ``Analysis``.  Modules with no
    such class are reported and mapped to None.
    """
    analyses = {}

    package_itr = pkgutil.walk_packages(
        path=oopa.modules.__path__,
        prefix=oopa.modules.__name__ + "."
    )

    for importer, mod_name, is_package in package_itr:
        if is_package:
            continue

        mod = importlib.import_module(mod_name)

        analysis = None
        # matched by base-class *name*, so subclasses-of-subclasses are
        # deliberately not picked up
        for name, member in inspect.getmembers(mod, inspect.isclass):
            if Analysis.__name__ in [c.__name__ for c in member.__bases__]:
                analysis = member

        if analysis is None:
            # print() replaces the Python-2-only print statement; report the
            # module name rather than mod.__path__, which plain (non-package)
            # modules do not have and which raised AttributeError here.
            print("Error: Analysis subclass not found in %s" % mod_name)

        analyses[mod_name.split(".")[-1]] = analysis

    return analyses
Esempio n. 24
0
def morepath_packages():
    """Yield every importable top-level package belonging to a distribution
    that depends on morepath, skipping declared namespace packages and
    anything nested inside a package already processed."""
    namespace_packages = set()
    paths = []
    m = DependencyMap()
    m.load()
    # collect install locations of all morepath-dependent distributions,
    # remembering their declared namespace packages so we can skip them
    for dist in m.relevant_dists('morepath'):
        if dist.has_metadata('namespace_packages.txt'):
            data = dist.get_metadata('namespace_packages.txt')
            for ns in data.split('\n'):
                ns = ns.strip()
                if ns:
                    namespace_packages.add(ns)
        paths.append(dist.location)

    seen = set()

    for importer, dotted_name, is_pkg in walk_packages(paths):
        if not is_pkg:
            continue
        if dotted_name in namespace_packages:
            continue
        # skip sub-packages of a package we already yielded
        if known_prefix(dotted_name, seen):
            continue
        for prefix in prefixes(dotted_name):
            if prefix not in namespace_packages:
                seen.add(prefix)
        m = importer.find_module(dotted_name).load_module(dotted_name)
        # XXX hack to work around bug in walk_packages that will load
        # more than one namespace package: http://bugs.python.org/issue14787
        # XXX performance
        if in_path(m, paths):
            yield m
Esempio n. 25
0
    def get_modules(self):
        """
        walks all modules in the test repo, filters by
        product and module filter. Filter passed in with -m
        returns a list of module dotpath strings
        """
        test_repo = import_module(self.test_repo_name)
        prefix = "{0}.".format(test_repo.__name__)
        product_path = "{0}{1}".format(prefix, self.product)

        candidates = []
        for _importer, modname, is_pkg in pkgutil.walk_packages(
                path=test_repo.__path__, prefix=prefix,
                onerror=lambda x: None):
            if is_pkg or not modname.startswith(product_path):
                continue
            # -m filter matches against the final path component only
            leaf = modname.rsplit(".", 1)[1]
            if not self.module_regex or self.module_regex in leaf:
                candidates.append(modname)

        # keep a module when no package filter is set, or when any filter
        # string occurs in its parent dotpath
        selected = []
        for modname in candidates:
            parent = modname.rsplit(".", 1)[0]
            if not self.packages or any(p in parent for p in self.packages):
                selected.append(modname)
        selected.sort()
        return selected
Esempio n. 26
0
 def discover(self, path=os.path.dirname(__file__), prefix=None):
     """Recursively import every plugin module found under *path*.

     Modules are registered in sys.modules as '<prefix>.<name>'; packages
     are descended into recursively.  Plugin-specific errors are logged
     and skipped so one bad plugin cannot stop discovery.
     """
     dirname = os.path.basename(path)
     if prefix is None:
         prefix = dirname
     for importer, name, ispkg in pkgutil.walk_packages([path]):
         try:
             pkg_name = '%s.%s' % (prefix, name)
             if pkg_name not in sys.modules:
                 # importing the module is the side effect we want here
                 module_meta = importer.find_module(name)
                 module = module_meta.load_module(pkg_name)
             else:
                 module = sys.modules[pkg_name]
             if ispkg:
                 self.discover(os.path.join(path, name), pkg_name)
         # logging.warning replaces the deprecated logging.warn alias
         except PluginFormatError as error:
             logging.warning(' *** [{name}] Plugin error: {error}'
                             ''.format(name=name, error=error))
         except PluginLoadError as error:
             logging.warning(' *** [{name}] Plugin failed to load: {error}'
                             ''.format(name=name, error=error))
         except PluginCrashed as error:
             logging.warning(' *** [{name}] Plugin crashed: {error}'
                             ''.format(name=name, error=error))
         except ImportError as error:
             logging.exception(error)
Esempio n. 27
0
        def walk_packages(path=None, prefix='', onerror=None):
            """ Implementation of walk_packages for python 2.5 """
            def seen(path, seenpaths={}):  # pylint: disable=W0102
                """ detect if a path has been 'seen' (i.e., considered
                for inclusion in the generator).  tracks what has been
                seen through the magic of python default arguments """
                if path in seenpaths:
                    return True
                seenpaths[path] = True

            for importer, name, ispkg in iter_modules(path, prefix):
                yield importer, name, ispkg

                if ispkg:
                    # packages must be imported so their __path__ can be
                    # read for the recursive descent below
                    try:
                        __import__(name)
                    except ImportError:
                        # unimportable package: report via onerror if given,
                        # otherwise skip silently
                        if onerror is not None:
                            onerror(name)
                    except Exception:
                        # any other import-time error is fatal unless the
                        # caller supplied an onerror handler
                        if onerror is not None:
                            onerror(name)
                        else:
                            raise
                    else:
                        path = getattr(sys.modules[name], '__path__', [])

                        # don't traverse path items we've seen before
                        path = [p for p in path if not seen(p)]

                        for item in walk_packages(path, name + '.', onerror):
                            yield item
Esempio n. 28
0
    def load_scrapers(cls):
        """
        Dynamically loads all the scrapers in the 'scrapers' folder
        that contain 'Scraper' class inheriting from 'ScraperBase'.
        """
        # NOTE: the original mixed tabs and spaces, which is a TabError on
        # Python 3 — indentation is normalized to spaces throughout.
        logger = logging.getLogger('filer')

        locations = ['/Filed/src/scrapers']
        logger.info("Loading scrapers from: %s",
                     ', '.join(["'%s'" % location for location in locations]))
        scrapers = []
        for finder, name, ispkg in pkgutil.walk_packages(locations):
            try:
                loader = finder.find_module(name)
                scraper = loader.load_module(name)
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; load failures are logged and skipped
            except Exception:
                logger.warning("Skipped scraper '%s' due to an error.", name,
                               exc_info=True)
            else:
                if hasattr(scraper, 'Scraper') and issubclass(scraper.Scraper, ScraperBase):
                    logger.debug("Found scraper '%s'", name)
                    scrapers.append(scraper)
                else:
                    logger.warning("Skipped scraper '%s' because it misses " +
                                   "the 'Scraper' class or it does not inherit " +
                                   "the 'ScraperBase' parent class", name)
        logger.info("Scrapers loaded")
        return scrapers
Esempio n. 29
0
 def _load_all_views(cls):
     """Yield every module found under the registered view packages,
     importing only those not already present in sys.modules."""
     for pkg in cls.view_packages:
         prefix = pkg.__name__ + '.'
         for loader, name, _is_pkg in pkgutil.walk_packages(pkg.__path__, prefix):
             if name in sys.modules:
                 continue
             yield loader.find_module(name).load_module(name)
def test_tabs():
    """Test that there are no tabs in our source files."""
    # avoid importing modules that require mayavi if mayavi is not installed
    ignore = list(_tab_ignores)
    try:
        import mayavi  # noqa: F401 analysis:ignore
    except ImportError:
        gui_modules = ('_coreg_gui', '_fiducials_gui', '_file_traits', '_help',
                       '_kit2fiff_gui', '_marker_gui', '_viewer')
        ignore += ['mne.gui.' + module for module in gui_modules]

    for _, modname, ispkg in walk_packages(mne.__path__, prefix='mne.'):
        # because we don't import e.g. mne.tests w/mne
        if ispkg or modname in ignore:
            continue
        # mod = importlib.import_module(modname)  # not py26 compatible!
        try:
            with pytest.warns(None):
                __import__(modname)
        except Exception:  # can't import properly
            continue
        module = sys.modules[modname]
        try:
            code = getsource(module)
        except IOError:  # user probably should have run "make clean"
            continue
        assert '\t' not in code, ('"%s" has tabs, please remove them '
                                  'or add it to the ignore list'
                                  % modname)
Esempio n. 31
0
__author__ = ['Miguel Ramos Pernas']
__email__ = ['*****@*****.**']

# Python
import importlib
import inspect
import os
import pkgutil

# Absolute path to the directory containing this package.
PACKAGE_PATH = os.path.dirname(os.path.abspath(__file__))

__all__ = ['PACKAGE_PATH']

# Auto-import every sibling module (skipping setup-like and dunder modules)
# and re-export at this level every name each module declares in its own
# __all__.
for loader, module_name, ispkg in pkgutil.walk_packages(__path__):

    if module_name.endswith('setup') or module_name.endswith('__'):
        continue

    # Import all classes and functions
    mod = importlib.import_module('.' + module_name, package='hep_spt.stats')

    # NOTE(review): assumes every submodule defines __all__ -- a module
    # without one would raise AttributeError here.
    __all__ += mod.__all__

    for n, c in inspect.getmembers(mod):
        if n in mod.__all__:
            globals()[n] = c

# Keep the public name list deterministic for documentation/star-imports.
__all__ = list(sorted(__all__))
Esempio n. 32
0
import importlib
import pkgutil

from .hooks import get_builder, get_tool  # noqa

# Import all the packages in this directory so their hooks get run.
# walk_packages yields relative names ('.pkg') which import_module resolves
# against this package.
for _, name, _ in pkgutil.walk_packages(__path__, '.'):
    importlib.import_module(name, __package__)
Esempio n. 33
0
    def sourcesResolve(self, item, info=False):
        """Resolve a scraped source *item* into a playable URL.

        Imports the provider module named in the item, asks it to resolve the
        raw URL, then routes the result through a debrid service, the local
        self-resolver or urlresolver as appropriate.  On success the URL is
        stored on ``self.url`` and returned; on any failure ``None`` is
        returned implicitly (and, when *info* is True, ``self.errorForSources()``
        is called first).  Python 2 only (``__import__`` with level -1,
        ``urllib.quote_plus``).
        """
        try:
            self.url = None

            u = url = item['url']

            d = item['debrid']
            direct = item['direct']

            provider = item['provider'].lower()

            host = item['source'].lower()

            if not provider.endswith(('_movies', '_series', '_movies_series')):
                # Map a bare provider name onto the concrete module name by
                # scanning this package for '<provider>_*' modules.
                sourceDict = []
                for package, name, is_pkg in pkgutil.walk_packages(__path__):
                    sourceDict.append((name, is_pkg))
                provider = [
                    i[0] for i in sourceDict
                    if i[1] == False and i[0].startswith(provider + '_')
                ][0]

            source = __import__(provider, globals(), locals(), [], -1).source()
            u = url = source.resolve(url)

            if url == None or not '://' in str(url): raise Exception()

            if not d == '':
                # A debrid account is configured for this item.
                url = debrid.resolver(url, d)

            elif not direct == True:
                # Non-direct link: try the local self-resolver first, then
                # fall back to urlresolver.
                url = None
                if host in self.resDict:
                    try:
                        from res import selfresolver
                        url = selfresolver.resolve(u, host)
                    except:
                        pass
                if url == None:
                    hmf = urlresolver.HostedMediaFile(url=u,
                                                      include_disabled=True,
                                                      include_universal=False)
                    if hmf.valid_url() == True: url = hmf.resolve()

            if url == False or url == None: raise Exception()

            # Reject archive links: they cannot be streamed.
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit(
                '.')[-1].replace('/', '').lower()
            if ext == 'rar': raise Exception()

            # Optional request headers ride after a '|' separator in the URL.
            try:
                headers = url.rsplit('|', 1)[1]
            except:
                headers = ''
            headers = urllib.quote_plus(headers).replace(
                '%3D', '=') if ' ' in headers else headers
            headers = dict(urlparse.parse_qsl(headers))

            if url.startswith('http') and '.m3u8' in url:
                # Probe HLS manifests to make sure the stream actually answers.
                result = client.request(url.split('|')[0],
                                        headers=headers,
                                        output='geturl',
                                        timeout='20')
                if result == None: raise Exception()

            #elif url.startswith('http'):
            #   result = client.request(url.split('|')[0], headers=headers, output='chunk', timeout='20')
            #   if result == None: raise Exception()

            self.url = url
            return url
        except:
            if info == True: self.errorForSources()
            return
Esempio n. 34
0
    def getSources(self,
                   title,
                   year,
                   imdb,
                   tvdb,
                   season,
                   episode,
                   tvshowtitle,
                   premiered,
                   timeout=30):
        """Scrape all enabled provider modules for playable sources.

        Discovers provider modules in this package, filters them by content
        type (movie vs episode) and by the user's per-provider settings,
        runs each one on a worker thread, and drives a progress dialog while
        waiting (up to *timeout* seconds, then a 30-second grace period).
        Results accumulate in ``self.sources``, which is filtered and
        returned.  Python 2 only (``__import__`` with level -1,
        ``str.encode`` on Kodi language strings).
        """
        # Collect the names of all non-package modules in this package;
        # each one is a provider.
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__):
            sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        content = 'movie' if tvshowtitle == None else 'episode'

        # Keep only providers matching the requested content type, encoded
        # in the module-name suffix.
        if content == 'movie':
            sourceDict = [
                i for i in sourceDict
                if i.endswith(('_movies', '_movies_series'))
            ]
        else:
            sourceDict = [
                i for i in sourceDict
                if i.endswith(('_series', '_movies_series'))
            ]

        # Pair each provider with its enable/disable setting; if settings
        # cannot be read, default every provider to enabled.
        try:
            sourceDict = [
                (i,
                 control.setting(
                     'provider.' +
                     re.sub('_movies_series$|_movies$|_series$', '', i)))
                for i in sourceDict
            ]
        except:
            sourceDict = [(i, 'true') for i in sourceDict]

        sourceDict = [i[0] for i in sourceDict if not i[1] == 'false']

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.providercacheFile

        # One worker thread per provider; each thread instantiates the
        # provider module's source() class and calls the matching getter.
        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict:
                threads.append(
                    workers.Thread(
                        self.getMovieSource, title, year, imdb,
                        re.sub('_movies_series$|_movies$|_series$', '',
                               source),
                        __import__(source, globals(), locals(), [],
                                   -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            for source in sourceDict:
                threads.append(
                    workers.Thread(
                        self.getEpisodeSource, title, year, imdb, tvdb, season,
                        episode, tvshowtitle, premiered,
                        re.sub('_movies_series$|_movies$|_series$', '',
                               source),
                        __import__(source, globals(), locals(), [],
                                   -1).source()))

        # A user-configured timeout overrides the parameter when present.
        try:
            timeout = int(control.setting('scrapers.timeout.1'))
        except:
            pass

        [i.start() for i in threads]

        # Human-readable provider labels shown in the progress dialog.
        sourceLabel = [
            re.sub('_movies_series$|_movies$|_series$', '', i)
            for i in sourceDict
        ]
        sourceLabel = [re.sub('v\d+$', '', i).upper() for i in sourceLabel]

        progressDialog = control.progressDialog if control.setting(
            'progress.dialog') == '0' else control.progressDialogBG
        progressDialog.create(control.addonInfo('name'), '')
        progressDialog.update(0)

        string1 = control.lang(32404).encode('utf-8')
        string2 = control.lang(32405).encode('utf-8')
        string3 = control.lang(32406).encode('utf-8')

        # Main wait loop: poll every 0.5s up to `timeout` seconds, updating
        # the dialog with elapsed time and the providers still running.
        for i in range(0, timeout * 2):
            try:
                if xbmc.abortRequested == True: return sys.exit()

                # Thread names end in an index into sourceLabel.
                try:
                    info = [
                        sourceLabel[int(re.sub('[^0-9]', '', str(x.getName())))
                                    - 1] for x in threads
                        if x.is_alive() == True
                    ]
                except:
                    info = []

                try:
                    if progressDialog.iscanceled(): break
                    string4 = string1 % str(int(i * 0.5))
                    if len(info) > 5: string5 = string3 % str(len(info))
                    else: string5 = string3 % str(info).translate(None, "[]'")
                    progressDialog.update(
                        int((100 / float(len(threads))) *
                            len([x
                                 for x in threads if x.is_alive() == False])),
                        str(string4), str(string5))
                except:
                    # Fallback formatting path if the dialog update fails.
                    string4 = string2 % str(int(i * 0.5))
                    if len(info) > 5: string5 = string3 % str(len(info))
                    else: string5 = str(info).translate(None, "[]'")
                    progressDialog.update(
                        int((100 / float(len(threads))) *
                            len([x
                                 for x in threads if x.is_alive() == False])),
                        str(string4), str(string5))

                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(0.5)
            except:
                pass

        # Grace period: wait up to 30 more seconds, but stop as soon as any
        # source has been found.
        for i in range(0, 30 * 2):
            try:
                if xbmc.abortRequested == True: return sys.exit()

                try:
                    info = [
                        sourceLabel[int(re.sub('[^0-9]', '', str(x.getName())))
                                    - 1] for x in threads
                        if x.is_alive() == True
                    ]
                except:
                    info = []

                try:
                    if progressDialog.iscanceled(): break
                    string4 = string1 % str(int(i * 0.5) + timeout)
                    if len(info) > 5: string5 = string3 % str(len(info))
                    else: string5 = string3 % str(info).translate(None, "[]'")
                    progressDialog.update(
                        int((100 / float(len(threads))) *
                            len([x
                                 for x in threads if x.is_alive() == False])),
                        str(string4), str(string5))
                except:
                    string4 = string2 % str(int(i * 0.5) + timeout)
                    if len(info) > 5: string5 = string3 % str(len(info))
                    else: string5 = str(info).translate(None, "[]'")
                    progressDialog.update(
                        int((100 / float(len(threads))) *
                            len([x
                                 for x in threads if x.is_alive() == False])),
                        str(string4), str(string5))

                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                if self.sources: break
                time.sleep(0.5)
            except:
                pass

        try:
            progressDialog.close()
        except:
            pass

        self.sourcesFilter()

        return self.sources
Esempio n. 35
0
import pkgutil
import inspect
import datetime
import os

__all__ = []

# Auto-import every module in this package and hoist all of its public
# (non-dunder) names into this namespace, recording them in __all__.
for loader, name, is_pkg in pkgutil.walk_packages(__path__):
    # NOTE(review): loader.find_module()/load_module() are deprecated since
    # Python 3.4 -- confirm the supported interpreter versions.
    module = loader.find_module(name).load_module(name)

    # The loop variable ``name`` is reused here, clobbering the module name
    # for the rest of this iteration (harmless, but easy to misread).
    for name, value in inspect.getmembers(module):
        if name.startswith('__'):
            continue

        globals()[name] = value
        __all__.append(name)


class Formatter(object):
    """Helpers for rendering values in a platform-appropriate way."""

    @staticmethod
    def WinOrLinux(winStr, linStr):
        """Return ``winStr`` on Windows (``os.name == 'nt'``), otherwise
        ``linStr``."""
        return winStr if os.name == 'nt' else linStr

    @staticmethod
    def day(d):
        """Format *d*, converted to local time, as e.g. 'Jan 05, 2020'
        (day padding differs between Windows and other platforms)."""
        pattern = Formatter.WinOrLinux('%b %d, %Y', '%b %e, %Y')
        return Formatter.utc_to_local(d).strftime(pattern)
Esempio n. 36
0
"""

import pkgutil
import os
import json
from .utils import get_info

__author__ = "Andrea Tramacere"

# Directory and importable name of this package on disk.
pkg_dir = os.path.abspath(os.path.dirname(__file__))
pkg_name = os.path.basename(pkg_dir)
__all__ = []

# Version/label metadata comes from the project-level helper.
_info = get_info()
__version__ = _info['version']

if 'label' in _info.keys():
    __label__ = _info['label']
else:
    __label__ = None

# Expose every sub-package name through __all__; plain modules are ignored.
for importer, modname, ispkg in pkgutil.walk_packages(path=[pkg_dir],
                                                      prefix=pkg_name + '.',
                                                      onerror=lambda x: None):

    if ispkg == True:
        __all__.append(modname)
    else:
        pass

    # NOTE(review): this assignment sits inside the loop body, so it is
    # recomputed every iteration with the same value -- presumably it was
    # meant to be at module level; confirm before moving it.
    data_dir = os.path.dirname(__file__) + '/data'
Esempio n. 37
0
def all_estimators(include_meta_estimators=False,
                   include_other=False,
                   type_filter=None,
                   include_dont_test=False):
    """Get a list of all estimators from sklearn.

    Crawls every ``sklearn`` submodule (skipping test modules) and collects
    the classes that inherit from ``BaseEstimator``, excluding abstract
    base classes.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that take an estimator as their
        first argument (BaseEnsemble, OneVsOneClassifier, RFE, RFECV, ...).

    include_other : boolean, default=False
        Whether to include meta-estimators that cannot be sensibly
        default-constructed (Pipeline, FeatureUnion, GridSearchCV).

    include_dont_test : boolean, default=False
        Whether to include "special" label estimators or test processors.

    type_filter : string, list of string, or None, default=None
        Restrict the result to estimators of the given kind(s):
        'classifier', 'regressor', 'cluster' and/or 'transformer'.
        None applies no filter.

    Returns
    -------
    estimators : list of tuples
        Sorted list of (name, class) pairs, where ``name`` is the class
        name as a string and ``class`` is the actual class object.

    Raises
    ------
    ValueError
        If ``type_filter`` contains an unrecognised kind.
    """
    def _is_abstract(klass):
        # Abstract iff it declares a non-empty __abstractmethods__ set.
        return bool(getattr(klass, '__abstractmethods__', False))

    # Walk every sklearn module (errors during discovery are swallowed) and
    # gather all classes defined or re-exported there.
    discovered = []
    for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
                                               prefix='sklearn.',
                                               onerror=lambda x: None):
        if ".tests." in modname:
            continue
        mod = __import__(modname, fromlist="dummy")
        discovered.extend(inspect.getmembers(mod, inspect.isclass))

    # Deduplicate, then keep concrete BaseEstimator subclasses only.
    estimators = [
        pair for pair in set(discovered)
        if issubclass(pair[1], BaseEstimator)
        and pair[0] != 'BaseEstimator'
        and not _is_abstract(pair[1])
    ]

    if not include_dont_test:
        estimators = [pair for pair in estimators
                      if pair[0] not in DONT_TEST]
    if not include_other:
        estimators = [pair for pair in estimators
                      if pair[0] not in OTHER]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [pair for pair in estimators
                      if pair[0] not in META_ESTIMATORS]

    if type_filter is not None:
        # Work on a private copy so the caller's list is untouched.
        if isinstance(type_filter, list):
            type_filter = list(type_filter)
        else:
            type_filter = [type_filter]
        mixin_by_kind = {
            'classifier': ClassifierMixin,
            'regressor': RegressorMixin,
            'transformer': TransformerMixin,
            'cluster': ClusterMixin,
        }
        selected = []
        for kind, mixin in mixin_by_kind.items():
            if kind in type_filter:
                type_filter.remove(kind)
                selected.extend(pair for pair in estimators
                                if issubclass(pair[1], mixin))
        estimators = selected
        # Anything left over was not a recognised kind.
        if type_filter:
            raise ValueError(
                "Parameter type_filter must be 'classifier', "
                "'regressor', 'transformer', 'cluster' or None, got"
                " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    return sorted(set(estimators))
Esempio n. 38
0
    def getSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date):
        """Scrape all enabled provider modules for playable sources.

        Discovers provider modules in this package ('_mv'/'_tv'/'_mv_tv'
        suffix selects movies, TV or both), filters them by user settings,
        runs each provider on a worker thread, and drives a progress dialog
        until every thread finishes, the user cancels, or the timeout
        elapses.  Returns ``self.sources``.  Python 2 only (``__import__``
        with level -1, ``str.encode`` on Kodi language strings).
        """
        # Collect the names of all non-package modules; each is a provider.
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        content = 'movie' if tvshowtitle == None else 'episode'


        # Filter by content type and pair each provider with its setting;
        # default everything to enabled if settings cannot be read.
        if content == 'movie':
            sourceDict = [i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i))) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]
        else:
            sourceDict = [i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i) + '_tv')) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.sourcescacheFile

        sourceDict = [i[0] for i in sourceDict if i[1] == 'true']

        # One worker thread per enabled provider.
        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
            for source in sourceDict: threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))


        # User-configured timeout, defaulting to 40 seconds.
        try: timeout = int(control.setting('sources_timeout_40'))
        except: timeout = 40

        [i.start() for i in threads]

        control.idle()

        # Human-readable provider labels shown in the progress dialog.
        sourceLabel = [re.sub('_mv_tv$|_mv$|_tv$', '', i) for i in sourceDict]
        sourceLabel = [re.sub('v\d+$', '', i).upper() for i in sourceLabel]


        self.progressDialog = control.progressDialog
        self.progressDialog.create(control.addonInfo('name'), '')
        self.progressDialog.update(0)

        string1 = control.lang(30512).encode('utf-8')
        string2 = control.lang(30513).encode('utf-8')
        string3 = control.lang(30514).encode('utf-8')

        # Poll every 0.5s, updating elapsed time and the providers still
        # running, until all threads finish, cancel, or timeout.
        for i in range(0, timeout * 2):
            try:
                if xbmc.abortRequested == True: return sys.exit()

                # Thread names end in an index into sourceLabel.
                try: info = [sourceLabel[int(re.sub('[^0-9]', '', str(x.getName()))) - 1] for x in threads if x.is_alive() == True]
                except: info = []

                if len(info) > 5: info = len(info)

                self.progressDialog.update(int((100 / float(len(threads))) * len([x for x in threads if x.is_alive() == False])), str('%s: %s %s' % (string1, int(i * 0.5), string2)), str('%s: %s' % (string3, str(info).translate(None, "[]'"))))

                if self.progressDialog.iscanceled(): break

                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(0.5)
            except:
                pass

        self.progressDialog.close()

        return self.sources
Esempio n. 39
0
#  -*- coding: UTF-8 -*-
#  File: __init__.py
#  Author: Yuxin Wu <*****@*****.**>

from pkgutil import walk_packages
import os
import os.path

__all__ = []


def global_import(name):
    """Relative-import *name*, re-export its public names at module level,
    and record them in ``__all__``; the module object itself is removed
    from this namespace afterwards."""
    mod = __import__(name, globals(), locals(), level=1)
    public = getattr(mod, '__all__', None)
    if public is None:
        # No explicit export list: fall back to everything in the module.
        public = dir(mod)
    del globals()[name]
    for symbol in public:
        globals()[symbol] = mod.__dict__[symbol]
        __all__.append(symbol)


# Import (via global_import, which re-exports their names) every non-private
# module that exists as a real .py file next to this file.
_CURR_DIR = os.path.dirname(__file__)
for _, module_name, _ in walk_packages(
        [_CURR_DIR]):
    srcpath = os.path.join(_CURR_DIR, module_name + '.py')
    # Skip entries that are packages (directories) rather than .py modules.
    if not os.path.isfile(srcpath):
        continue
    if not module_name.startswith('_'):
        global_import(module_name)
Esempio n. 40
0
import warnings
import importlib

from pkgutil import walk_packages
from inspect import getsource, isabstract

import sklearn
from sklearn.base import signature
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_docstring_parameters
from sklearn.utils.testing import _get_func_name
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.deprecation import _is_deprecated

# All importable public sklearn module names (private modules and test
# modules are excluded).
PUBLIC_MODULES = set([
    pckg[1] for pckg in walk_packages(prefix='sklearn.', path=sklearn.__path__)
    if not ("._" in pckg[1] or ".tests." in pckg[1])
])

# TODO Uncomment all modules and fix doc inconsistencies everywhere
# The list of modules that are not tested for now
IGNORED_MODULES = (
    'cluster',
    'datasets',
    'decomposition',
    'feature_extraction',
    'gaussian_process',
    'linear_model',
    'ensemble',
    'feature_selection',
    'kernel_approximation',
Esempio n. 41
0
    def getObjects(self,
                   modulePath,
                   reFilter=None,
                   parentClass=None,
                   recurse=False):
        """ Search for modules under a certain path

        modulePath is the import string needed to access the parent module.
        Root modules will be included automatically (like DIRAC). For instance
        "ConfigurationSystem.Service"

        reFilter is a regular expression to filter what to load. For instance
        ".*Handler"

        parentClass is a class object from which the loaded modules have to
        import from. For instance RequestHandler

        recurse, when True, also descends into sub-packages.

        Returns S_OK with a dict mapping "modulePath.modName" to the class of
        the same name found in each module (insertion-ordered when OrderedDict
        is available); any failed import is returned as its error result.
        """

        # Preserve discovery order where the runtime supports it (Py >= 2.7).
        if 'OrderedDict' in dir(collections):
            modules = collections.OrderedDict()
        else:
            modules = {}

        # Python 2 only: accept a plain/unicode pattern string.
        if type(reFilter) in types.StringTypes:
            reFilter = re.compile(reFilter)

        for rootModule in self.__rootModules:
            if rootModule:
                impPath = "%s.%s" % (rootModule, modulePath)
            else:
                impPath = modulePath
            gLogger.debug("Trying to load %s" % impPath)

            result = self.__recurseImport(impPath)
            if not result['OK']:
                return result
            if not result['Value']:
                # Module does not exist under this root; try the next one.
                continue

            parentModule = result['Value']
            fsPath = parentModule.__path__[0]
            gLogger.verbose("Loaded module %s at %s" % (impPath, fsPath))

            for _modLoader, modName, isPkg in pkgutil.walk_packages(
                    parentModule.__path__):
                if reFilter and not reFilter.match(modName):
                    continue
                if isPkg:
                    # Sub-packages are only descended into when recursing.
                    if recurse:
                        result = self.getObjects("%s.%s" %
                                                 (modulePath, modName),
                                                 reFilter=reFilter,
                                                 parentClass=parentClass,
                                                 recurse=recurse)
                        if not result['OK']:
                            return result
                        modules.update(result['Value'])
                    continue
                modKeyName = "%s.%s" % (modulePath, modName)
                # Earlier root modules take precedence over later ones.
                if modKeyName in modules:
                    continue
                fullName = "%s.%s" % (impPath, modName)
                result = self.__recurseImport(modName,
                                              parentModule=parentModule,
                                              fullName=fullName)
                if not result['OK']:
                    return result
                if not result['Value']:
                    continue
                modObj = result['Value']

                # Convention: the module must define a class named after
                # itself.
                try:
                    modClass = getattr(modObj, modName)
                except AttributeError:
                    gLogger.warn("%s does not contain a %s object" %
                                 (fullName, modName))
                    continue

                if parentClass and not issubclass(modClass, parentClass):
                    continue

                #Huge success!
                modules[modKeyName] = modClass

        return S_OK(modules)
Esempio n. 42
0
def load_object(obj_dict):
    """
    Creates an instantiation of a class based on a dictionary representation. We implicitly
    determine the Class through introspection along with information in the dictionary.

    We search for a class with the _fw_name property equal to obj_dict['_fw_name']
    If the @module key is set, that module is checked first for a matching class
    to improve speed of lookup.
    Afterwards, the modules in the USER_PACKAGES global parameter are checked.

    Refactoring class names, module names, etc. will not break object loading
    as long as:

    i) the _fw_name property is maintained the same AND
    ii) the refactored module is kept within USER_PACKAGES

    You can get around these limitations if you really want:
    i) If you want to change the fw_name of an object you can set the FW_NAME_UPDATES key
    ii) If you want to put a refactored module in a new place add an entry to USER_PACKAGES

    :param obj_dict: the dict representation of the class
    """

    # override the name in the obj_dict if there's an entry in FW_NAME_UPDATES
    fw_name = FW_NAME_UPDATES.get(obj_dict['_fw_name'], obj_dict['_fw_name'])
    obj_dict['_fw_name'] = fw_name

    # check for explicit serialization, e.g. {{fireworks.tasks.MyTask}} - based on pymatgen method
    if fw_name.startswith('{{') and fw_name.endswith('}}'):
        modname, classname = fw_name.strip('{} ').rsplit(".", 1)
        mod = __import__(modname, globals(), locals(), [classname], 0)
        if hasattr(mod, classname):
            cls_ = getattr(mod, classname)
            return cls_.from_dict(obj_dict)

    # first try to load from known location
    if fw_name in SAVED_FW_MODULES:
        m_module = importlib.import_module(SAVED_FW_MODULES[fw_name])
        m_object = _search_module_for_obj(m_module, obj_dict)
        if m_object is not None:
            return m_object

    # failing that, look for the object within all of USER_PACKAGES
    # this will be slow, but only needed the first time

    found_objects = []  # used to make sure we don't find multiple hits
    for package in USER_PACKAGES:
        root_module = importlib.import_module(package)
        for loader, mod_name, is_pkg in pkgutil.walk_packages(
                root_module.__path__, package + '.'):
            try:
                m_module = loader.find_module(mod_name).load_module(mod_name)
                m_object = _search_module_for_obj(m_module, obj_dict)
                if m_object is not None:
                    found_objects.append((m_object, mod_name))
            except ImportError as ex:
                # BUGFIX: the previous handler formatted ``m_object`` into
                # the warning, but ``m_object`` is unbound when the very
                # first import fails (NameError) and stale otherwise; report
                # the module that actually failed.  ``traceback.print_exc``
                # also takes no exception argument.
                import warnings
                warnings.warn(
                    "%s cannot be loaded because of %s. Skipping.." %
                    (mod_name, str(ex)))
                traceback.print_exc()

    if len(found_objects) == 1:
        # Remember where we found it so the next lookup is fast.
        SAVED_FW_MODULES[fw_name] = found_objects[0][1]
        return found_objects[0][0]
    elif len(found_objects) > 0:
        raise ValueError(
            'load_object() found multiple objects with cls._fw_name {} -- {}'.
            format(fw_name, found_objects))

    raise ValueError(
        'load_object() could not find a class with cls._fw_name {}'.format(
            fw_name))
Esempio n. 43
0
def init():
    """Import every package in this directory so that their import-time
    hooks are executed."""
    for _finder, mod_name, _ispkg in pkgutil.walk_packages(__path__, '.'):
        importlib.import_module(mod_name, __package__)
Esempio n. 44
0
            return self.encoders[t]
        except KeyError:
            logger.warn('Requested unknown encoder %s' % t, exc_info=True)
            return None

    def get_decoder(self, t):
        """Return the registered decoder for type *t*.

        Logs a warning (with traceback) and returns None when no decoder is
        registered for *t*.
        """
        try:
            return self.decoders[t]
        except KeyError:
            # logger.warn() is a deprecated alias of warning().
            logger.warning('Requested unknown decoder %s' % t, exc_info=True)
            return None


# Shared marshalling context handed to every encoder factory.
_ctx = MarshallingCtx(ENCODERS, DECODERS)

# Register every encoder found under the ``encoders`` package: a module
# participates by exposing a module-level ``encoder`` attribute of the form
# ``(type, encoder_factory)``; modules without one are skipped.
packages = pkgutil.walk_packages(encoders.__path__, encoders.__name__ + '.')
for module_loader, name, ispkg in packages:
    m = importlib.import_module(name)
    try:
        t, encoder = m.encoder
        ENCODERS[t] = encoder(_ctx)
    except AttributeError:
        pass

packages = pkgutil.walk_packages(decoders.__path__, decoders.__name__ + '.')
for module_loader, name, ispkg in packages:
    m = importlib.import_module(name)
    try:
        t, decoder = m.decoder
        DECODERS[t] = decoder(_ctx)
    except AttributeError:
Esempio n. 45
0
def find_packages(path=None, prefix=""):
    """Yield *prefix* itself, then the dotted name of every package found
    under *path*.

    Parameters
    ----------
    path : list of str, optional
        Directories to search; defaults to the current directory.
    prefix : str
        Dotted prefix prepended to every yielded package name (it is also
        yielded on its own as the first item).
    """
    # BUGFIX: the default used to be the mutable literal ``["."]``, which is
    # shared across calls; use None as the sentinel instead.
    if path is None:
        path = ["."]
    yield prefix
    prefix = prefix + "."
    for _, name, ispkg in walk_packages(path, prefix):
        if ispkg:
            yield name
Esempio n. 46
0
    if options.copy:
        copy_qtgui_to_qtwidgets()

    elif options.generate:
        generate_common_members()

    elif options.sort:
        sort_common_members()

    else:

        # Import <binding>
        binding = __import__(options.binding)

        for importer, modname, ispkg in pkgutil.walk_packages(
                path=binding.__path__,
                prefix=binding.__name__ + '.',
                onerror=lambda x: None):
            if modname not in SKIP_MODULES:
                MODULES.append(modname)
                basemodule = modname[:modname.rfind('.')]
                submodule = modname[modname.rfind('.')+1:]
                try:
                    import_statement = (
                        'from ' + basemodule + ' import ' + submodule)
                    exec(import_statement)
                    # print(import_statement)
                except (ImportError, AttributeError, SyntaxError) as error:
                    # SyntaxError catched here because e.g. _port3
                    # running on Python 2...
                    print('WARNING: Skipped import', modname, error)
Esempio n. 47
0
#!/usr/bin/env python

# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3-methods/blob/master/LICENSE

def hasmethods(name):
    """Lazily load the module for *name* and report whether it defines a
    class attribute called ``Methods``.

    When *name* itself has no loader but contains '_3c_' and '_3e_'
    (presumably encoded '<' and '>' of a specialized type name -- see the
    loader table), the part before '_3c_' is loaded in its place.
    """
    namespace = globals()
    if name not in namespace:
        loaders = hasmethods.loaders
        if name in loaders:
            namespace[name] = loaders[name].load_module(name)
        elif '_3c_' in name and '_3e_' in name:
            stripped = name.split('_3c_')[0]
            if stripped in loaders:
                namespace[name] = loaders[stripped].load_module(stripped)

    if name not in namespace:
        return False
    return isinstance(getattr(namespace[name], "Methods", None), type)

import pkgutil

# Map each submodule name of this package to its loader so hasmethods()
# can load modules lazily on demand.
# NOTE(review): loader.find_module/.load_module are deprecated since
# Python 3.4 in favour of find_spec/exec_module — confirm target runtime.
hasmethods.loaders = dict([(module_name, loader.find_module(module_name)) for loader, module_name, is_pkg in pkgutil.walk_packages(__path__)])

del pkgutil
Esempio n. 48
0
def all_estimators(type_filter=None,):
    """Get a list of all estimators from skclean.

    This function crawls the ``skclean`` package and gets all classes that
    inherit from BaseEstimator. Classes that are defined in test-modules
    are not included. By default meta_estimators are also not included.
    This function is adapted from sklearn.

    NOTE(review): ``type_filter`` is accepted for API compatibility but is
    never used by the body below — all estimators are returned regardless
    of its value.

    Parameters
    ----------
    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no
        filter is applied and all estimators are returned.  Possible
        values are 'sampler' to get estimators only of these specific
        types, or a list of these to get the estimators that fit at
        least one of the types.  (Currently ignored — see note above.)

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """

    def is_abstract(c):
        # A class is abstract when it declares a non-empty set of
        # __abstractmethods__.
        if not (hasattr(c, "__abstractmethods__")):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    modules_to_ignore = {"tests"}
    # Parent of the package directory: walk_packages() is rooted here.
    root = str(Path(__file__).parent.parent)
    # Ignore deprecation warnings triggered at import time and from walking
    # packages
    with ignore_warnings(category=FutureWarning):
        for importer, modname, ispkg in pkgutil.walk_packages(
                path=[root], prefix='skclean.'):
            mod_parts = modname.split(".")
            # Skip test packages and private modules.
            if (any(part in modules_to_ignore for part in mod_parts)
                    or '._' in modname):
                continue
            module = import_module(modname)
            classes = inspect.getmembers(module, inspect.isclass)
            classes = [(name, est_cls) for name, est_cls in classes
                       if not name.startswith("_")]

            all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [
        c
        for c in all_classes
        if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
    ]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    # get rid of sklearn estimators which have been imported in some classes
    estimators = [c for c in estimators if "sklearn" not in c[1].__module__]

    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
Esempio n. 49
0
def command_names():
    """Return the (de-duplicated) names of all modules in ``seed.commands``."""
    from seed import commands
    unique_names = {info[1] for info in walk_packages(path=commands.__path__)}
    return list(unique_names)
Esempio n. 50
0
from sklearn.utils.estimator_checks import _enforce_estimator_tags_y
from sklearn.utils.estimator_checks import _enforce_estimator_tags_x
from sklearn.utils.deprecation import _is_deprecated
from sklearn.externals._pep562 import Pep562
from sklearn.datasets import make_classification

import pytest


# walk_packages() ignores DeprecationWarnings, now we need to ignore
# FutureWarnings
with warnings.catch_warnings():
    warnings.simplefilter('ignore', FutureWarning)
    # Every public sklearn submodule: private modules ("._") and test
    # packages (".tests.") are excluded.
    PUBLIC_MODULES = set([
        pckg[1] for pckg in walk_packages(
            prefix='sklearn.',
            # mypy error: Module has no attribute "__path__"
            path=sklearn.__path__)  # type: ignore  # mypy issue #1422
        if not ("._" in pckg[1] or ".tests." in pckg[1])
    ])

# functions to ignore args / docstring of
_DOCSTRING_IGNORES = [
    'sklearn.utils.deprecation.load_mlcomp',
    'sklearn.pipeline.make_pipeline',
    'sklearn.pipeline.make_union',
    'sklearn.utils.extmath.safe_sparse_dot',
    'sklearn.utils._joblib'
]

# Methods where y param should be ignored if y=None by default
_METHODS_IGNORE_NONE_Y = [
Esempio n. 51
0
def discover_modules(module_or_name):
    """Given an import name, provide an iterable of module filepath,name pairs.

    This function imports package __init__.py but does not import modules.

    More specifically, given a package name, this function walks the package
    (which requires importing the package __init__) and yields the filepath and
    name of each module (as a tuple of strings) in the package.  Python treats
    both packages and modules as module objects, and this function makes no
    distinction.  Give this function an import name and it returns an iterable
    regardless of whether the import name maps to a package or a module; in the
    case of a module name, the iterable results in only one element.

    Example:
    >>> discover_modules('testsite.stash') # doctest:+ELLIPSIS
    <generator object discover_modules at 0x...>
    >>> for item in discover_modules('testsite.stash'):
    ...     print item # doctest: +ELLIPSIS
    ...
    testsite.stash
    testsite.stash.blank
    testsite.stash.blankexport
    testsite.stash.dummy
    testsite.stash.index
    testsite.stash.multiple
    testsite.stash.noexports
    testsite.stash.package
    testsite.stash.package.module
    testsite.stash.view_arg
    >>> list(discover_modules('testsite.stash.index')) # doctest:+ELLIPSIS
    ['testsite.stash.index']
    >>>

    For flexibility & support, a module may be used in addition to a string.
    >>> import testsite.stash
    >>> discover_modules(testsite.stash) # doctest:+ELLIPSIS
    <generator object discover_modules at 0x...>
    >>> for item in discover_modules(testsite.stash):
    ...     print item # doctest: +ELLIPSIS
    ...
    testsite.stash
    testsite.stash.blank
    testsite.stash.blankexport
    testsite.stash.dummy
    testsite.stash.index
    testsite.stash.multiple
    testsite.stash.noexports
    testsite.stash.package
    testsite.stash.package.module
    testsite.stash.view_arg
    >>> import testsite.stash.index
    >>> list(discover_modules(testsite.stash.index))
    ['testsite.stash.index']
    >>>

    :param module_or_name: Tango site stash import name, or imported module
    :type module_or_name: str
    """
    # Accept either an already-imported module or its dotted name.
    if isinstance(module_or_name, types.ModuleType):
        module = module_or_name
        name = module.__name__
    else:
        module, name = None, module_or_name
    yield name
    # Only packages need walking; plain modules yield just themselves.
    if module_is_package(name):
        if module is None:
            module = get_module(name)
        for _, child, _ in pkgutil.walk_packages(module.__path__, name + '.'):
            yield child
Esempio n. 52
0
def import_subpackages(module_prefix: str, module_path: Path):
    """Import all subpackages of a module."""
    # Walk the directory and import each discovered module under the
    # given dotted prefix, purely for its import side effects.
    for _, name, _ in walk_packages([str(module_path)]):
        import_module('%s.%s' % (module_prefix, name))
Esempio n. 53
0
    def checkSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season,
                     episode, tvshowtitle, alter, date):
        """Spawn scraper threads for a movie or episode and wait for sources.

        One worker thread is started per enabled scraper module found in
        this package; results accumulate in ``self.sources``.  Returns
        True once at least 10 sources are collected before the timeout,
        False otherwise.

        NOTE(review): the ``name`` parameter is immediately shadowed by
        the walk_packages loop variable below and is never used as passed.
        """
        sourceDict = []
        # Enumerate every module in this package; packages are dropped by
        # the ispkg filter below, leaving plain scraper modules.
        for package, name, is_pkg in pkgutil.walk_packages(__path__):
            sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        # No tvshowtitle means we are resolving a movie.
        content = 'movie' if tvshowtitle == None else 'episode'

        if content == 'movie':
            # Scraper modules are suffixed by capability: _mv, _tv, _mv_tv.
            sourceDict = [
                i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))
            ]
            try:
                # Pair each module with its enabled/disabled user setting.
                sourceDict = [
                    (i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i)))
                    for i in sourceDict
                ]
            except:
                # Best-effort: assume enabled when settings are unavailable.
                sourceDict = [(i, 'true') for i in sourceDict]
        else:
            sourceDict = [
                i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))
            ]
            try:
                sourceDict = [(i,
                               control.setting(
                                   re.sub('_mv_tv$|_mv$|_tv$', '', i) + '_tv'))
                              for i in sourceDict]
            except:
                sourceDict = [(i, 'true') for i in sourceDict]

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.sourcescacheFile

        # Keep only the scrapers the user has enabled.
        sourceDict = [i[0] for i in sourceDict if i[1] == 'true']

        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict:
                threads.append(
                    workers.Thread(
                        self.getMovieSource, title, year, imdb,
                        re.sub('_mv_tv$|_mv$|_tv$', '', source),
                        __import__(source, globals(), locals(), [],
                                   -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            season, episode = alterepisode.alterepisode().get(
                imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
            for source in sourceDict:
                threads.append(
                    workers.Thread(
                        self.getEpisodeSource, title, year, imdb, tvdb, season,
                        episode, tvshowtitle, date,
                        re.sub('_mv_tv$|_mv$|_tv$', '', source),
                        __import__(source, globals(), locals(), [],
                                   -1).source()))

        try:
            timeout = int(control.setting('sources_timeout_40'))
        except:
            timeout = 40

        [i.start() for i in threads]

        # Poll twice per second until timeout, Kodi abort, enough sources,
        # or all worker threads have finished.
        for i in range(0, timeout * 2):
            try:
                if xbmc.abortRequested == True: return sys.exit()

                if len(self.sources) >= 10: break

                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(0.5)
            except:
                pass

        if len(self.sources) >= 10: return True
        else: return False
Esempio n. 54
0
    if upfront or toctree2 is None:
        if functionname not in common:
            toctree.write("    " + functionname + "\n")
        toctree2.write("    " + functionname + " <" + functionname + ">\n")
    else:
        toctree2.write("    " + functionname + "\n")


# For every module in the documentation order, emit toctree entries; each
# non-root module gets its own secondary ".toctree" file.
for modulename in order:
    module = importlib.import_module(modulename)

    if modulename != "uproot":
        toctree2 = open(modulename + ".toctree", "w")
        toctree2.write(""".. toctree::
    :caption: {0}
    :hidden:

""".format(modulename.replace("uproot.", "")))

    handle_module(modulename, module)
    # Recurse into subpackages (only packages end in __init__.py).
    if module.__file__.endswith("__init__.py") and modulename != "uproot":
        for submodulename in sorted([
                modulename + "." + name for loader, name, is_pkg in
                pkgutil.walk_packages(module.__path__)
        ]):
            submodule = importlib.import_module(submodulename)
            handle_module(submodulename, submodule)

# NOTE(review): only the most recently opened toctree2 handle is closed
# here; handles opened in earlier loop iterations are leaked.
toctree.close()
toctree2.close()
Esempio n. 55
0
def _get_module_names(package_name):
    return [
        "{}.{}".format(package_name, name)
        for _, name, _ in pkgutil.walk_packages([package_name])
    ]
Esempio n. 56
0
def all_estimators(include_meta_estimators=False,
                   include_other=False,
                   type_filter=None):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.

    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV

    type_filter : string or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned.  Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        # A class is abstract when it declares a non-empty set of
        # __abstractmethods__.
        if not (hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    # get parent folder
    path = sklearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        if ".tests." in modname:
            continue
        module = __import__(modname, fromlist="dummy")
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [
        c for c in all_classes
        if (issubclass(c[1], BaseEstimator) and c[0] != 'BaseEstimator')
    ]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    # ``other`` and ``meta_estimators`` are presumably module-level name
    # lists defined elsewhere in this file — TODO confirm.
    if not include_other:
        estimators = [c for c in estimators if not c[0] in other]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if not c[0] in meta_estimators]

    if type_filter == 'classifier':
        estimators = [
            est for est in estimators if issubclass(est[1], ClassifierMixin)
        ]
    elif type_filter == 'regressor':
        estimators = [
            est for est in estimators if issubclass(est[1], RegressorMixin)
        ]
    elif type_filter == 'transformer':
        estimators = [
            est for est in estimators if issubclass(est[1], TransformerMixin)
        ]
    elif type_filter == 'cluster':
        estimators = [
            est for est in estimators if issubclass(est[1], ClusterMixin)
        ]
    elif type_filter is not None:
        raise ValueError("Parameter type_filter must be 'classifier', "
                         "'regressor', 'transformer', 'cluster' or None, got"
                         " %s." % repr(type_filter))

    # We sort in order to have reproducible test failures
    return sorted(estimators)
Esempio n. 57
0
def _backported_all_estimators(type_filter=None):
    """
    Backported from scikit-learn 0.23.2:
    https://github.com/scikit-learn/scikit-learn/blob/0.23.2/sklearn/utils/__init__.py#L1146

    Use this backported `all_estimators` in old versions of sklearn because:
    1. An inferior version of `all_estimators` that old versions of sklearn use for testing,
       might function differently from a newer version.
    2. This backported `all_estimators` works on old versions of sklearn that don’t even define
       the testing utility variant of `all_estimators`.

    ========== original docstring ==========
    Get a list of all estimators from sklearn.
    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.
    Parameters
    ----------
    type_filter : string, list of string,  or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned.  Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.
    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actuall type of the class.
    """
    # lazy import to avoid circular imports from sklearn.base
    import pkgutil
    import platform
    import sklearn
    from importlib import import_module
    from operator import itemgetter

    # pylint: disable=no-name-in-module, import-error
    from sklearn.utils.testing import ignore_warnings
    from sklearn.base import (
        BaseEstimator,
        ClassifierMixin,
        RegressorMixin,
        TransformerMixin,
        ClusterMixin,
    )

    IS_PYPY = platform.python_implementation() == "PyPy"

    def is_abstract(c):
        # A class is abstract when it declares a non-empty set of
        # __abstractmethods__.
        if not (hasattr(c, "__abstractmethods__")):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    modules_to_ignore = {"tests", "externals", "setup", "conftest"}
    root = sklearn.__path__[0]  # sklearn package
    # Ignore deprecation warnings triggered at import time and from walking
    # packages
    with ignore_warnings(category=FutureWarning):
        for _, modname, _ in pkgutil.walk_packages(path=[root],
                                                   prefix="sklearn."):
            mod_parts = modname.split(".")
            if any(part in modules_to_ignore
                   for part in mod_parts) or "._" in modname:
                continue
            module = import_module(modname)
            # NOTE(review): relies on a module-level `inspect` import;
            # `inspect` is not among the lazy imports above.
            classes = inspect.getmembers(module, inspect.isclass)
            classes = [(name, est_cls) for name, est_cls in classes
                       if not name.startswith("_")]

            # TODO: Remove when FeatureHasher is implemented in PYPY
            # Skips FeatureHasher for PYPY
            if IS_PYPY and "feature_extraction" in modname:
                classes = [(name, est_cls) for name, est_cls in classes
                           if name == "FeatureHasher"]

            all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [
        c for c in all_classes
        if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
    ]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {
            "classifier": ClassifierMixin,
            "regressor": RegressorMixin,
            "transformer": TransformerMixin,
            "cluster": ClusterMixin,
        }
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend(
                    [est for est in estimators if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        # Anything left in type_filter was not a recognised category.
        if type_filter:
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or "
                             "None, got"
                             " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
Esempio n. 58
0
import importlib
import logging
import pkgutil

# Load all repository handlers.
#
# Every submodule of this package must expose all functions listed in
# _required_functions; modules missing any of them are skipped with an
# error logged.  Valid handlers are registered in `modules`, keyed by
# their short (unprefixed) module name.
modules = dict()
_required_functions = ('verify', 'resolve_latest', 'get_artifact', 'repro_arg')
for importer, modname, ispkg in pkgutil.walk_packages(path=__path__,
                                                      prefix=__name__ + '.'):
    module = importlib.import_module(modname)
    bad_module = False
    for function in _required_functions:
        if not hasattr(module, function):
            # BUG FIX: the original did `% function`, interpolating a single
            # value into a two-placeholder format string, which raises
            # TypeError instead of logging the error.
            logging.error('Ignoring %s: No %s function' % (modname, function))
            bad_module = True
    if bad_module:
        continue
    assert modname.startswith('%s.' % __name__)
    shortname = modname[len('%s.' % __name__):]
    modules[shortname] = module
Esempio n. 59
0
        ):
            for task_measure, task_measure_name in task_related_measures:
                if custom_measure == task_measure:
                    return

        if 'CUSTOM' not in AntMeasuresFactory.factory_measures:
            AntMeasuresFactory.factory_measures['CUSTOM'] = []

        AntMeasuresFactory.factory_measures['CUSTOM'].append(
            (custom_measure, None))


def _global_import(name):
    """Relatively import sibling module *name* and publish the measure
    functions listed in its ``default`` mapping into this namespace,
    registering each one with AntMeasuresFactory under its task type."""
    module = __import__(name, globals(), locals(), level=1)
    exported = module.default if 'default' in dir(module) else {}
    if len(exported) > 0:
        # The module name itself is replaced by the measures it exports.
        globals().pop(name)

    for method_name, (measure_name, task_type) in exported.items():
        globals()[method_name] = module.__dict__[method_name]
        if task_type not in AntMeasuresFactory.factory_measures:
            AntMeasuresFactory.factory_measures[task_type] = []
        AntMeasuresFactory.factory_measures[task_type].append(
            (module.__dict__[method_name], measure_name))


# Auto-import every sibling non-private module, wiring its measures into
# this package's namespace via _global_import().
for _, module_name, _ in walk_packages([os.path.dirname(__file__)]):
    if not module_name.startswith('_'):
        _global_import(module_name)
Esempio n. 60
0
__author__ = 'dxl'
import pkgutil
# Allow this package to span multiple directories (namespace-style), then
# eagerly import every submodule/subpackage for its side effects.
__path__ = pkgutil.extend_path(__path__, __name__)
for imp, module, ispackage in pkgutil.walk_packages(path=__path__,
                                                    prefix=__name__ + '.'):
    __import__(module)